changeset:   40404:956ec6f1320d
branch:      stable
tag:         4.8rc0
user:        Augie Fackler <augie@google.com>
date:        Mon, 22 Oct 2018 14:46:06 -0400
parent:      40131:535fc8a22365
parent:      40403:bf249bb60087
child:       40405:4185bc53d1e3
summary:     merge to stable for 4.8 release freeze

files:
  contrib/phabricator.py
  contrib/simplemerge
  hgext/narrow/narrowchangegroup.py
  hgext/narrow/narrowcopies.py
  hgext/narrow/narrowpatch.py
  hgext/narrow/narrowrevlog.py
  mercurial/dagutil.py
  tests/test-lrucachedict.py.out
  tests/test-parseindex2.py.out
  tests/test-py3-commands.t
--- a/.hgignore	Wed Oct 10 12:25:28 2018 -0400
+++ b/.hgignore	Mon Oct 22 14:46:06 2018 -0400
@@ -19,6 +19,7 @@
 *.zip
 \#*\#
 .\#*
+tests/artifacts/cache/big-file-churn.hg
 tests/.coverage*
 tests/.testtimes*
 tests/.hypothesis
@@ -55,6 +56,7 @@
 hgext/__index__.py
 
 rust/target/
+rust/*/target/
 
 # Generated wheels
 wheelhouse/
--- a/Makefile	Wed Oct 10 12:25:28 2018 -0400
+++ b/Makefile	Mon Oct 22 14:46:06 2018 -0400
@@ -9,7 +9,8 @@
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
+PYFILESCMD=find mercurial hgext doc -name '*.py'
+PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/help/*.txt
 export LANGUAGE=C
 export LC_ALL=C
@@ -145,7 +146,7 @@
         # parse them even though they are not marked for translation.
         # Extracting with an explicit encoding of ISO-8859-1 will make
         # xgettext "parse" and ignore them.
-	echo $(PYFILES) | xargs \
+	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	  --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	  --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \
--- a/contrib/bash_completion	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/bash_completion	Mon Oct 22 14:46:06 2018 -0400
@@ -152,7 +152,7 @@
 {
     local cur prev cmd cmd_index opts i aliashg
     # global options that receive an argument
-    local global_args='--cwd|-R|--repository'
+    local global_args='--cwd|-R|--repository|--color|--config|--encoding|--encodingmode|--pager'
     local hg="$1"
     local canonical=0
 
@@ -206,6 +206,18 @@
             _hg_fix_wordlist
             return
         ;;
+        --color)
+            local choices='true false yes no always auto never debug'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
+        --pager)
+            local choices='true false yes no always auto never'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
     esac
 
     if [ -z "$cmd" ] || [ $COMP_CWORD -eq $i ]; then
--- a/contrib/byteify-strings.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/byteify-strings.py	Mon Oct 22 14:46:06 2018 -0400
@@ -169,6 +169,11 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        # Looks like "if __name__ == '__main__'".
+        if (t.type == token.NAME and t.string == '__name__'
+            and _isop(i + 1, '==')):
+            _ensuresysstr(i + 2)
+
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
 
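The new __name__ check above keeps byteify-strings from rewriting the
idiomatic main guard into a bytes comparison, which would silently never
match on Python 3. A hedged before/after sketch (not part of the
changeset) of what the check preserves:

    # Without the check, byteify-strings would emit this, which is
    # always false on Python 3 because __name__ is a native str:
    if __name__ == b'__main__':
        main()

    # With the check, the right-hand side stays a native string:
    if __name__ == '__main__':
        main()
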
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/catapipe.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Tool to read primitive events from a pipe to produce a catapult trace.
+
+For now the event stream supports
+
+  START $SESSIONID ...
+
+and
+
+  END $SESSIONID ...
+
+events. Everything after the SESSIONID (which must not contain spaces)
+is used as a label for the event. Events are timestamped as of when
+they arrive in this process and are then used to produce catapult
+traces that can be loaded in Chrome's about:tracing utility. It's
+important that the event stream *into* this process stay simple,
+because we have to emit it from the shell scripts produced by
+run-tests.py.
+
+Typically you'll want to place the path to the named pipe in the
+HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
+understand.
+"""
+from __future__ import absolute_import, print_function
+
+import argparse
+import json
+import os
+import timeit
+
+_TYPEMAP = {
+    'START': 'B',
+    'END': 'E',
+}
+
+_threadmap = {}
+
+# Timeit already contains the whole logic about which timer to use based on
+# Python version and OS
+timer = timeit.default_timer
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('pipe', type=str, nargs=1,
+                        help='Path of named pipe to create and listen on.')
+    parser.add_argument('output', default='trace.json', type=str, nargs='?',
+                        help='Path of json file to create where the traces '
+                             'will be stored.')
+    parser.add_argument('--debug', default=False, action='store_true',
+                        help='Print useful debug messages')
+    args = parser.parse_args()
+    fn = args.pipe[0]
+    os.mkfifo(fn)
+    try:
+        with open(fn) as f, open(args.output, 'w') as out:
+            out.write('[\n')
+            start = timer()
+            while True:
+                ev = f.readline().strip()
+                if not ev:
+                    continue
+                now = timer()
+                if args.debug:
+                    print(ev)
+                verb, session, label = ev.split(' ', 2)
+                if session not in _threadmap:
+                    _threadmap[session] = len(_threadmap)
+                pid = _threadmap[session]
+                ts_micros = (now - start) * 1000000
+                out.write(json.dumps(
+                    {
+                        "name": label,
+                        "cat": "misc",
+                        "ph": _TYPEMAP[verb],
+                        "ts": ts_micros,
+                        "pid": pid,
+                        "tid": 1,
+                        "args": {}
+                    }))
+                out.write(',\n')
+    finally:
+        os.unlink(fn)
+
+if __name__ == '__main__':
+    main()
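
The event stream catapipe.py consumes is easy to generate from any
process; a minimal producer sketch (hypothetical session ID and label,
assuming catapipe.py is already listening on the pipe named in
HGCATAPULTSERVERPIPE):

    import os
    import time

    # Hypothetical producer: one START/END pair becomes one 'B'/'E'
    # event pair in the resulting catapult trace.
    pipe = os.environ['HGCATAPULTSERVERPIPE']
    with open(pipe, 'w') as fp:
        fp.write('START sess1 run demo step\n')
        fp.flush()
        time.sleep(0.25)  # the work being traced happens here
        fp.write('END sess1 run demo step\n')
        fp.flush()
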
--- a/contrib/check-code.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/check-code.py	Mon Oct 22 14:46:06 2018 -0400
@@ -30,7 +30,7 @@
     opentext = open
 else:
     def opentext(f):
-        return open(f, encoding='ascii')
+        return open(f, encoding='latin1')
 try:
     xrange
 except NameError:
@@ -503,7 +503,7 @@
   [
     (r'os\.environ', "use encoding.environ instead (py3)", r'#.*re-exports'),
     (r'os\.name', "use pycompat.osname instead (py3)"),
-    (r'os\.getcwd', "use pycompat.getcwd instead (py3)"),
+    (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
     (r'os\.sep', "use pycompat.ossep instead (py3)"),
     (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
     (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),
@@ -511,6 +511,7 @@
     (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
     (r'os\.getenv', "use encoding.environ.get instead"),
     (r'os\.setenv', "modifying the environ dict is not preferred"),
+    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
   ],
   # warnings
   [],
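The negative lookbehind in the new xrange rule is what lets the
sanctioned spelling through; a standalone illustration (not part of the
changeset):

    import re

    pat = re.compile(r'(?<!pycompat\.)xrange')

    assert pat.search('for i in xrange(10):')               # flagged
    assert not pat.search('for i in pycompat.xrange(10):')  # allowed
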
--- a/contrib/check-commit	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/check-commit	Mon Oct 22 14:46:06 2018 -0400
@@ -39,8 +39,6 @@
      "summary keyword should be most user-relevant one-word command or topic"),
     (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
     (afterheader + r".{79,}", "summary line too long (limit is 78)"),
-    (r"\n\+\n( |\+)\n", "adds double empty line"),
-    (r"\n \n\+\n", "adds double empty line"),
     # Forbid "_" in function name.
     #
     # We skip the check for cffi related functions. They use names mapping the
--- a/contrib/check-config.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/check-config.py	Mon Oct 22 14:46:06 2018 -0400
@@ -42,6 +42,14 @@
     config:\s(?P<config>\S+\.\S+)$
     ''', re.VERBOSE | re.MULTILINE)
 
+if sys.version_info[0] > 2:
+    def mkstr(b):
+        if isinstance(b, str):
+            return b
+        return b.decode('utf8')
+else:
+    mkstr = lambda x: x
+
 def main(args):
     for f in args:
         sect = b''
@@ -92,7 +100,7 @@
             # look for ignore markers
             m = ignorere.search(l)
             if m:
-                if m.group('reason') == 'inconsistent':
+                if m.group('reason') == b'inconsistent':
                     allowinconsistent.add(m.group('config'))
                 else:
                     documented[m.group('config')] = 1
@@ -104,36 +112,45 @@
                 ctype = m.group('ctype')
                 if not ctype:
                     ctype = 'str'
-                name = m.group('section') + "." + m.group('option')
+                name = m.group('section') + b"." + m.group('option')
                 default = m.group('default')
-                if default in (None, 'False', 'None', '0', '[]', '""', "''"):
-                    default = ''
+                if default in (
+                        None, b'False', b'None', b'0', b'[]', b'""', b"''"):
+                    default = b''
                 if re.match(b'[a-z.]+$', default):
-                    default = '<variable>'
+                    default = b'<variable>'
                 if (name in foundopts and (ctype, default) != foundopts[name]
                     and name not in allowinconsistent):
-                    print(l.rstrip())
-                    print("conflict on %s: %r != %r" % (name, (ctype, default),
-                                                        foundopts[name]))
-                    print("at %s:%d:" % (f, linenum))
+                    print(mkstr(l.rstrip()))
+                    fctype, fdefault = foundopts[name]
+                    print("conflict on %s: %r != %r" % (
+                        mkstr(name),
+                        (mkstr(ctype), mkstr(default)),
+                        (mkstr(fctype), mkstr(fdefault))))
+                    print("at %s:%d:" % (mkstr(f), linenum))
                 foundopts[name] = (ctype, default)
-                carryover = ''
+                carryover = b''
             else:
                 m = re.search(configpartialre, line)
                 if m:
                     carryover = line
                 else:
-                    carryover = ''
+                    carryover = b''
 
     for name in sorted(foundopts):
         if name not in documented:
-            if not (name.startswith("devel.") or
-                    name.startswith("experimental.") or
-                    name.startswith("debug.")):
+            if not (name.startswith(b"devel.") or
+                    name.startswith(b"experimental.") or
+                    name.startswith(b"debug.")):
                 ctype, default = foundopts[name]
                 if default:
+                    if isinstance(default, bytes):
+                        default = mkstr(default)
                     default = ' [%s]' % default
-                print("undocumented: %s (%s)%s" % (name, ctype, default))
+                elif isinstance(default, bytes):
+                    default = mkstr(default)
+                print("undocumented: %s (%s)%s" % (
+                    mkstr(name), mkstr(ctype), default))
 
 if __name__ == "__main__":
     if len(sys.argv) > 1:
--- a/contrib/chg/hgclient.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/chg/hgclient.c	Mon Oct 22 14:46:06 2018 -0400
@@ -32,7 +32,7 @@
        CAP_ATTACHIO = 0x0100,
        CAP_CHDIR = 0x0200,
        CAP_SETENV = 0x0800,
-       CAP_SETUMASK = 0x1000,
+       CAP_SETUMASK2 = 0x1000,
        CAP_VALIDATE = 0x2000,
        CAP_SETPROCNAME = 0x4000,
 };
@@ -48,7 +48,7 @@
     {"attachio", CAP_ATTACHIO},
     {"chdir", CAP_CHDIR},
     {"setenv", CAP_SETENV},
-    {"setumask", CAP_SETUMASK},
+    {"setumask2", CAP_SETUMASK2},
     {"validate", CAP_VALIDATE},
     {"setprocname", CAP_SETPROCNAME},
     {NULL, 0}, /* terminator */
@@ -425,10 +425,11 @@
 	mode_t mask = umask(0);
 	umask(mask);
 
-	static const char command[] = "setumask\n";
-	sendall(hgc->sockfd, command, sizeof(command) - 1);
 	uint32_t data = htonl(mask);
-	sendall(hgc->sockfd, &data, sizeof(data));
+	enlargecontext(&hgc->ctx, sizeof(data));
+	memcpy(hgc->ctx.data, &data, sizeof(data));
+	hgc->ctx.datasize = sizeof(data);
+	writeblockrequest(hgc, "setumask2");
 }
 
 /*!
@@ -508,7 +509,7 @@
 		attachio(hgc);
 	if (hgc->capflags & CAP_CHDIR)
 		chdirtocwd(hgc);
-	if (hgc->capflags & CAP_SETUMASK)
+	if (hgc->capflags & CAP_SETUMASK2)
 		forwardumask(hgc);
 
 	return hgc;
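
The rename to setumask2 also changes the wire format: rather than
writing the raw 4-byte mask immediately after the command line, the
client now ships it as a length-prefixed data block via
writeblockrequest(). A hedged Python sketch of reading that request on
the server side (the helper name is hypothetical; assumes the command
server's big-endian length framing):

    import struct

    def read_setumask2(sockfile):
        # After the "setumask2\n" command line, the payload arrives as
        # a length-prefixed block: a big-endian u32 length, then the
        # data, which is itself the umask as a big-endian u32.
        length, = struct.unpack('>I', sockfile.read(4))
        data = sockfile.read(length)
        mask, = struct.unpack('>I', data)
        return mask
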
--- a/contrib/clang-format-ignorelist	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/clang-format-ignorelist	Mon Oct 22 14:46:06 2018 -0400
@@ -6,6 +6,7 @@
 mercurial/cext/revlog.c
 # Vendored code that we should never format:
 contrib/python-zstandard/c-ext/bufferutil.c
+contrib/python-zstandard/c-ext/compressionchunker.c
 contrib/python-zstandard/c-ext/compressiondict.c
 contrib/python-zstandard/c-ext/compressionparams.c
 contrib/python-zstandard/c-ext/compressionreader.c
@@ -25,6 +26,8 @@
 contrib/python-zstandard/zstd/common/bitstream.h
 contrib/python-zstandard/zstd/common/compiler.h
 contrib/python-zstandard/zstd/common/cpu.h
+contrib/python-zstandard/zstd/common/debug.c
+contrib/python-zstandard/zstd/common/debug.h
 contrib/python-zstandard/zstd/common/entropy_common.c
 contrib/python-zstandard/zstd/common/error_private.c
 contrib/python-zstandard/zstd/common/error_private.h
@@ -42,6 +45,8 @@
 contrib/python-zstandard/zstd/common/zstd_errors.h
 contrib/python-zstandard/zstd/common/zstd_internal.h
 contrib/python-zstandard/zstd/compress/fse_compress.c
+contrib/python-zstandard/zstd/compress/hist.c
+contrib/python-zstandard/zstd/compress/hist.h
 contrib/python-zstandard/zstd/compress/huf_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress_internal.h
@@ -64,8 +69,10 @@
 contrib/python-zstandard/zstd/deprecated/zbuff_decompress.c
 contrib/python-zstandard/zstd/deprecated/zbuff.h
 contrib/python-zstandard/zstd/dictBuilder/cover.c
+contrib/python-zstandard/zstd/dictBuilder/cover.h
 contrib/python-zstandard/zstd/dictBuilder/divsufsort.c
 contrib/python-zstandard/zstd/dictBuilder/divsufsort.h
+contrib/python-zstandard/zstd/dictBuilder/fastcover.c
 contrib/python-zstandard/zstd/dictBuilder/zdict.c
 contrib/python-zstandard/zstd/dictBuilder/zdict.h
 contrib/python-zstandard/zstd/zstd.h
--- a/contrib/dumprevlog	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/dumprevlog	Mon Oct 22 14:46:06 2018 -0400
@@ -6,7 +6,9 @@
 
 import sys
 from mercurial import (
+    encoding,
     node,
+    pycompat,
     revlog,
 )
 from mercurial.utils import (
@@ -16,22 +18,26 @@
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
-def binopen(path, mode='rb'):
-    if 'b' not in mode:
-        mode = mode + 'b'
-    return open(path, mode)
+def binopen(path, mode=b'rb'):
+    if b'b' not in mode:
+        mode = mode + b'b'
+    return open(path, pycompat.sysstr(mode))
+
+def printb(data, end=b'\n'):
+    sys.stdout.flush()
+    pycompat.stdout.write(data + end)
 
 for f in sys.argv[1:]:
-    r = revlog.revlog(binopen, f)
+    r = revlog.revlog(binopen, encoding.strtolocal(f))
     print("file:", f)
     for i in r:
         n = r.node(i)
         p = r.parents(n)
         d = r.revision(n)
-        print("node:", node.hex(n))
-        print("linkrev:", r.linkrev(i))
-        print("parents:", node.hex(p[0]), node.hex(p[1]))
-        print("length:", len(d))
-        print("-start-")
-        print(d)
-        print("-end-")
+        printb(b"node: %s" % node.hex(n))
+        printb(b"linkrev: %d" % r.linkrev(i))
+        printb(b"parents: %s %s" % (node.hex(p[0]), node.hex(p[1])))
+        printb(b"length: %d" % len(d))
+        printb(b"-start-")
+        printb(d)
+        printb(b"-end-")
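
The printb() helper above is the usual bytes-on-stdout pattern for code
that must straddle Python 2 and 3: flush the text layer first so text
and byte output interleave in order, then write raw bytes to the
underlying stream. A standalone Python 3 sketch of the same idea:

    import sys

    def printb(data, end=b'\n'):
        # Flush the text wrapper so earlier print() output lands first,
        # then write raw bytes to the underlying binary buffer.
        sys.stdout.flush()
        sys.stdout.buffer.write(data + end)

    print('length:', 3)
    printb(b'\x00\x01\x02')
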
--- a/contrib/fuzz/Makefile	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/fuzz/Makefile	Mon Oct 22 14:46:06 2018 -0400
@@ -70,12 +70,59 @@
 	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
 	  -lFuzzingEngine -o $$OUT/xdiff_fuzzer
 
+# TODO use the $OUT env var instead of hardcoding /out
+/out/sanpy/bin/python:
+	cd /Python-2.7.15/ && ./configure --without-pymalloc --prefix=$$OUT/sanpy CFLAGS='-O1 -fno-omit-frame-pointer -g -fwrapv -fstack-protector-strong' LDFLAGS=-lasan  && ASAN_OPTIONS=detect_leaks=0 make && make install
+
+sanpy: /out/sanpy/bin/python
+
+manifest.o: sanpy ../../mercurial/cext/manifest.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o manifest.o ../../mercurial/cext/manifest.c
+
+charencode.o: sanpy ../../mercurial/cext/charencode.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o charencode.o ../../mercurial/cext/charencode.c
+
+parsers.o: sanpy ../../mercurial/cext/parsers.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o parsers.o ../../mercurial/cext/parsers.c
+
+dirs.o: sanpy ../../mercurial/cext/dirs.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o dirs.o ../../mercurial/cext/dirs.c
+
+pathencode.o: sanpy ../../mercurial/cext/pathencode.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o pathencode.o ../../mercurial/cext/pathencode.c
+
+revlog.o: sanpy ../../mercurial/cext/revlog.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o revlog.o ../../mercurial/cext/revlog.c
+
+manifest_fuzzer: sanpy manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o
+	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -Wno-register -Wno-macro-redefined \
+	  -I../../mercurial manifest.cc \
+	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o \
+	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  -o $$OUT/manifest_fuzzer
+
+manifest_corpus.zip:
+	python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
+
 clean:
 	$(RM) *.o *_fuzzer \
 	  bdiff \
 	  mpatch \
 	  xdiff
 
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip
 
-.PHONY: all clean oss-fuzz
+.PHONY: all clean oss-fuzz sanpy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/manifest.cc	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,83 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+extern "C" {
+
+/* TODO: use Python 3 for this fuzzing? */
+PyMODINIT_FUNC initparsers(void);
+
+static char cpypath[8192] = "\0";
+
+static PyCodeObject *code;
+static PyObject *mainmod;
+static PyObject *globals;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	const std::string subdir = "/sanpy/lib/python2.7";
+	/* HACK ALERT: we need a full Python installation built without
+	   pymalloc and with ASAN, so we dump one in
+	   $OUT/sanpy/lib/python2.7. This helps us wire that up. */
+	std::string selfpath(*argv[0]);
+	std::string pypath;
+	auto pos = selfpath.rfind("/");
+	if (pos == std::string::npos) {
+		char wd[8192];
+		getcwd(wd, 8192);
+		pypath = std::string(wd) + subdir;
+	} else {
+		pypath = selfpath.substr(0, pos) + subdir;
+	}
+	strncpy(cpypath, pypath.c_str(), pypath.size());
+	setenv("PYTHONPATH", cpypath, 1);
+	setenv("PYTHONNOUSERSITE", "1", 1);
+	/* prevent Python from looking up users in the fuzz environment */
+	setenv("PYTHONUSERBASE", cpypath, 1);
+	Py_SetPythonHome(cpypath);
+	Py_InitializeEx(0);
+	initparsers();
+	code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import lazymanifest
+try:
+  lm = lazymanifest(mdata)
+  # iterate the whole thing, which causes the code to fully parse
+  # every line in the manifest
+  list(lm.iterentries())
+  lm[b'xyzzy'] = (b'\0' * 20, 'x')
+  # do an insert, text should change
+  assert lm.text() != mdata, "insert should change text and didn't: %r %r" % (lm.text(), mdata)
+  del lm[b'xyzzy']
+  # should be back to the same
+  assert lm.text() == mdata, "delete should have restored text but didn't: %r %r" % (lm.text(), mdata)
+except Exception as e:
+  pass
+  # uncomment this print if you're editing this Python code
+  # to debug failures.
+  # print e
+)py",
+	                                        "fuzzer", Py_file_input);
+	mainmod = PyImport_AddModule("__main__");
+	globals = PyModule_GetDict(mainmod);
+	return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	PyObject *mtext =
+	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+	PyObject *locals = PyDict_New();
+	PyDict_SetItemString(locals, "mdata", mtext);
+	PyObject *res = PyEval_EvalCode(code, globals, locals);
+	if (!res) {
+		PyErr_Print();
+	}
+	Py_XDECREF(res);
+	Py_DECREF(locals);
+	Py_DECREF(mtext);
+	return 0; // Non-zero return values are reserved for future use.
+}
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/manifest_corpus.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,30 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+    zf.writestr("manifest_zero",
+'''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
+README\080b6e76643dcb44d4bc729e932fc464b3e36dbe3
+hg\0b6444347c629cc058d478023905cfb83b7f5bb9d
+mercurial/__init__.py\0b80de5d138758541c5f05265ad144ab9fa86d1db
+mercurial/byterange.py\017f5a9fbd99622f31a392c33ac1e903925dc80ed
+mercurial/fancyopts.py\0b6f52e23e356748c5039313d8b639cda16bf67ba
+mercurial/hg.py\023cc12f225f1b42f32dc0d897a4f95a38ddc8f4a
+mercurial/mdiff.py\0a05f65c44bfbeec6a42336cd2ff0b30217899ca3
+mercurial/revlog.py\0217bc3fde6d82c0210cf56aeae11d05a03f35b2b
+mercurial/transaction.py\09d180df101dc14ce3dd582fd998b36c98b3e39aa
+notes.txt\0703afcec5edb749cf5cec67831f554d6da13f2fb
+setup.py\0ccf3f6daf0f13101ca73631f7a1769e328b472c9
+tkmerge\03c922edb43a9c143682f7bc7b00f98b3c756ebe7
+''')
+    zf.writestr("badmanifest_shorthashes",
+                "narf\0aa\nnarf2\0aaa\n")
+    zf.writestr("badmanifest_nonull",
+                "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
+                "narf2aaaaaaaaaaaaaaaaaaaa\n")
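
Each well-formed corpus entry follows the raw manifest line format the
fuzzer parses: path, NUL byte, 40 hex digits of node hash, newline. The
two "bad" seeds deliberately break the hash length and drop the NUL
separator. A small sketch of constructing one valid line (hypothetical
path and node):

    import binascii

    node = binascii.hexlify(b'\x9b' * 20)  # 40 hex digits
    line = b'some/path\x00' + node + b'\n'
    assert len(node) == 40
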
--- a/contrib/hgclient.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/hgclient.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,7 +1,10 @@
 # A minimal client for Mercurial's command server
 
 from __future__ import absolute_import, print_function
+
+import io
 import os
+import re
 import signal
 import socket
 import struct
@@ -9,17 +12,25 @@
 import sys
 import time
 
-try:
-    import cStringIO as io
-    stringio = io.StringIO
-except ImportError:
-    import io
-    stringio = io.StringIO
+if sys.version_info[0] >= 3:
+    stdout = sys.stdout.buffer
+    stderr = sys.stderr.buffer
+    stringio = io.BytesIO
+    def bprint(*args):
+        # remove b'' as well for ease of test migration
+        pargs = [re.sub(br'''\bb(['"])''', br'\1', b'%s' % a) for a in args]
+        stdout.write(b' '.join(pargs) + b'\n')
+else:
+    import cStringIO
+    stdout = sys.stdout
+    stderr = sys.stderr
+    stringio = cStringIO.StringIO
+    bprint = print
 
 def connectpipe(path=None):
-    cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
+    cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe']
     if path:
-        cmdline += ['-R', path]
+        cmdline += [b'-R', path]
 
     server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
@@ -41,9 +52,9 @@
 class unixserver(object):
     def __init__(self, sockpath, logpath=None, repopath=None):
         self.sockpath = sockpath
-        cmdline = ['hg', 'serve', '--cmdserver', 'unix', '-a', sockpath]
+        cmdline = [b'hg', b'serve', b'--cmdserver', b'unix', b'-a', sockpath]
         if repopath:
-            cmdline += ['-R', repopath]
+            cmdline += [b'-R', repopath]
         if logpath:
             stdout = open(logpath, 'a')
             stderr = subprocess.STDOUT
@@ -64,7 +75,7 @@
         self.server.wait()
 
 def writeblock(server, data):
-    server.stdin.write(struct.pack('>I', len(data)))
+    server.stdin.write(struct.pack(b'>I', len(data)))
     server.stdin.write(data)
     server.stdin.flush()
 
@@ -73,48 +84,48 @@
     if not data:
         raise EOFError
     channel, length = struct.unpack('>cI', data)
-    if channel in 'IL':
+    if channel in b'IL':
         return channel, length
     else:
         return channel, server.stdout.read(length)
 
 def sep(text):
-    return text.replace('\\', '/')
+    return text.replace(b'\\', b'/')
 
-def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None,
+def runcommand(server, args, output=stdout, error=stderr, input=None,
                outfilter=lambda x: x):
-    print('*** runcommand', ' '.join(args))
-    sys.stdout.flush()
-    server.stdin.write('runcommand\n')
-    writeblock(server, '\0'.join(args))
+    bprint(b'*** runcommand', b' '.join(args))
+    stdout.flush()
+    server.stdin.write(b'runcommand\n')
+    writeblock(server, b'\0'.join(args))
 
     if not input:
         input = stringio()
 
     while True:
         ch, data = readchannel(server)
-        if ch == 'o':
+        if ch == b'o':
             output.write(outfilter(data))
             output.flush()
-        elif ch == 'e':
+        elif ch == b'e':
             error.write(data)
             error.flush()
-        elif ch == 'I':
+        elif ch == b'I':
             writeblock(server, input.read(data))
-        elif ch == 'L':
+        elif ch == b'L':
             writeblock(server, input.readline(data))
-        elif ch == 'r':
+        elif ch == b'r':
             ret, = struct.unpack('>i', data)
             if ret != 0:
-                print(' [%d]' % ret)
+                bprint(b' [%d]' % ret)
             return ret
         else:
-            print("unexpected channel %c: %r" % (ch, data))
+            bprint(b"unexpected channel %c: %r" % (ch, data))
             if ch.isupper():
                 return
 
 def check(func, connect=connectpipe):
-    sys.stdout.flush()
+    stdout.flush()
     server = connect()
     try:
         return func(server)
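
These helpers are driven by the command-server tests; a hedged usage
sketch (the test function is hypothetical, run against a pipe-mode
server):

    # Hypothetical caller: consume the initial hello block, then run
    # "hg id" through the command server.
    def hellotest(server):
        readchannel(server)
        runcommand(server, [b'id'])

    check(hellotest)
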
--- a/contrib/import-checker.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/import-checker.py	Mon Oct 22 14:46:06 2018 -0400
@@ -5,7 +5,6 @@
 import ast
 import collections
 import os
-import re
 import sys
 
 # Import a minimal set of stdlib modules needed for list_stdlib_modules()
@@ -18,6 +17,8 @@
         basehttpserver = None
     import zlib
 
+import testparseutil
+
 # Whitelist of modules that symbols can be directly imported from.
 allowsymbolimports = (
     '__future__',
@@ -28,6 +29,8 @@
     'mercurial.hgweb.request',
     'mercurial.i18n',
     'mercurial.node',
+    # for revlog to re-export constant to extensions
+    'mercurial.revlogutils.constants',
     # for cffi modules to re-export pure functions
     'mercurial.pure.base85',
     'mercurial.pure.bdiff',
@@ -36,6 +39,7 @@
     'mercurial.pure.parsers',
     # third-party imports should be directly imported
     'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
     'mercurial.thirdparty.cbor',
     'mercurial.thirdparty.cbor.cbor2',
     'mercurial.thirdparty.zope',
@@ -656,61 +660,21 @@
     ...   b'  > EOF',
     ... ]
     >>> test(b"example.t", lines)
-    example[2] doctest.py 2
-    "from __future__ import print_function\\n' multiline\\nstring'\\n"
-    example[7] foo.py 7
+    example[2] doctest.py 1
+    "from __future__ import print_function\\n' multiline\\nstring'\\n\\n"
+    example[8] foo.py 7
     'from __future__ import print_function\\n'
     """
-    inlinepython = 0
-    shpython = 0
-    script = []
-    prefix = 6
-    t = ''
-    n = 0
-    for l in src:
-        n += 1
-        if not l.endswith(b'\n'):
-            l += b'\n'
-        if l.startswith(b'  >>> '): # python inlines
-            if shpython:
-                print("%s:%d: Parse Error" % (f, n))
-            if not inlinepython:
-                # We've just entered a Python block.
-                inlinepython = n
-                t = b'doctest.py'
-            script.append(l[prefix:])
-            continue
-        if l.startswith(b'  ... '): # python inlines
-            script.append(l[prefix:])
-            continue
-        cat = re.search(br"\$ \s*cat\s*>\s*(\S+\.py)\s*<<\s*EOF", l)
-        if cat:
-            if inlinepython:
-                yield b''.join(script), (b"%s[%d]" %
-                       (modname, inlinepython)), t, inlinepython
-                script = []
-                inlinepython = 0
-            shpython = n
-            t = cat.group(1)
-            continue
-        if shpython and l.startswith(b'  > '): # sh continuation
-            if l == b'  > EOF\n':
-                yield b''.join(script), (b"%s[%d]" %
-                       (modname, shpython)), t, shpython
-                script = []
-                shpython = 0
-            else:
-                script.append(l[4:])
-            continue
-        # If we have an empty line or a command for sh, we end the
-        # inline script.
-        if inlinepython and (l == b'  \n'
-                             or l.startswith(b'  $ ')):
-            yield b''.join(script), (b"%s[%d]" %
-                   (modname, inlinepython)), t, inlinepython
-            script = []
-            inlinepython = 0
-            continue
+    errors = []
+    for name, starts, ends, code in testparseutil.pyembedded(f, src, errors):
+        if not name:
+            # use 'doctest.py', in order to make already existing
+            # doctest above pass instantly
+            name = 'doctest.py'
+        # "starts" is "line number" (1-origin), but embedded() is
+        # expected to return "line offset" (0-origin). Therefore, this
+        # yields "starts - 1".
+        yield code, "%s[%d]" % (modname, starts), name, starts - 1
 
 def sources(f, modname):
     """Yields possibly multiple sources from a filepath
--- a/contrib/packaging/Makefile	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/packaging/Makefile	Mon Oct 22 14:46:06 2018 -0400
@@ -120,8 +120,8 @@
 centos$(1):
 	mkdir -p $$(HGROOT)/packages/centos$(1)
 	./buildrpm $$(if $$(filter $(1),$$(CENTOS_WITH_PYTHON_RELEASES)),--withpython)
-	cp $$(HGROOT)/rpmbuild/RPMS/*/* $$(HGROOT)/packages/centos$(1)
-	cp $$(HGROOT)/rpmbuild/SRPMS/* $$(HGROOT)/packages/centos$(1)
+	cp $$(HGROOT)/contrib/packaging/rpmbuild/RPMS/*/* $$(HGROOT)/packages/centos$(1)
+	cp $$(HGROOT)/contrib/packaging/rpmbuild/SRPMS/* $$(HGROOT)/packages/centos$(1)
 
 .PHONY: docker-centos$(1)
 docker-centos$(1):
--- a/contrib/packaging/builddeb	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/packaging/builddeb	Mon Oct 22 14:46:06 2018 -0400
@@ -13,6 +13,13 @@
 DISTID=`(lsb_release -is 2> /dev/null | tr '[:upper:]' '[:lower:]') || echo debian`
 CODENAME=`lsb_release -cs 2> /dev/null || echo unknown`
 DEBFLAGS=-b
+
+cleanup() {
+    if [ "$CLEANUP" ]; then
+        rm -r "$ROOTDIR/debian";
+    fi
+}
+
 while [ "$1" ]; do
     case "$1" in
     --distid )
@@ -44,12 +51,14 @@
     esac
 done
 
-trap "if [ '$CLEANUP' ] ; then rm -r '$PWD/debian' ; fi" EXIT
+cd "$ROOTDIR"
+
+trap 'cleanup' EXIT
 
 set -u
 
 if [ ! -d .hg ]; then
-    echo 'You are not inside a Mercurial repository!' 1>&2
+    printf "You are inside %s, which is not the root of a Mercurial repository\n" $(pwd) 1>&2
     exit 1
 fi
 
@@ -71,7 +80,7 @@
 
 if [ "$BUILD" ]; then
     if [ -d debian ] ; then
-        echo "Error! debian control directory already exists!"
+        printf "Error! debian control directory already exists at %s/debian\n" $(pwd)
         exit 1
     fi
 
@@ -102,5 +111,5 @@
           -type f -newer $control -print0 2>/dev/null | \
       xargs -Inarf -0 mv narf "$OUTPUTDIR"
     echo "Built packages for $debver:"
-    find "$OUTPUTDIR" -type f -newer $control -name '*.deb'
+    find "$PWD"/"$OUTPUTDIR" -type f -newer $control -name '*.deb'
 fi
--- a/contrib/perf.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/perf.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,6 +19,7 @@
 #   Mercurial
 
 from __future__ import absolute_import
+import contextlib
 import functools
 import gc
 import os
@@ -64,12 +65,29 @@
     from mercurial import scmutil # since 1.9 (or 8b252e826c68)
 except ImportError:
     pass
+
+def identity(a):
+    return a
+
 try:
     from mercurial import pycompat
     getargspec = pycompat.getargspec  # added to module after 4.5
+    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
+    _sysstr = pycompat.sysstr         # since 4.0 (or 2219f4f82ede)
+    _xrange = pycompat.xrange         # since 4.8 (or 7eba8f83129b)
+    fsencode = pycompat.fsencode      # since 3.9 (or f4a5e0e86a7e)
+    if pycompat.ispy3:
+        _maxint = sys.maxsize  # per py3 docs for replacing maxint
+    else:
+        _maxint = sys.maxint
 except (ImportError, AttributeError):
     import inspect
     getargspec = inspect.getargspec
+    _byteskwargs = identity
+    fsencode = identity               # no py3 support
+    _maxint = sys.maxint              # no py3 support
+    _sysstr = lambda x: x             # no py3 support
+    _xrange = xrange
 
 try:
     # 4.7+
@@ -95,7 +113,7 @@
 # available since 1.9.3 (or 94b200a11cf7)
 _undefined = object()
 def safehasattr(thing, attr):
-    return getattr(thing, attr, _undefined) is not _undefined
+    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
 setattr(util, 'safehasattr', safehasattr)
 
 # for "historical portability":
@@ -103,7 +121,7 @@
 # since ae5d60bb70c9
 if safehasattr(time, 'perf_counter'):
     util.timer = time.perf_counter
-elif os.name == 'nt':
+elif os.name == b'nt':
     util.timer = time.clock
 else:
     util.timer = time.time
@@ -123,9 +141,9 @@
 # since 1.9 (or a79fea6b3e77).
 revlogopts = getattr(cmdutil, "debugrevlogopts",
                      getattr(commands, "debugrevlogopts", [
-        ('c', 'changelog', False, ('open changelog')),
-        ('m', 'manifest', False, ('open manifest')),
-        ('', 'dir', False, ('open directory manifest')),
+        (b'c', b'changelog', False, (b'open changelog')),
+        (b'm', b'manifest', False, (b'open manifest')),
+        (b'', b'dir', False, (b'open directory manifest')),
         ]))
 
 cmdtable = {}
@@ -134,20 +152,20 @@
 # define parsealiases locally, because cmdutil.parsealiases has been
 # available since 1.5 (or 6252852b4332)
 def parsealiases(cmd):
-    return cmd.lstrip("^").split("|")
+    return cmd.split(b"|")
 
 if safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 elif safehasattr(cmdutil, 'command'):
     command = cmdutil.command(cmdtable)
-    if 'norepo' not in getargspec(command).args:
+    if b'norepo' not in getargspec(command).args:
         # for "historical portability":
         # wrap original cmdutil.command, because "norepo" option has
         # been available since 3.1 (or 75a96326cecb)
         _command = command
         def command(name, options=(), synopsis=None, norepo=False):
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return _command(name, list(options), synopsis)
 else:
     # for "historical portability":
@@ -160,7 +178,7 @@
             else:
                 cmdtable[name] = func, list(options)
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return func
         return decorator
 
@@ -169,23 +187,23 @@
     import mercurial.configitems
     configtable = {}
     configitem = mercurial.registrar.configitem(configtable)
-    configitem('perf', 'presleep',
+    configitem(b'perf', b'presleep',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'stub',
+    configitem(b'perf', b'stub',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'parentscount',
+    configitem(b'perf', b'parentscount',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'all-timing',
+    configitem(b'perf', b'all-timing',
         default=mercurial.configitems.dynamicdefault,
     )
 except (ImportError, AttributeError):
     pass
 
 def getlen(ui):
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return lambda x: 1
     return len
 
@@ -197,14 +215,14 @@
 
     # enforce an idle period before execution to counteract power management
     # experimental config: perf.presleep
-    time.sleep(getint(ui, "perf", "presleep", 1))
+    time.sleep(getint(ui, b"perf", b"presleep", 1))
 
     if opts is None:
         opts = {}
     # redirect all to stderr unless buffer api is in use
     if not ui._buffers:
         ui = ui.copy()
-        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
+        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
         if uifout:
             # for "historical portability":
             # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
@@ -213,7 +231,7 @@
     # get a formatter
     uiformatter = getattr(ui, 'formatter', None)
     if uiformatter:
-        fm = uiformatter('perf', opts)
+        fm = uiformatter(b'perf', opts)
     else:
         # for "historical portability":
         # define formatter locally, because ui.formatter has been
@@ -244,66 +262,81 @@
                 self._ui.write(text, **opts)
             def end(self):
                 pass
-        fm = defaultformatter(ui, 'perf', opts)
+        fm = defaultformatter(ui, b'perf', opts)
 
     # stub function, runs code only once instead of in a loop
     # experimental config: perf.stub
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return functools.partial(stub_timer, fm), fm
 
     # experimental config: perf.all-timing
-    displayall = ui.configbool("perf", "all-timing", False)
+    displayall = ui.configbool(b"perf", b"all-timing", False)
     return functools.partial(_timer, fm, displayall=displayall), fm
 
 def stub_timer(fm, func, title=None):
     func()
 
+@contextlib.contextmanager
+def timeone():
+    r = []
+    ostart = os.times()
+    cstart = util.timer()
+    yield r
+    cstop = util.timer()
+    ostop = os.times()
+    a, b = ostart, ostop
+    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
+
 def _timer(fm, func, title=None, displayall=False):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
     while True:
-        ostart = os.times()
-        cstart = util.timer()
-        r = func()
+        with timeone() as item:
+            r = func()
+        count += 1
+        results.append(item[0])
         cstop = util.timer()
-        ostop = os.times()
-        count += 1
-        a, b = ostart, ostop
-        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
         if cstop - begin > 3 and count >= 100:
             break
         if cstop - begin > 10 and count >= 3:
             break
 
+    formatone(fm, results, title=title, result=r,
+              displayall=displayall)
+
+def formatone(fm, timings, title=None, result=None, displayall=False):
+
+    count = len(timings)
+
     fm.startitem()
 
     if title:
-        fm.write('title', '! %s\n', title)
-    if r:
-        fm.write('result', '! result: %s\n', r)
+        fm.write(b'title', b'! %s\n', title)
+    if result:
+        fm.write(b'result', b'! result: %s\n', result)
     def display(role, entry):
-        prefix = ''
-        if role != 'best':
-            prefix = '%s.' % role
-        fm.plain('!')
-        fm.write(prefix + 'wall', ' wall %f', entry[0])
-        fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
-        fm.write(prefix + 'user', ' user %f', entry[1])
-        fm.write(prefix + 'sys',  ' sys %f', entry[2])
-        fm.write(prefix + 'count',  ' (%s of %d)', role, count)
-        fm.plain('\n')
-    results.sort()
-    min_val = results[0]
-    display('best', min_val)
+        prefix = b''
+        if role != b'best':
+            prefix = b'%s.' % role
+        fm.plain(b'!')
+        fm.write(prefix + b'wall', b' wall %f', entry[0])
+        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
+        fm.write(prefix + b'user', b' user %f', entry[1])
+        fm.write(prefix + b'sys',  b' sys %f', entry[2])
+        fm.write(prefix + b'count',  b' (%s of %%d)' % role, count)
+        fm.plain(b'\n')
+    timings.sort()
+    min_val = timings[0]
+    display(b'best', min_val)
     if displayall:
-        max_val = results[-1]
-        display('max', max_val)
-        avg = tuple([sum(x) / count for x in zip(*results)])
-        display('avg', avg)
-        median = results[len(results) // 2]
-        display('median', median)
+        max_val = timings[-1]
+        display(b'max', max_val)
+        avg = tuple([sum(x) / count for x in zip(*timings)])
+        display(b'avg', avg)
+        median = timings[len(timings) // 2]
+        display(b'median', median)
 
 # utilities for historical portability
 
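The timeone() context manager introduced above isolates one sample of
(wall, user-cpu, sys-cpu) so _timer() and future callers can share it;
a standalone sketch of the same pattern on plain Python 3 (util.timer
replaced by time.perf_counter):

    import contextlib
    import os
    import time

    @contextlib.contextmanager
    def timeone():
        r = []
        ostart = os.times()
        cstart = time.perf_counter()
        yield r
        cstop = time.perf_counter()
        ostop = os.times()
        # (wall, user cpu, system cpu) for whatever ran in the block
        r.append((cstop - cstart,
                  ostop[0] - ostart[0], ostop[1] - ostart[1]))

    with timeone() as sample:
        sum(range(10 ** 6))
    print(sample[0])
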
@@ -316,7 +349,7 @@
     try:
         return int(v)
     except ValueError:
-        raise error.ConfigError(("%s.%s is not an integer ('%s')")
+        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                 % (section, name, v))
 
 def safeattrsetter(obj, name, ignoremissing=False):
@@ -337,15 +370,15 @@
     if not util.safehasattr(obj, name):
         if ignoremissing:
             return None
-        raise error.Abort(("missing attribute %s of %s might break assumption"
-                           " of performance measurement") % (name, obj))
+        raise error.Abort((b"missing attribute %s of %s might break assumption"
+                           b" of performance measurement") % (name, obj))
 
-    origvalue = getattr(obj, name)
+    origvalue = getattr(obj, _sysstr(name))
     class attrutil(object):
         def set(self, newvalue):
-            setattr(obj, name, newvalue)
+            setattr(obj, _sysstr(name), newvalue)
         def restore(self):
-            setattr(obj, name, origvalue)
+            setattr(obj, _sysstr(name), origvalue)
 
     return attrutil()
 
@@ -364,8 +397,8 @@
     # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
     # branchmap and repoview modules exist, but subsettable attribute
     # doesn't)
-    raise error.Abort(("perfbranchmap not available with this Mercurial"),
-                      hint="use 2.5 or later")
+    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
+                      hint=b"use 2.5 or later")
 
 def getsvfs(repo):
     """Return appropriate object to access files under .hg/store
@@ -392,22 +425,22 @@
 def repocleartagscachefunc(repo):
     """Return the function to clear tags cache according to repo internal API
     """
-    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
+    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
         # in this case, setattr(repo, '_tagscache', None) or so isn't
         # correct way to clear tags cache, because existing code paths
         # expect _tagscache to be a structured object.
         def clearcache():
             # _tagscache has been filteredpropertycache since 2.5 (or
             # 98c867ac1330), and delattr() can't work in such case
-            if '_tagscache' in vars(repo):
-                del repo.__dict__['_tagscache']
+            if b'_tagscache' in vars(repo):
+                del repo.__dict__[b'_tagscache']
         return clearcache
 
-    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
+    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
     if repotags: # since 1.4 (or 5614a628d173)
         return lambda : repotags.set(None)
 
-    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
+    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
     if repotagscache: # since 0.6 (or d7df759d0e97)
         return lambda : repotagscache.set(None)
 
@@ -416,7 +449,7 @@
     # - repo.tags of such Mercurial isn't "callable", and repo.tags()
     #   in perftags() causes failure soon
     # - perf.py itself has been available since 1.1 (or eb240755386d)
-    raise error.Abort(("tags API of this hg command is unknown"))
+    raise error.Abort((b"tags API of this hg command is unknown"))
 
 # utilities to clear cache
 
@@ -428,56 +461,61 @@
 
 # perf commands
 
-@command('perfwalk', formatteropts)
+@command(b'perfwalk', formatteropts)
 def perfwalk(ui, repo, *pats, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     m = scmutil.match(repo[None], pats, {})
     timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                               ignored=False))))
     fm.end()
 
-@command('perfannotate', formatteropts)
+@command(b'perfannotate', formatteropts)
 def perfannotate(ui, repo, f, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    fc = repo['.'][f]
+    fc = repo[b'.'][f]
     timer(lambda: len(fc.annotate(True)))
     fm.end()
 
-@command('perfstatus',
-         [('u', 'unknown', False,
-           'ask status to look for unknown files')] + formatteropts)
+@command(b'perfstatus',
+         [(b'u', b'unknown', False,
+           b'ask status to look for unknown files')] + formatteropts)
 def perfstatus(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     #m = match.always(repo.root, repo.getcwd())
     #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
     timer, fm = gettimer(ui, opts)
-    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
+    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
 
-@command('perfaddremove', formatteropts)
+@command(b'perfaddremove', formatteropts)
 def perfaddremove(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     try:
         oldquiet = repo.ui.quiet
         repo.ui.quiet = True
         matcher = scmutil.match(repo[None])
-        opts['dry_run'] = True
-        timer(lambda: scmutil.addremove(repo, matcher, "", opts))
+        opts[b'dry_run'] = True
+        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
     finally:
         repo.ui.quiet = oldquiet
         fm.end()
 
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
-    if util.safehasattr(cl, 'clearcaches'):
+    if util.safehasattr(cl, b'clearcaches'):
         cl.clearcaches()
-    elif util.safehasattr(cl, '_nodecache'):
+    elif util.safehasattr(cl, b'_nodecache'):
         from mercurial.node import nullid, nullrev
         cl._nodecache = {nullid: nullrev}
         cl._nodepos = None
 
-@command('perfheads', formatteropts)
+@command(b'perfheads', formatteropts)
 def perfheads(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
     def d():
@@ -486,23 +524,28 @@
     timer(d)
     fm.end()
 
-@command('perftags', formatteropts)
+@command(b'perftags', formatteropts)
 def perftags(ui, repo, **opts):
     import mercurial.changelog
     import mercurial.manifest
+
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     svfs = getsvfs(repo)
     repocleartagscache = repocleartagscachefunc(repo)
     def t():
         repo.changelog = mercurial.changelog.changelog(svfs)
-        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
+        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
+        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
+                                                          rootmanifest)
         repocleartagscache()
         return len(repo.tags())
     timer(t)
     fm.end()
 
-@command('perfancestors', formatteropts)
+@command(b'perfancestors', formatteropts)
 def perfancestors(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     heads = repo.changelog.headrevs()
     def d():
@@ -511,8 +554,9 @@
     timer(d)
     fm.end()
 
-@command('perfancestorset', formatteropts)
+@command(b'perfancestorset', formatteropts)
 def perfancestorset(ui, repo, revset, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     revs = repo.revs(revset)
     heads = repo.changelog.headrevs()
@@ -523,17 +567,18 @@
     timer(d)
     fm.end()
 
-@command('perfbookmarks', formatteropts)
+@command(b'perfbookmarks', formatteropts)
 def perfbookmarks(ui, repo, **opts):
     """benchmark parsing bookmarks from disk to memory"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     def d():
-        clearfilecache(repo, '_bookmarks')
+        clearfilecache(repo, b'_bookmarks')
         repo._bookmarks
     timer(d)
     fm.end()
 
-@command('perfbundleread', formatteropts, 'BUNDLE')
+@command(b'perfbundleread', formatteropts, b'BUNDLE')
 def perfbundleread(ui, repo, bundlepath, **opts):
     """Benchmark reading of bundle files.
 
@@ -546,9 +591,11 @@
         streamclone,
     )
 
+    opts = _byteskwargs(opts)
+
     def makebench(fn):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 fn(bundle)
 
@@ -556,7 +603,7 @@
 
     def makereadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 while bundle.read(size):
                     pass
@@ -565,7 +612,7 @@
 
     def makestdioread(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 while fh.read(size):
                     pass
 
@@ -601,7 +648,7 @@
 
     def makepartreadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 for part in bundle.iterparts():
                     while part.read(size):
@@ -610,49 +657,49 @@
         return run
 
     benches = [
-        (makestdioread(8192), 'read(8k)'),
-        (makestdioread(16384), 'read(16k)'),
-        (makestdioread(32768), 'read(32k)'),
-        (makestdioread(131072), 'read(128k)'),
+        (makestdioread(8192), b'read(8k)'),
+        (makestdioread(16384), b'read(16k)'),
+        (makestdioread(32768), b'read(32k)'),
+        (makestdioread(131072), b'read(128k)'),
     ]
 
-    with open(bundlepath, 'rb') as fh:
+    with open(bundlepath, b'rb') as fh:
         bundle = exchange.readbundle(ui, fh, bundlepath)
 
         if isinstance(bundle, changegroup.cg1unpacker):
             benches.extend([
-                (makebench(deltaiter), 'cg1 deltaiter()'),
-                (makebench(iterchunks), 'cg1 getchunks()'),
-                (makereadnbytes(8192), 'cg1 read(8k)'),
-                (makereadnbytes(16384), 'cg1 read(16k)'),
-                (makereadnbytes(32768), 'cg1 read(32k)'),
-                (makereadnbytes(131072), 'cg1 read(128k)'),
+                (makebench(deltaiter), b'cg1 deltaiter()'),
+                (makebench(iterchunks), b'cg1 getchunks()'),
+                (makereadnbytes(8192), b'cg1 read(8k)'),
+                (makereadnbytes(16384), b'cg1 read(16k)'),
+                (makereadnbytes(32768), b'cg1 read(32k)'),
+                (makereadnbytes(131072), b'cg1 read(128k)'),
             ])
         elif isinstance(bundle, bundle2.unbundle20):
             benches.extend([
-                (makebench(forwardchunks), 'bundle2 forwardchunks()'),
-                (makebench(iterparts), 'bundle2 iterparts()'),
-                (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
-                (makebench(seek), 'bundle2 part seek()'),
-                (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
-                (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
-                (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
-                (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
+                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
+                (makebench(iterparts), b'bundle2 iterparts()'),
+                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
+                (makebench(seek), b'bundle2 part seek()'),
+                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
+                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
+                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
+                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
             ])
         elif isinstance(bundle, streamclone.streamcloneapplier):
-            raise error.Abort('stream clone bundles not supported')
+            raise error.Abort(b'stream clone bundles not supported')
         else:
-            raise error.Abort('unhandled bundle type: %s' % type(bundle))
+            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfchangegroupchangelog', formatteropts +
-         [('', 'version', '02', 'changegroup version'),
-          ('r', 'rev', '', 'revisions to add to changegroup')])
-def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
+@command(b'perfchangegroupchangelog', formatteropts +
+         [(b'', b'version', b'02', b'changegroup version'),
+          (b'r', b'rev', b'', b'revisions to add to changegroup')])
+def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
     """Benchmark producing a changelog group for a changegroup.
 
     This measures the time spent processing the changelog during a
@@ -662,92 +709,99 @@
 
     By default, all revisions are added to the changegroup.
     """
+    opts = _byteskwargs(opts)
     cl = repo.changelog
-    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
+    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
     bundler = changegroup.getbundler(version, repo)
 
-    def lookup(node):
-        # The real bundler reads the revision in order to access the
-        # manifest node and files list. Do that here.
-        cl.read(node)
-        return node
-
     def d():
-        for chunk in bundler.group(revs, cl, lookup):
+        state, chunks = bundler._generatechangelog(cl, nodes)
+        for chunk in chunks:
             pass
 
     timer, fm = gettimer(ui, opts)
-    timer(d)
+
+    # Terminal printing can interfere with timing. So disable it.
+    with ui.configoverride({(b'progress', b'disable'): True}):
+        timer(d)
+
     fm.end()
 
-@command('perfdirs', formatteropts)
+@command(b'perfdirs', formatteropts)
 def perfdirs(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate.hasdir('a')
+        dirstate.hasdir(b'a')
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstate', formatteropts)
+@command(b'perfdirstate', formatteropts)
 def perfdirstate(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
         repo.dirstate.invalidate()
-        "a" in repo.dirstate
+        b"a" in repo.dirstate
     timer(d)
     fm.end()
 
-@command('perfdirstatedirs', formatteropts)
+@command(b'perfdirstatedirs', formatteropts)
 def perfdirstatedirs(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
-        repo.dirstate.hasdir("a")
+        repo.dirstate.hasdir(b"a")
         del repo.dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatefoldmap', formatteropts)
+@command(b'perfdirstatefoldmap', formatteropts)
 def perfdirstatefoldmap(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.filefoldmap.get('a')
+        dirstate._map.filefoldmap.get(b'a')
         del dirstate._map.filefoldmap
     timer(d)
     fm.end()
 
-@command('perfdirfoldmap', formatteropts)
+@command(b'perfdirfoldmap', formatteropts)
 def perfdirfoldmap(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.dirfoldmap.get('a')
+        dirstate._map.dirfoldmap.get(b'a')
         del dirstate._map.dirfoldmap
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatewrite', formatteropts)
+@command(b'perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ds = repo.dirstate
-    "a" in ds
+    b"a" in ds
     def d():
         ds._dirty = True
         ds.write(repo.currenttransaction())
     timer(d)
     fm.end()
 
-@command('perfmergecalculate',
-         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
+@command(b'perfmergecalculate',
+         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
 def perfmergecalculate(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     wctx = repo[None]
     rctx = scmutil.revsingle(repo, rev, rev)
@@ -763,8 +817,9 @@
     timer(d)
     fm.end()
 
-@command('perfpathcopies', [], "REV REV")
+@command(b'perfpathcopies', [], b"REV REV")
 def perfpathcopies(ui, repo, rev1, rev2, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ctx1 = scmutil.revsingle(repo, rev1, rev1)
     ctx2 = scmutil.revsingle(repo, rev2, rev2)
@@ -773,26 +828,27 @@
     timer(d)
     fm.end()
 
-@command('perfphases',
-         [('', 'full', False, 'include file reading time too'),
-         ], "")
+@command(b'perfphases',
+         [(b'', b'full', False, b'include file reading time too'),
+          ], b"")
 def perfphases(ui, repo, **opts):
     """benchmark phasesets computation"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     _phases = repo._phasecache
-    full = opts.get('full')
+    full = opts.get(b'full')
     def d():
         phases = _phases
         if full:
-            clearfilecache(repo, '_phasecache')
+            clearfilecache(repo, b'_phasecache')
             phases = repo._phasecache
         phases.invalidate()
         phases.loadphaserevs(repo)
     timer(d)
     fm.end()
 
-@command('perfphasesremote',
-         [], "[DEST]")
+@command(b'perfphasesremote',
+         [], b"[DEST]")
 def perfphasesremote(ui, repo, dest=None, **opts):
     """benchmark time needed to analyse phases of the remote server"""
     from mercurial.node import (
@@ -803,16 +859,17 @@
         hg,
         phases,
     )
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(('default repository not configured!'),
-                         hint=("see 'hg help config.paths'"))
+        raise error.Abort((b'default repository not configured!'),
+                          hint=(b"see 'hg help config.paths'"))
     dest = path.pushloc or path.loc
-    branches = (path.branch, opts.get('branch') or [])
-    ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+    branches = (path.branch, opts.get(b'branch') or [])
+    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
+    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     other = hg.peer(repo, opts, dest)
 
     # easier to perform discovery through the operation
@@ -822,25 +879,25 @@
     remotesubset = op.fallbackheads
 
     with other.commandexecutor() as e:
-        remotephases = e.callcommand('listkeys',
-                       {'namespace': 'phases'}).result()
+        remotephases = e.callcommand(b'listkeys',
+                       {b'namespace': b'phases'}).result()
     del other
-    publishing = remotephases.get('publishing', False)
+    publishing = remotephases.get(b'publishing', False)
     if publishing:
-        ui.status(('publishing: yes\n'))
+        ui.status((b'publishing: yes\n'))
     else:
-        ui.status(('publishing: no\n'))
+        ui.status((b'publishing: no\n'))
 
     nodemap = repo.changelog.nodemap
     nonpublishroots = 0
     for nhex, phase in remotephases.iteritems():
-        if nhex == 'publishing': # ignore data related to publish option
+        if nhex == b'publishing': # ignore data related to publish option
             continue
         node = bin(nhex)
         if node in nodemap and int(phase):
             nonpublishroots += 1
-    ui.status(('number of roots: %d\n') % len(remotephases))
-    ui.status(('number of known non public roots: %d\n') % nonpublishroots)
+    ui.status((b'number of roots: %d\n') % len(remotephases))
+    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
     def d():
         phases.remotephasessummary(repo,
                                    remotesubset,
@@ -848,23 +905,45 @@
     timer(d)
     fm.end()
 
-@command('perfmanifest', [], 'REV')
-def perfmanifest(ui, repo, rev, **opts):
+@command(b'perfmanifest', [
+            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
+            (b'', b'clear-disk', False, b'clear on-disk caches too'),
+         ] + formatteropts, b'REV|NODE')
+def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
 
     Manifest caches are cleared before retrieval."""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    ctx = scmutil.revsingle(repo, rev, rev)
-    t = ctx.manifestnode()
+    if not manifest_rev:
+        ctx = scmutil.revsingle(repo, rev, rev)
+        t = ctx.manifestnode()
+    else:
+        from mercurial.node import bin
+
+        if len(rev) == 40:
+            t = bin(rev)
+        else:
+            try:
+                rev = int(rev)
+
+                if util.safehasattr(repo.manifestlog, r'getstorage'):
+                    t = repo.manifestlog.getstorage(b'').node(rev)
+                else:
+                    t = repo.manifestlog._revlog.lookup(rev)
+            except ValueError:
+                raise error.Abort(b'manifest revision must be integer or full '
+                                  b'node')
     def d():
-        repo.manifestlog.clearcaches()
+        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
     timer(d)
     fm.end()
 
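Illustrative invocations of the extended command (the node placeholder below is hypothetical; a real call needs all 40 hex digits)::

    $ hg perfmanifest tip                  # resolve via a changeset, as before
    $ hg perfmanifest -m 0                 # manifest revision number
    $ hg perfmanifest -m <40-hex-node>     # full manifest node
    $ hg perfmanifest --clear-disk tip     # also drop persisted cache data
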
-@command('perfchangeset', formatteropts)
+@command(b'perfchangeset', formatteropts)
 def perfchangeset(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     n = scmutil.revsingle(repo, rev).node()
     def d():
@@ -873,50 +952,54 @@
     timer(d)
     fm.end()
 
-@command('perfindex', formatteropts)
+@command(b'perfindex', formatteropts)
 def perfindex(ui, repo, **opts):
     import mercurial.revlog
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
-    n = repo["tip"].node()
+    n = repo[b"tip"].node()
     svfs = getsvfs(repo)
     def d():
-        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
+        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
         cl.rev(n)
     timer(d)
     fm.end()
 
-@command('perfstartup', formatteropts)
+@command(b'perfstartup', formatteropts)
 def perfstartup(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    cmd = sys.argv[0]
     def d():
-        if os.name != 'nt':
-            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
+        if os.name != r'nt':
+            os.system(b"HGRCPATH= %s version -q > /dev/null" %
+                      fsencode(sys.argv[0]))
         else:
-            os.environ['HGRCPATH'] = ' '
-            os.system("%s version -q > NUL" % cmd)
+            os.environ[r'HGRCPATH'] = r' '
+            os.system(r"%s version -q > NUL" % sys.argv[0])
     timer(d)
     fm.end()
 
-@command('perfparents', formatteropts)
+@command(b'perfparents', formatteropts)
 def perfparents(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     # control the number of commits perfparents iterates over
     # experimental config: perf.parentscount
-    count = getint(ui, "perf", "parentscount", 1000)
+    count = getint(ui, b"perf", b"parentscount", 1000)
     if len(repo.changelog) < count:
-        raise error.Abort("repo needs %d commits for this test" % count)
+        raise error.Abort(b"repo needs %d commits for this test" % count)
     repo = repo.unfiltered()
-    nl = [repo.changelog.node(i) for i in xrange(count)]
+    nl = [repo.changelog.node(i) for i in _xrange(count)]
     def d():
         for n in nl:
             repo.changelog.parents(n)
     timer(d)
     fm.end()
 
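The iteration count comes from the experimental ``perf.parentscount`` knob read above, so a repo with fewer than 1000 commits can still run the benchmark by lowering it, e.g. in an hgrc::

    [perf]
    parentscount = 500
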
-@command('perfctxfiles', formatteropts)
+@command(b'perfctxfiles', formatteropts)
 def perfctxfiles(ui, repo, x, **opts):
+    opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
     def d():
@@ -924,8 +1007,9 @@
     timer(d)
     fm.end()
 
-@command('perfrawfiles', formatteropts)
+@command(b'perfrawfiles', formatteropts)
 def perfrawfiles(ui, repo, x, **opts):
+    opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
@@ -934,77 +1018,119 @@
     timer(d)
     fm.end()
 
-@command('perflookup', formatteropts)
+@command(b'perflookup', formatteropts)
 def perflookup(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
-@command('perfrevrange', formatteropts)
+@command(b'perflinelogedits',
+         [(b'n', b'edits', 10000, b'number of edits'),
+          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
+          ], norepo=True)
+def perflinelogedits(ui, **opts):
+    from mercurial import linelog
+
+    opts = _byteskwargs(opts)
+
+    edits = opts[b'edits']
+    maxhunklines = opts[b'max_hunk_lines']
+
+    maxb1 = 100000
+    random.seed(0)
+    randint = random.randint
+    currentlines = 0
+    arglist = []
+    for rev in _xrange(edits):
+        a1 = randint(0, currentlines)
+        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
+        b1 = randint(0, maxb1)
+        b2 = randint(b1, b1 + maxhunklines)
+        currentlines += (b2 - b1) - (a2 - a1)
+        arglist.append((rev, a1, a2, b1, b2))
+
+    def d():
+        ll = linelog.linelog()
+        for args in arglist:
+            ll.replacelines(*args)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
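Each synthetic edit replaces the line range ``[a1, a2)`` with ``[b1, b2)``, so the running document length changes by ``(b2 - b1) - (a2 - a1)``, which is exactly the bookkeeping applied to ``currentlines`` above. A worked instance::

    currentlines = 10
    a1, a2 = 3, 5                            # remove two existing lines
    b1, b2 = 0, 7                            # splice in seven new lines
    currentlines += (b2 - b1) - (a2 - a1)    # 10 - 2 + 7
    assert currentlines == 15
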
+@command(b'perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     revrange = scmutil.revrange
     timer(lambda: len(revrange(repo, specs)))
     fm.end()
 
-@command('perfnodelookup', formatteropts)
+@command(b'perfnodelookup', formatteropts)
 def perfnodelookup(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     import mercurial.revlog
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
     n = scmutil.revsingle(repo, rev).node()
-    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
+    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
     def d():
         cl.rev(n)
         clearcaches(cl)
     timer(d)
     fm.end()
 
-@command('perflog',
-         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
+@command(b'perflog',
+         [(b'', b'rename', False, b'ask log to follow renames')
+         ] + formatteropts)
 def perflog(ui, repo, rev=None, **opts):
+    opts = _byteskwargs(opts)
     if rev is None:
         rev = []
     timer, fm = gettimer(ui, opts)
     ui.pushbuffer()
-    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
-                               copies=opts.get('rename')))
+    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
+                               copies=opts.get(b'rename')))
     ui.popbuffer()
     fm.end()
 
-@command('perfmoonwalk', formatteropts)
+@command(b'perfmoonwalk', formatteropts)
 def perfmoonwalk(ui, repo, **opts):
     """benchmark walking the changelog backwards
 
     This also loads the changelog data for each revision in the changelog.
     """
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     def moonwalk():
-        for i in xrange(len(repo), -1, -1):
+        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
             ctx.branch() # read changelog data (in addition to the index)
     timer(moonwalk)
     fm.end()
 
-@command('perftemplating',
-         [('r', 'rev', [], 'revisions to run the template on'),
-         ] + formatteropts)
+@command(b'perftemplating',
+         [(b'r', b'rev', [], b'revisions to run the template on'),
+          ] + formatteropts)
 def perftemplating(ui, repo, testedtemplate=None, **opts):
     """test the rendering time of a given template"""
     if makelogtemplater is None:
-        raise error.Abort(("perftemplating not available with this Mercurial"),
-                          hint="use 4.3 or later")
+        raise error.Abort((b"perftemplating not available with this Mercurial"),
+                          hint=b"use 4.3 or later")
+
+    opts = _byteskwargs(opts)
 
     nullui = ui.copy()
-    nullui.fout = open(os.devnull, 'wb')
+    nullui.fout = open(os.devnull, r'wb')
     nullui.disablepager()
-    revs = opts.get('rev')
+    revs = opts.get(b'rev')
     if not revs:
-        revs = ['all()']
+        revs = [b'all()']
     revs = list(scmutil.revrange(repo, revs))
 
-    defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
-                       ' {author|person}: {desc|firstline}\n')
+    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
+                       b' {author|person}: {desc|firstline}\n')
     if testedtemplate is None:
         testedtemplate = defaulttemplate
     displayer = makelogtemplater(nullui, repo, testedtemplate)
@@ -1018,14 +1144,16 @@
     timer(format)
     fm.end()
 
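A hypothetical pair of runs, timing the default template and a custom one (the template string is the positional argument)::

    $ hg perftemplating -r 'all()'
    $ hg perftemplating -r '.~100:.' '{rev}:{node|short} {desc|firstline}\n'
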
-@command('perfcca', formatteropts)
+@command(b'perfcca', formatteropts)
 def perfcca(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
     fm.end()
 
-@command('perffncacheload', formatteropts)
+@command(b'perffncacheload', formatteropts)
 def perffncacheload(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     def d():
@@ -1033,14 +1161,15 @@
     timer(d)
     fm.end()
 
-@command('perffncachewrite', formatteropts)
+@command(b'perffncachewrite', formatteropts)
 def perffncachewrite(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     lock = repo.lock()
     s.fncache._load()
-    tr = repo.transaction('perffncachewrite')
-    tr.addbackup('fncache')
+    tr = repo.transaction(b'perffncachewrite')
+    tr.addbackup(b'fncache')
     def d():
         s.fncache._dirty = True
         s.fncache.write(tr)
@@ -1049,8 +1178,9 @@
     lock.release()
     fm.end()
 
-@command('perffncacheencode', formatteropts)
+@command(b'perffncacheencode', formatteropts)
 def perffncacheencode(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     s.fncache._load()
@@ -1076,15 +1206,25 @@
         with ready:
             ready.wait()
 
-@command('perfbdiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test bdiffs for all associated revisions'),
-    ('', 'threads', 0, 'number of thread to use (disable with 0)'),
-    ('', 'blocks', False, 'test computing diffs into blocks'),
-    ('', 'xdiff', False, 'use xdiff algorithm'),
+def _manifestrevision(repo, mnode):
+    ml = repo.manifestlog
+
+    if util.safehasattr(ml, r'getstorage'):
+        store = ml.getstorage(b'')
+    else:
+        store = ml._revlog
+
+    return store.revision(mnode)
+
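``_manifestrevision`` is the "historical portability" shim for the diff benchmarks below: recent Mercurial reaches manifest storage through ``getstorage(b'')``, while older versions only expose the private ``_revlog`` attribute. The expected call shape, as used by ``perfbdiff`` and ``perfunidiff`` further down::

    mtext = _manifestrevision(repo, ctx.manifestnode())
    for pctx in ctx.parents():
        pman = _manifestrevision(repo, pctx.manifestnode())
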
+@command(b'perfbdiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
+    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
+    (b'', b'blocks', False, b'test computing diffs into blocks'),
+    (b'', b'xdiff', False, b'use xdiff algorithm'),
     ],
 
-    '-c|-m|FILE REV')
+    b'-c|-m|FILE REV')
 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
     """benchmark a bdiff between revisions
 
@@ -1097,33 +1237,33 @@
     measure bdiffs for all changes related to that changeset (manifest
     and filelogs).
     """
-    opts = pycompat.byteskwargs(opts)
+    opts = _byteskwargs(opts)
 
-    if opts['xdiff'] and not opts['blocks']:
-        raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
+    if opts[b'xdiff'] and not opts[b'blocks']:
+        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
 
-    if opts['alldata']:
-        opts['changelog'] = True
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfbdiff', 'invalid arguments')
+        raise error.CommandError(b'perfbdiff', b'invalid arguments')
 
-    blocks = opts['blocks']
-    xdiff = opts['xdiff']
+    blocks = opts[b'blocks']
+    xdiff = opts[b'xdiff']
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.
@@ -1150,18 +1290,18 @@
                     mdiff.textdiff(*pair)
     else:
         q = queue()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             q.put(None)
         ready = threading.Condition()
         done = threading.Event()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             threading.Thread(target=_bdiffworker,
                              args=(q, blocks, xdiff, ready, done)).start()
         q.join()
         def d():
             for pair in textpairs:
                 q.put(pair)
-            for i in xrange(threads):
+            for i in _xrange(threads):
                 q.put(None)
             with ready:
                 ready.notify_all()
@@ -1172,15 +1312,15 @@
 
     if withthreads:
         done.set()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             q.put(None)
         with ready:
             ready.notify_all()
 
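The threaded mode keeps its ``_bdiffworker`` threads alive across timing runs, parked on the ``Condition`` and fed through a queue in which ``None`` marks the end of a round. A stripped-down, self-contained sketch of that sentinel handshake (omitting the keep-alive condition)::

    import threading
    try:
        from queue import Queue    # Python 3
    except ImportError:
        from Queue import Queue    # Python 2

    q = Queue()

    def worker():
        while True:
            pair = q.get()
            if pair is None:       # sentinel: no more work this round
                q.task_done()
                break
            # a real worker would bdiff the pair here
            q.task_done()

    t = threading.Thread(target=worker)
    t.start()
    for pair in [(b'a\n', b'b\n')] * 3:
        q.put(pair)
    q.put(None)
    q.join()                       # all items, including the sentinel, done
    t.join()
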
-@command('perfunidiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test unidiffs for all associated revisions'),
-    ], '-c|-m|FILE REV')
+@command(b'perfunidiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
+    ], b'-c|-m|FILE REV')
 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
     """benchmark a unified diff between revisions
 
@@ -1196,26 +1336,27 @@
     measure diffs for all changes related to that changeset (manifest
     and filelogs).
     """
-    if opts['alldata']:
-        opts['changelog'] = True
+    opts = _byteskwargs(opts)
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfunidiff', 'invalid arguments')
+        raise error.CommandError(b'perfunidiff', b'invalid arguments')
 
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.
@@ -1234,7 +1375,7 @@
         for left, right in textpairs:
             # The date strings don't matter, so we pass empty strings.
             headerlines, hunks = mdiff.unidiff(
-                left, '', right, '', 'left', 'right', binary=False)
+                left, b'', right, b'', b'left', b'right', binary=False)
             # consume iterators in roughly the way patch.py does
             b'\n'.join(headerlines)
             b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
@@ -1242,9 +1383,10 @@
     timer(d)
     fm.end()
 
-@command('perfdiffwd', formatteropts)
+@command(b'perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     options = {
         'w': 'ignore_all_space',
@@ -1253,17 +1395,18 @@
         }
 
     for diffopt in ('', 'w', 'b', 'B', 'wB'):
-        opts = dict((options[c], '1') for c in diffopt)
+        opts = dict((options[c], b'1') for c in diffopt)
         def d():
             ui.pushbuffer()
             commands.diff(ui, repo, **opts)
             ui.popbuffer()
-        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
+        diffopt = diffopt.encode('ascii')
+        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
         timer(d, title)
     fm.end()
 
-@command('perfrevlogindex', revlogopts + formatteropts,
-         '-c|-m|FILE')
+@command(b'perfrevlogindex', revlogopts + formatteropts,
+         b'-c|-m|FILE')
 def perfrevlogindex(ui, repo, file_=None, **opts):
     """Benchmark operations against a revlog index.
 
@@ -1272,19 +1415,21 @@
     index data.
     """
 
-    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
 
     opener = getattr(rl, 'opener')  # trick linter
     indexfile = rl.indexfile
     data = opener.read(indexfile)
 
-    header = struct.unpack('>I', data[0:4])[0]
+    header = struct.unpack(b'>I', data[0:4])[0]
     version = header & 0xFFFF
     if version == 1:
         revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
-        raise error.Abort(('unsupported revlog version: %d') % version)
+        raise error.Abort((b'unsupported revlog version: %d') % version)
 
     rllen = len(rl)
 
@@ -1344,33 +1489,33 @@
                     pass
 
     benches = [
-        (constructor, 'revlog constructor'),
-        (read, 'read'),
-        (parseindex, 'create index object'),
-        (lambda: getentry(0), 'retrieve index entry for rev 0'),
-        (lambda: resolvenode('a' * 20), 'look up missing node'),
-        (lambda: resolvenode(node0), 'look up node at rev 0'),
-        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
-        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
-        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
-        (lambda: resolvenode(node100), 'look up node at tip'),
+        (constructor, b'revlog constructor'),
+        (read, b'read'),
+        (parseindex, b'create index object'),
+        (lambda: getentry(0), b'retrieve index entry for rev 0'),
+        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
+        (lambda: resolvenode(node0), b'look up node at rev 0'),
+        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
+        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
+        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
+        (lambda: resolvenode(node100), b'look up node at tip'),
         # 2x variation is to measure caching impact.
         (lambda: resolvenodes(allnodes),
-         'look up all nodes (forward)'),
+         b'look up all nodes (forward)'),
         (lambda: resolvenodes(allnodes, 2),
-         'look up all nodes 2x (forward)'),
+         b'look up all nodes 2x (forward)'),
         (lambda: resolvenodes(allnodesrev),
-         'look up all nodes (reverse)'),
+         b'look up all nodes (reverse)'),
         (lambda: resolvenodes(allnodesrev, 2),
-         'look up all nodes 2x (reverse)'),
+         b'look up all nodes 2x (reverse)'),
         (lambda: getentries(allrevs),
-         'retrieve all index entries (forward)'),
+         b'retrieve all index entries (forward)'),
         (lambda: getentries(allrevs, 2),
-         'retrieve all index entries 2x (forward)'),
+         b'retrieve all index entries 2x (forward)'),
         (lambda: getentries(allrevsrev),
-         'retrieve all index entries (reverse)'),
+         b'retrieve all index entries (reverse)'),
         (lambda: getentries(allrevsrev, 2),
-         'retrieve all index entries 2x (reverse)'),
+         b'retrieve all index entries 2x (reverse)'),
     ]
 
     for fn, title in benches:
@@ -1378,11 +1523,11 @@
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevisions', revlogopts + formatteropts +
-         [('d', 'dist', 100, 'distance between the revisions'),
-          ('s', 'startrev', 0, 'revision to start reading at'),
-          ('', 'reverse', False, 'read in reverse')],
-         '-c|-m|FILE')
+@command(b'perfrevlogrevisions', revlogopts + formatteropts +
+         [(b'd', b'dist', 100, b'distance between the revisions'),
+          (b's', b'startrev', 0, b'revision to start reading at'),
+          (b'', b'reverse', False, b'read in reverse')],
+         b'-c|-m|FILE')
 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                         **opts):
     """Benchmark reading a series of revisions from a revlog.
@@ -1392,21 +1537,26 @@
 
     The start revision can be defined via ``-s/--startrev``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
     rllen = getlen(ui)(rl)
 
+    if startrev < 0:
+        startrev = rllen + startrev
+
     def d():
         rl.clearcaches()
 
         beginrev = startrev
         endrev = rllen
-        dist = opts['dist']
+        dist = opts[b'dist']
 
         if reverse:
             beginrev, endrev = endrev, beginrev
             dist = -1 * dist
 
-        for x in xrange(beginrev, endrev, dist):
+        for x in _xrange(beginrev, endrev, dist):
             # Old revisions don't support passing int.
             n = rl.node(x)
             rl.revision(n)
@@ -1415,10 +1565,10 @@
     timer(d)
     fm.end()
 
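With the new guard, a negative ``--startrev`` is interpreted relative to the end of the revlog, so e.g.::

    $ hg perfrevlogrevisions -c -s -1000

starts reading 1000 revisions before the changelog tip (still stepping by ``--dist``, 100 by default).
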
-@command('perfrevlogchunks', revlogopts + formatteropts +
-         [('e', 'engines', '', 'compression engines to use'),
-          ('s', 'startrev', 0, 'revision to start at')],
-         '-c|-m|FILE')
+@command(b'perfrevlogchunks', revlogopts + formatteropts +
+         [(b'e', b'engines', b'', b'compression engines to use'),
+          (b's', b'startrev', 0, b'revision to start at')],
+         b'-c|-m|FILE')
 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
     """Benchmark operations on revlog chunks.
 
@@ -1431,7 +1581,9 @@
     For measurements of higher-level operations like resolving revisions,
     see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:
@@ -1441,19 +1593,19 @@
 
     # Verify engines argument.
     if engines:
-        engines = set(e.strip() for e in engines.split(','))
+        engines = set(e.strip() for e in engines.split(b','))
         for engine in engines:
             try:
                 util.compressionengines[engine]
             except KeyError:
-                raise error.Abort('unknown compression engine: %s' % engine)
+                raise error.Abort(b'unknown compression engine: %s' % engine)
     else:
         engines = []
         for e in util.compengines:
             engine = util.compengines[e]
             try:
                 if engine.available():
-                    engine.revlogcompressor().compress('dummy')
+                    engine.revlogcompressor().compress(b'dummy')
                     engines.append(e)
             except NotImplementedError:
                 pass
@@ -1513,27 +1665,27 @@
             rl._compressor = oldcompressor
 
     benches = [
-        (lambda: doread(), 'read'),
-        (lambda: doreadcachedfh(), 'read w/ reused fd'),
-        (lambda: doreadbatch(), 'read batch'),
-        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
-        (lambda: dochunk(), 'chunk'),
-        (lambda: dochunkbatch(), 'chunk batch'),
+        (lambda: doread(), b'read'),
+        (lambda: doreadcachedfh(), b'read w/ reused fd'),
+        (lambda: doreadbatch(), b'read batch'),
+        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
+        (lambda: dochunk(), b'chunk'),
+        (lambda: dochunkbatch(), b'chunk batch'),
     ]
 
     for engine in sorted(engines):
         compressor = util.compengines[engine].revlogcompressor()
         benches.append((functools.partial(docompress, compressor),
-                        'compress w/ %s' % engine))
+                        b'compress w/ %s' % engine))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevision', revlogopts + formatteropts +
-         [('', 'cache', False, 'use caches instead of clearing')],
-         '-c|-m|FILE REV')
+@command(b'perfrevlogrevision', revlogopts + formatteropts +
+         [(b'', b'cache', False, b'use caches instead of clearing')],
+         b'-c|-m|FILE REV')
 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
     """Benchmark obtaining a revlog revision.
 
@@ -1547,12 +1699,14 @@
 
     This command measures the time spent in each of these phases.
     """
-    if opts.get('changelog') or opts.get('manifest'):
+    opts = _byteskwargs(opts)
+
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfrevlogrevision', 'invalid arguments')
+        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
 
-    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:
@@ -1622,18 +1776,18 @@
     data = segmentforrevs(chain[0], chain[-1])[1]
     rawchunks = getrawchunks(data, chain)
     bins = r._chunks(chain)
-    text = str(bins[0])
+    text = bytes(bins[0])
     bins = bins[1:]
     text = mdiff.patches(text, bins)
 
     benches = [
-        (lambda: dorevision(), 'full'),
-        (lambda: dodeltachain(rev), 'deltachain'),
-        (lambda: doread(chain), 'read'),
-        (lambda: dorawchunks(data, chain), 'rawchunks'),
-        (lambda: dodecompress(rawchunks), 'decompress'),
-        (lambda: dopatch(text, bins), 'patch'),
-        (lambda: dohash(text), 'hash'),
+        (lambda: dorevision(), b'full'),
+        (lambda: dodeltachain(rev), b'deltachain'),
+        (lambda: doread(chain), b'read'),
+        (lambda: dorawchunks(data, chain), b'rawchunks'),
+        (lambda: dodecompress(rawchunks), b'decompress'),
+        (lambda: dopatch(text, bins), b'patch'),
+        (lambda: dohash(text), b'hash'),
     ]
 
     for fn, title in benches:
@@ -1641,16 +1795,18 @@
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevset',
-         [('C', 'clear', False, 'clear volatile cache between each call.'),
-          ('', 'contexts', False, 'obtain changectx for each revision')]
-         + formatteropts, "REVSET")
+@command(b'perfrevset',
+         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
+          (b'', b'contexts', False, b'obtain changectx for each revision')]
+         + formatteropts, b"REVSET")
 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
     """benchmark the execution time of a revset
 
     Use the --clear option if you need to evaluate the impact of building the
     volatile revision set caches on revset execution. Volatile caches hold
     data related to filtering and obsolescence."""
+    opts = _byteskwargs(opts)
+
     timer, fm = gettimer(ui, opts)
     def d():
         if clear:
@@ -1662,21 +1818,22 @@
     timer(d)
     fm.end()
 
-@command('perfvolatilesets',
-         [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
-         ] + formatteropts)
+@command(b'perfvolatilesets',
+         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
+          ] + formatteropts)
 def perfvolatilesets(ui, repo, *names, **opts):
     """benchmark the computation of various volatile set
 
     Volatile set computes element related to filtering and obsolescence."""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     repo = repo.unfiltered()
 
     def getobs(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             obsolete.getrevs(repo, name)
         return d
 
@@ -1690,8 +1847,8 @@
     def getfiltered(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             repoview.filterrevs(repo, name)
         return d
 
@@ -1703,19 +1860,20 @@
         timer(getfiltered(name), title=name)
     fm.end()
 
-@command('perfbranchmap',
-         [('f', 'full', False,
-           'Includes build time of subset'),
-          ('', 'clear-revbranch', False,
-           'purge the revbranch cache between computation'),
-         ] + formatteropts)
+@command(b'perfbranchmap',
+         [(b'f', b'full', False,
+           b'Includes build time of subset'),
+          (b'', b'clear-revbranch', False,
+           b'purge the revbranch cache between computations'),
+          ] + formatteropts)
 def perfbranchmap(ui, repo, *filternames, **opts):
     """benchmark the update of a branchmap
 
     This benchmarks the full repo.branchmap() call with read and write disabled
     """
-    full = opts.get("full", False)
-    clear_revbranch = opts.get("clear_revbranch", False)
+    opts = _byteskwargs(opts)
+    full = opts.get(b"full", False)
+    clear_revbranch = opts.get(b"clear_revbranch", False)
     timer, fm = gettimer(ui, opts)
     def getbranchmap(filtername):
         """generate a benchmark function for the filtername"""
@@ -1744,7 +1902,7 @@
             if subset not in possiblefilters:
                 break
         else:
-            assert False, 'subset cycle %s!' % possiblefilters
+            assert False, b'subset cycle %s!' % possiblefilters
         allfilters.append(name)
         possiblefilters.remove(name)
 
@@ -1752,26 +1910,53 @@
     if not full:
         for name in allfilters:
             repo.filtered(name).branchmap()
-    if not filternames or 'unfiltered' in filternames:
+    if not filternames or b'unfiltered' in filternames:
         # add unfiltered
         allfilters.append(None)
 
-    branchcacheread = safeattrsetter(branchmap, 'read')
-    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
+    branchcacheread = safeattrsetter(branchmap, b'read')
+    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
     branchcacheread.set(lambda repo: None)
     branchcachewrite.set(lambda bc, repo: None)
     try:
         for name in allfilters:
             printname = name
             if name is None:
-                printname = 'unfiltered'
+                printname = b'unfiltered'
             timer(getbranchmap(name), title=str(printname))
     finally:
         branchcacheread.restore()
         branchcachewrite.restore()
     fm.end()
 
-@command('perfloadmarkers')
+@command(b'perfbranchmapload', [
+     (b'f', b'filter', b'', b'Specify repoview filter'),
+     (b'', b'list', False, b'List branchmap filter caches'),
+    ] + formatteropts)
+def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
+    """benchmark reading the branchmap"""
+    opts = _byteskwargs(opts)
+
+    if list:
+        for name, kind, st in repo.cachevfs.readdir(stat=True):
+            if name.startswith(b'branch2'):
+                filtername = name.partition(b'-')[2] or b'unfiltered'
+                ui.status(b'%s - %s\n'
+                          % (filtername, util.bytecount(st.st_size)))
+        return
+    if filter:
+        repo = repoview.repoview(repo, filter)
+    else:
+        repo = repo.unfiltered()
+    # try once without timer, the filter may not be cached
+    if branchmap.read(repo) is None:
+        raise error.Abort(b'No branchmap cached for %s repo'
+                          % (filter or b'unfiltered'))
+    timer, fm = gettimer(ui, opts)
+    timer(lambda: branchmap.read(repo) and None)
+    fm.end()
+
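Typical use of the new command: first list which branchmap caches exist on disk, then time a read for a given repoview filter (``visible`` and ``served`` are standard filter names)::

    $ hg perfbranchmapload --list
    $ hg perfbranchmapload --filter visible
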
+@command(b'perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
 
@@ -1781,27 +1966,34 @@
     timer(lambda: len(obsolete.obsstore(svfs)))
     fm.end()
 
-@command('perflrucachedict', formatteropts +
-    [('', 'size', 4, 'size of cache'),
-     ('', 'gets', 10000, 'number of key lookups'),
-     ('', 'sets', 10000, 'number of key sets'),
-     ('', 'mixed', 10000, 'number of mixed mode operations'),
-     ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
+@command(b'perflrucachedict', formatteropts +
+    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
+     (b'', b'mincost', 0, b'smallest cost of items in cache'),
+     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
+     (b'', b'size', 4, b'size of cache'),
+     (b'', b'gets', 10000, b'number of key lookups'),
+     (b'', b'sets', 10000, b'number of key sets'),
+     (b'', b'mixed', 10000, b'number of mixed mode operations'),
+     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
     norepo=True)
-def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
-                 mixedgetfreq=50, **opts):
+def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
+                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
+    opts = _byteskwargs(opts)
+
     def doinit():
-        for i in xrange(10000):
+        for i in _xrange(10000):
             util.lrucachedict(size)
 
+    costrange = list(range(mincost, maxcost + 1))
+
     values = []
-    for i in xrange(size):
-        values.append(random.randint(0, sys.maxint))
+    for i in _xrange(size):
+        values.append(random.randint(0, _maxint))
 
     # Get mode fills the cache and tests raw lookup performance with no
     # eviction.
     getseq = []
-    for i in xrange(gets):
+    for i in _xrange(gets):
         getseq.append(random.choice(values))
 
     def dogets():
@@ -1812,10 +2004,33 @@
             value = d[key]
             value # silence pyflakes warning
 
+    def dogetscost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+        for i, v in enumerate(values):
+            d.insert(v, v, cost=costs[i])
+        for key in getseq:
+            try:
+                value = d[key]
+                value # silence pyflakes warning
+            except KeyError:
+                pass
+
     # Set mode tests insertion speed with cache eviction.
     setseq = []
-    for i in xrange(sets):
-        setseq.append(random.randint(0, sys.maxint))
+    costs = []
+    for i in _xrange(sets):
+        setseq.append(random.randint(0, _maxint))
+        costs.append(random.choice(costrange))
+
+    def doinserts():
+        d = util.lrucachedict(size)
+        for v in setseq:
+            d.insert(v, v)
+
+    def doinsertscost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+        for i, v in enumerate(setseq):
+            d.insert(v, v, cost=costs[i])
 
     def dosets():
         d = util.lrucachedict(size)
@@ -1824,19 +2039,21 @@
 
     # Mixed mode randomly performs gets and sets with eviction.
     mixedops = []
-    for i in xrange(mixed):
+    for i in _xrange(mixed):
         r = random.randint(0, 100)
         if r < mixedgetfreq:
             op = 0
         else:
             op = 1
 
-        mixedops.append((op, random.randint(0, size * 2)))
+        mixedops.append((op,
+                         random.randint(0, size * 2),
+                         random.choice(costrange)))
 
     def domixed():
         d = util.lrucachedict(size)
 
-        for op, v in mixedops:
+        for op, v, cost in mixedops:
             if op == 0:
                 try:
                     d[v]
@@ -1845,40 +2062,65 @@
             else:
                 d[v] = v
 
+    def domixedcost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+
+        for op, v, cost in mixedops:
+            if op == 0:
+                try:
+                    d[v]
+                except KeyError:
+                    pass
+            else:
+                d.insert(v, v, cost=cost)
+
     benches = [
-        (doinit, 'init'),
-        (dogets, 'gets'),
-        (dosets, 'sets'),
-        (domixed, 'mixed')
+        (doinit, b'init'),
     ]
 
+    if costlimit:
+        benches.extend([
+            (dogetscost, b'gets w/ cost limit'),
+            (doinsertscost, b'inserts w/ cost limit'),
+            (domixedcost, b'mixed w/ cost limit'),
+        ])
+    else:
+        benches.extend([
+            (dogets, b'gets'),
+            (doinserts, b'inserts'),
+            (dosets, b'sets'),
+            (domixed, b'mixed')
+        ])
+
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
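These benchmarks exercise the cost-aware ``lrucachedict`` API introduced alongside this change: a ``maxcost`` budget on the cache plus per-item costs passed to ``insert()``. The shape of the API as used above::

    from mercurial import util

    # evicts both on entry count (4) and on aggregate item cost (100)
    d = util.lrucachedict(4, maxcost=100)
    d.insert(b'key', b'value', cost=10)
    value = d[b'key']
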
-@command('perfwrite', formatteropts)
+@command(b'perfwrite', formatteropts)
 def perfwrite(ui, repo, **opts):
     """microbenchmark ui.write
     """
+    opts = _byteskwargs(opts)
+
     timer, fm = gettimer(ui, opts)
     def write():
         for i in range(100000):
-            ui.write(('Testing write performance\n'))
+            ui.write((b'Testing write performance\n'))
     timer(write)
     fm.end()
 
 def uisetup(ui):
-    if (util.safehasattr(cmdutil, 'openrevlog') and
-        not util.safehasattr(commands, 'debugrevlogopts')):
+    if (util.safehasattr(cmdutil, r'openrevlog') and
+        not util.safehasattr(commands, r'debugrevlogopts')):
         # for "historical portability":
         # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
         # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
         # openrevlog() should cause failure, because it has been
         # available since 3.5 (or 49c583ca48c4).
         def openrevlog(orig, repo, cmd, file_, opts):
-            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
-                raise error.Abort("This version doesn't support --dir option",
-                                  hint="use 3.5 or later")
+            if opts.get(b'dir') and not util.safehasattr(repo, r'dirlog'):
+                raise error.Abort(b"This version doesn't support --dir option",
+                                  hint=b"use 3.5 or later")
             return orig(repo, cmd, file_, opts)
-        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
+        extensions.wrapfunction(cmdutil, r'openrevlog', openrevlog)
--- a/contrib/phabricator.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,980 +0,0 @@
-# phabricator.py - simple Phabricator integration
-#
-# Copyright 2017 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-"""simple Phabricator integration
-
-This extension provides a ``phabsend`` command which sends a stack of
-changesets to Phabricator, and a ``phabread`` command which prints a stack of
-revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
-to update statuses in batch.
-
-By default, Phabricator requires ``Test Plan`` which might prevent some
-changeset from being sent. The requirement could be disabled by changing
-``differential.require-test-plan-field`` config server side.
-
-Config::
-
-    [phabricator]
-    # Phabricator URL
-    url = https://phab.example.com/
-
-    # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
-    # callsign is "FOO".
-    callsign = FOO
-
-    # curl command to use. If not set (default), use builtin HTTP library to
-    # communicate. If set, use the specified curl command. This could be useful
-    # if you need to specify advanced options that is not easily supported by
-    # the internal library.
-    curlcmd = curl --connect-timeout 2 --retry 3 --silent
-
-    [auth]
-    example.schemes = https
-    example.prefix = phab.example.com
-
-    # API token. Get it from https://$HOST/conduit/login/
-    example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
-"""
-
-from __future__ import absolute_import
-
-import itertools
-import json
-import operator
-import re
-
-from mercurial.node import bin, nullid
-from mercurial.i18n import _
-from mercurial import (
-    cmdutil,
-    context,
-    encoding,
-    error,
-    httpconnection as httpconnectionmod,
-    mdiff,
-    obsutil,
-    parser,
-    patch,
-    registrar,
-    scmutil,
-    smartset,
-    tags,
-    url as urlmod,
-    util,
-)
-from mercurial.utils import (
-    procutil,
-    stringutil,
-)
-
-cmdtable = {}
-command = registrar.command(cmdtable)
-
-configtable = {}
-configitem = registrar.configitem(configtable)
-
-# developer config: phabricator.batchsize
-configitem(b'phabricator', b'batchsize',
-    default=12,
-)
-configitem(b'phabricator', b'callsign',
-    default=None,
-)
-configitem(b'phabricator', b'curlcmd',
-    default=None,
-)
-# developer config: phabricator.repophid
-configitem(b'phabricator', b'repophid',
-    default=None,
-)
-configitem(b'phabricator', b'url',
-    default=None,
-)
-configitem(b'phabsend', b'confirm',
-    default=False,
-)
-
-colortable = {
-    b'phabricator.action.created': b'green',
-    b'phabricator.action.skipped': b'magenta',
-    b'phabricator.action.updated': b'magenta',
-    b'phabricator.desc': b'',
-    b'phabricator.drev': b'bold',
-    b'phabricator.node': b'',
-}
-
-def urlencodenested(params):
-    """like urlencode, but works with nested parameters.
-
-    For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
-    flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
-    urlencode. Note: the encoding is consistent with PHP's http_build_query.
-    """
-    flatparams = util.sortdict()
-    def process(prefix, obj):
-        items = {list: enumerate, dict: lambda x: x.items()}.get(type(obj))
-        if items is None:
-            flatparams[prefix] = obj
-        else:
-            for k, v in items(obj):
-                if prefix:
-                    process(b'%s[%s]' % (prefix, k), v)
-                else:
-                    process(k, v)
-    process(b'', params)
-    return util.urlreq.urlencode(flatparams)
-
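For reference, the flattening described in the docstring, with the PHP-style bracketed keys percent-encoded by ``urlencode`` (illustrative; ordering follows dict iteration)::

    >>> urlencodenested({'a': ['b', 'c'], 'd': {'e': 'f'}})
    'a%5B0%5D=b&a%5B1%5D=c&d%5Be%5D=f'
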
-printed_token_warning = False
-
-def readlegacytoken(repo, url):
-    """Transitional support for old phabricator tokens.
-
-    Remove before the 4.7 release.
-    """
-    groups = {}
-    for key, val in repo.ui.configitems(b'phabricator.auth'):
-        if b'.' not in key:
-            repo.ui.warn(_(b"ignoring invalid [phabricator.auth] key '%s'\n")
-                         % key)
-            continue
-        group, setting = key.rsplit(b'.', 1)
-        groups.setdefault(group, {})[setting] = val
-
-    token = None
-    for group, auth in groups.iteritems():
-        if url != auth.get(b'url'):
-            continue
-        token = auth.get(b'token')
-        if token:
-            break
-
-    global printed_token_warning
-
-    if token and not printed_token_warning:
-        printed_token_warning = True
-        repo.ui.warn(_(b'phabricator.auth.token is deprecated - please '
-                       b'migrate to auth.phabtoken.\n'))
-    return token
-
-def readurltoken(repo):
-    """return conduit url, token and make sure they exist
-
-    Currently read from [auth] config section. In the future, it might
-    make sense to read from .arcconfig and .arcrc as well.
-    """
-    url = repo.ui.config(b'phabricator', b'url')
-    if not url:
-        raise error.Abort(_(b'config %s.%s is required')
-                          % (b'phabricator', b'url'))
-
-    res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user)
-    token = None
-
-    if res:
-        group, auth = res
-
-        repo.ui.debug(b"using auth.%s.* for authentication\n" % group)
-
-        token = auth.get(b'phabtoken')
-
-    if not token:
-        token = readlegacytoken(repo, url)
-        if not token:
-            raise error.Abort(_(b'Can\'t find conduit token associated to %s')
-                              % (url,))
-
-    return url, token
-
-def callconduit(repo, name, params):
-    """call Conduit API, params is a dict. return json.loads result, or None"""
-    host, token = readurltoken(repo)
-    url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
-    repo.ui.debug(b'Conduit Call: %s %s\n' % (url, params))
-    params = params.copy()
-    params[b'api.token'] = token
-    data = urlencodenested(params)
-    curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
-    if curlcmd:
-        sin, sout = procutil.popen2(b'%s -d @- %s'
-                                    % (curlcmd, procutil.shellquote(url)))
-        sin.write(data)
-        sin.close()
-        body = sout.read()
-    else:
-        urlopener = urlmod.opener(repo.ui, authinfo)
-        request = util.urlreq.request(url, data=data)
-        body = urlopener.open(request).read()
-    repo.ui.debug(b'Conduit Response: %s\n' % body)
-    parsed = json.loads(body)
-    if parsed.get(r'error_code'):
-        msg = (_(b'Conduit Error (%s): %s')
-               % (parsed[r'error_code'], parsed[r'error_info']))
-        raise error.Abort(msg)
-    return parsed[r'result']
-
-@command(b'debugcallconduit', [], _(b'METHOD'))
-def debugcallconduit(ui, repo, name):
-    """call Conduit API
-
-    Call parameters are read from stdin as a JSON blob. Result will be written
-    to stdout as a JSON blob.
-    """
-    params = json.loads(ui.fin.read())
-    result = callconduit(repo, name, params)
-    s = json.dumps(result, sort_keys=True, indent=2, separators=(b',', b': '))
-    ui.write(b'%s\n' % s)
-
-def getrepophid(repo):
-    """given callsign, return repository PHID or None"""
-    # developer config: phabricator.repophid
-    repophid = repo.ui.config(b'phabricator', b'repophid')
-    if repophid:
-        return repophid
-    callsign = repo.ui.config(b'phabricator', b'callsign')
-    if not callsign:
-        return None
-    query = callconduit(repo, b'diffusion.repository.search',
-                        {b'constraints': {b'callsigns': [callsign]}})
-    if len(query[r'data']) == 0:
-        return None
-    repophid = encoding.strtolocal(query[r'data'][0][r'phid'])
-    repo.ui.setconfig(b'phabricator', b'repophid', repophid)
-    return repophid
-
-_differentialrevisiontagre = re.compile(b'\AD([1-9][0-9]*)\Z')
-_differentialrevisiondescre = re.compile(
-    b'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
-
-def getoldnodedrevmap(repo, nodelist):
-    """find previous nodes that has been sent to Phabricator
-
-    return {node: (oldnode, Differential diff, Differential Revision ID)}
-    for node in nodelist with known previous sent versions, or associated
-    Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
-    be ``None``.
-
-    Examines commit messages like "Differential Revision:" to get the
-    association information.
-
-    If such commit message line is not found, examines all precursors and their
-    tags. Tags with format like "D1234" are considered a match and the node
-    with that tag, and the number after "D" (ex. 1234) will be returned.
-
-    The ``old node``, if not None, is guaranteed to be the last diff of
-    corresponding Differential Revision, and exist in the repo.
-    """
-    url, token = readurltoken(repo)
-    unfi = repo.unfiltered()
-    nodemap = unfi.changelog.nodemap
-
-    result = {} # {node: (oldnode?, lastdiff?, drev)}
-    toconfirm = {} # {node: (force, {precnode}, drev)}
-    for node in nodelist:
-        ctx = unfi[node]
-        # For tags like "D123", put them into "toconfirm" to verify later
-        precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
-        for n in precnodes:
-            if n in nodemap:
-                for tag in unfi.nodetags(n):
-                    m = _differentialrevisiontagre.match(tag)
-                    if m:
-                        toconfirm[node] = (0, set(precnodes), int(m.group(1)))
-                        continue
-
-        # Check commit message
-        m = _differentialrevisiondescre.search(ctx.description())
-        if m:
-            toconfirm[node] = (1, set(precnodes), int(m.group(b'id')))
-
-    # Double check if tags are genuine by collecting all old nodes from
-    # Phabricator, and expect precursors overlap with it.
-    if toconfirm:
-        drevs = [drev for force, precs, drev in toconfirm.values()]
-        alldiffs = callconduit(unfi, b'differential.querydiffs',
-                               {b'revisionIDs': drevs})
-        getnode = lambda d: bin(encoding.unitolocal(
-            getdiffmeta(d).get(r'node', b''))) or None
-        for newnode, (force, precset, drev) in toconfirm.items():
-            diffs = [d for d in alldiffs.values()
-                     if int(d[r'revisionID']) == drev]
-
-            # "precursors" as known by Phabricator
-            phprecset = set(getnode(d) for d in diffs)
-
-            # Ignore if precursors (Phabricator and local repo) do not overlap,
-            # and force is not set (when commit message says nothing)
-            if not force and not bool(phprecset & precset):
-                tagname = b'D%d' % drev
-                tags.tag(repo, tagname, nullid, message=None, user=None,
-                         date=None, local=True)
-                unfi.ui.warn(_(b'D%s: local tag removed - does not match '
-                               b'Differential history\n') % drev)
-                continue
-
-            # Find the last node using Phabricator metadata, and make sure it
-            # exists in the repo
-            oldnode = lastdiff = None
-            if diffs:
-                lastdiff = max(diffs, key=lambda d: int(d[r'id']))
-                oldnode = getnode(lastdiff)
-                if oldnode and oldnode not in nodemap:
-                    oldnode = None
-
-            result[newnode] = (oldnode, lastdiff, drev)
-
-    return result
-
-def getdiff(ctx, diffopts):
-    """plain-text diff without header (user, commit message, etc)"""
-    output = util.stringio()
-    for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
-                                      None, opts=diffopts):
-        output.write(chunk)
-    return output.getvalue()
-
-def creatediff(ctx):
-    """create a Differential Diff"""
-    repo = ctx.repo()
-    repophid = getrepophid(repo)
-    # Create a "Differential Diff" via "differential.createrawdiff" API
-    params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
-    if repophid:
-        params[b'repositoryPHID'] = repophid
-    diff = callconduit(repo, b'differential.createrawdiff', params)
-    if not diff:
-        raise error.Abort(_(b'cannot create diff for %s') % ctx)
-    return diff
-
-def writediffproperties(ctx, diff):
-    """write metadata to diff so patches could be applied losslessly"""
-    params = {
-        b'diff_id': diff[r'id'],
-        b'name': b'hg:meta',
-        b'data': json.dumps({
-            b'user': ctx.user(),
-            b'date': b'%d %d' % ctx.date(),
-            b'node': ctx.hex(),
-            b'parent': ctx.p1().hex(),
-        }),
-    }
-    callconduit(ctx.repo(), b'differential.setdiffproperty', params)
-
-    params = {
-        b'diff_id': diff[r'id'],
-        b'name': b'local:commits',
-        b'data': json.dumps({
-            ctx.hex(): {
-                b'author': stringutil.person(ctx.user()),
-                b'authorEmail': stringutil.email(ctx.user()),
-                b'time': ctx.date()[0],
-            },
-        }),
-    }
-    callconduit(ctx.repo(), b'differential.setdiffproperty', params)
-
-def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
-                               olddiff=None, actions=None):
-    """create or update a Differential Revision
-
-    If revid is None, create a new Differential Revision, otherwise update
-    revid. If parentrevid is not None, set it as a dependency.
-
-    If oldnode is not None, check if the patch content (without commit message
-    and metadata) has changed before creating another diff.
-
-    If actions is not None, they will be appended to the transaction.
-    """
-    repo = ctx.repo()
-    if oldnode:
-        diffopts = mdiff.diffopts(git=True, context=32767)
-        oldctx = repo.unfiltered()[oldnode]
-        neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
-    else:
-        neednewdiff = True
-
-    transactions = []
-    if neednewdiff:
-        diff = creatediff(ctx)
-        transactions.append({b'type': b'update', b'value': diff[r'phid']})
-    else:
-        # Even if we don't need to upload a new diff because the patch content
-        # does not change, we might still need to update its metadata so
-        # pushers know the correct node metadata.
-        assert olddiff
-        diff = olddiff
-    writediffproperties(ctx, diff)
-
-    # Use a temporary summary to set the dependency. There might be better ways
-    # but I cannot find them for now. But do not do that if we are updating an
-    # existing revision (revid is not None) since that introduces visible
-    # churn (someone edited "Summary" twice) on the web page.
-    if parentrevid and revid is None:
-        summary = b'Depends on D%s' % parentrevid
-        transactions += [{b'type': b'summary', b'value': summary},
-                         {b'type': b'summary', b'value': b' '}]
-
-    if actions:
-        transactions += actions
-
-    # Parse commit message and update related fields.
-    desc = ctx.description()
-    info = callconduit(repo, b'differential.parsecommitmessage',
-                       {b'corpus': desc})
-    for k, v in info[r'fields'].items():
-        if k in [b'title', b'summary', b'testPlan']:
-            transactions.append({b'type': k, b'value': v})
-
-    params = {b'transactions': transactions}
-    if revid is not None:
-        # Update an existing Differential Revision
-        params[b'objectIdentifier'] = revid
-
-    revision = callconduit(repo, b'differential.revision.edit', params)
-    if not revision:
-        raise error.Abort(_(b'cannot create revision for %s') % ctx)
-
-    return revision, diff
-
-def userphids(repo, names):
-    """convert user names to PHIDs"""
-    query = {b'constraints': {b'usernames': names}}
-    result = callconduit(repo, b'user.search', query)
-    # An unknown username is not an API error, so check whether we missed any
-    # names here.
-    data = result[r'data']
-    resolved = set(entry[r'fields'][r'username'] for entry in data)
-    unresolved = set(names) - resolved
-    if unresolved:
-        raise error.Abort(_(b'unknown username: %s')
-                          % b' '.join(sorted(unresolved)))
-    return [entry[r'phid'] for entry in data]
-
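A minimal sketch of the unresolved-name check above, assuming a hypothetical
``user.search`` result (plain ``str`` keys for brevity)::

   result = {'data': [{'phid': 'PHID-USER-aaaa',
                       'fields': {'username': 'alice'}}]}
   resolved = {e['fields']['username'] for e in result['data']}
   unresolved = {'alice', 'bob'} - resolved
   assert unresolved == {'bob'}  # this is what triggers the Abort above
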
-@command(b'phabsend',
-         [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
-          (b'', b'amend', True, _(b'update commit messages')),
-          (b'', b'reviewer', [], _(b'specify reviewers')),
-          (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
-         _(b'REV [OPTIONS]'))
-def phabsend(ui, repo, *revs, **opts):
-    """upload changesets to Phabricator
-
-    If there are multiple revisions specified, they will be sent as a stack
-    with a linear dependency relationship, using the order specified by the
-    revset.
-
-    When changesets are uploaded for the first time, local tags will be created
-    to maintain the association. After the first time, phabsend will check
-    obsstore and tags information so it can figure out whether to update an
-    existing Differential Revision, or create a new one.
-
-    If --amend is set, update commit messages so they have the
-    ``Differential Revision`` URL, and remove related tags. This is similar to
-    what arcanist does, and is preferred in author-push workflows. Otherwise,
-    use local tags to record the ``Differential Revision`` association.
-
-    The --confirm option lets you confirm changesets before sending them. You
-    can also add the following to your configuration file to make it the
-    default behaviour::
-
-        [phabsend]
-        confirm = true
-
-    phabsend will check obsstore and the above association to decide whether to
-    update an existing Differential Revision, or create a new one.
-    """
-    revs = list(revs) + opts.get(b'rev', [])
-    revs = scmutil.revrange(repo, revs)
-
-    if not revs:
-        raise error.Abort(_(b'phabsend requires at least one changeset'))
-    if opts.get(b'amend'):
-        cmdutil.checkunfinished(repo)
-
-    # {newnode: (oldnode, olddiff, olddrev)}
-    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
-
-    confirm = ui.configbool(b'phabsend', b'confirm')
-    confirm |= bool(opts.get(b'confirm'))
-    if confirm:
-        confirmed = _confirmbeforesend(repo, revs, oldmap)
-        if not confirmed:
-            raise error.Abort(_(b'phabsend cancelled'))
-
-    actions = []
-    reviewers = opts.get(b'reviewer', [])
-    if reviewers:
-        phids = userphids(repo, reviewers)
-        actions.append({b'type': b'reviewers.add', b'value': phids})
-
-    drevids = [] # [int]
-    diffmap = {} # {newnode: diff}
-
-    # Send patches one by one so we know their Differential Revision IDs and
-    # can provide dependency relationships
-    lastrevid = None
-    for rev in revs:
-        ui.debug(b'sending rev %d\n' % rev)
-        ctx = repo[rev]
-
-        # Get Differential Revision ID
-        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
-        if oldnode != ctx.node() or opts.get(b'amend'):
-            # Create or update Differential Revision
-            revision, diff = createdifferentialrevision(
-                ctx, revid, lastrevid, oldnode, olddiff, actions)
-            diffmap[ctx.node()] = diff
-            newrevid = int(revision[r'object'][r'id'])
-            if revid:
-                action = b'updated'
-            else:
-                action = b'created'
-
-            # Create a local tag to note the association, if commit message
-            # does not have it already
-            m = _differentialrevisiondescre.search(ctx.description())
-            if not m or int(m.group(b'id')) != newrevid:
-                tagname = b'D%d' % newrevid
-                tags.tag(repo, tagname, ctx.node(), message=None, user=None,
-                         date=None, local=True)
-        else:
-            # Nothing changed. But still set "newrevid" so the next revision
-            # could depend on this one.
-            newrevid = revid
-            action = b'skipped'
-
-        actiondesc = ui.label(
-            {b'created': _(b'created'),
-             b'skipped': _(b'skipped'),
-             b'updated': _(b'updated')}[action],
-            b'phabricator.action.%s' % action)
-        drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev')
-        nodedesc = ui.label(bytes(ctx), b'phabricator.node')
-        desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
-        ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
-                                             desc))
-        drevids.append(newrevid)
-        lastrevid = newrevid
-
-    # Update commit messages and remove tags
-    if opts.get(b'amend'):
-        unfi = repo.unfiltered()
-        drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
-        with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
-            wnode = unfi[b'.'].node()
-            mapping = {} # {oldnode: [newnode]}
-            for i, rev in enumerate(revs):
-                old = unfi[rev]
-                drevid = drevids[i]
-                drev = [d for d in drevs if int(d[r'id']) == drevid][0]
-                newdesc = getdescfromdrev(drev)
-                # Make sure the commit message contains "Differential Revision"
-                if old.description() != newdesc:
-                    parents = [
-                        mapping.get(old.p1().node(), (old.p1(),))[0],
-                        mapping.get(old.p2().node(), (old.p2(),))[0],
-                    ]
-                    new = context.metadataonlyctx(
-                        repo, old, parents=parents, text=newdesc,
-                        user=old.user(), date=old.date(), extra=old.extra())
-
-                    newnode = new.commit()
-
-                    mapping[old.node()] = [newnode]
-                    # Update diff property
-                    writediffproperties(unfi[newnode], diffmap[old.node()])
-                # Remove local tags since they're no longer necessary
-                tagname = b'D%d' % drevid
-                if tagname in repo.tags():
-                    tags.tag(repo, tagname, nullid, message=None, user=None,
-                             date=None, local=True)
-            scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
-            if wnode in mapping:
-                unfi.setparents(mapping[wnode][0])
-
-# Map from "hg:meta" keys to header understood by "hg import". The order is
-# consistent with "hg export" output.
-_metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'),
-                              (r'node', b'Node ID'), (r'parent', b'Parent ')])
-
-def _confirmbeforesend(repo, revs, oldmap):
-    url, token = readurltoken(repo)
-    ui = repo.ui
-    for rev in revs:
-        ctx = repo[rev]
-        desc = ctx.description().splitlines()[0]
-        oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
-        if drevid:
-            drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
-        else:
-            drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
-
-        ui.write(_(b'%s - %s: %s\n')
-                 % (drevdesc,
-                    ui.label(bytes(ctx), b'phabricator.node'),
-                    ui.label(desc, b'phabricator.desc')))
-
-    if ui.promptchoice(_(b'Send the above changes to %s (yn)?'
-                         b'$$ &Yes $$ &No') % url):
-        return False
-
-    return True
-
-_knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
-                     b'abandoned'}
-
-def _getstatusname(drev):
-    """get normalized status name from a Differential Revision"""
-    return drev[r'statusName'].replace(b' ', b'').lower()
-
-# Small language to specify differential revisions. Supported symbols: (), :X,
-# +, -, and &.
-
-_elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    b'(':      (12, None, (b'group', 1, b')'), None, None),
-    b':':      (8, None, (b'ancestors', 8), None, None),
-    b'&':      (5,  None, None, (b'and_', 5), None),
-    b'+':      (4,  None, None, (b'add', 4), None),
-    b'-':      (4,  None, None, (b'sub', 4), None),
-    b')':      (0,  None, None, None, None),
-    b'symbol': (0, b'symbol', None, None, None),
-    b'end':    (0, None, None, None, None),
-}
-
-def _tokenize(text):
-    view = memoryview(text) # zero-copy slice
-    special = b'():+-& '
-    pos = 0
-    length = len(text)
-    while pos < length:
-        symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
-                                              view[pos:]))
-        if symbol:
-            yield (b'symbol', symbol, pos)
-            pos += len(symbol)
-        else: # special char, ignore space
-            if text[pos] != b' ':
-                yield (text[pos], None, pos)
-            pos += 1
-    yield (b'end', None, pos)
-
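A ``str``-based sketch of the tokenizer above (same algorithm, minus the
memoryview optimization), showing the token stream for a small spec::

   import itertools

   def tokenize(text):
       special = '():+-& '
       pos = 0
       while pos < len(text):
           symbol = ''.join(itertools.takewhile(
               lambda ch: ch not in special, text[pos:]))
           if symbol:
               yield ('symbol', symbol, pos)
               pos += len(symbol)
           else:  # special char, ignore space
               if text[pos] != ' ':
                   yield (text[pos], None, pos)
               pos += 1
       yield ('end', None, pos)

   assert list(tokenize(':D6+8')) == [
       (':', None, 0), ('symbol', 'D6', 1), ('+', None, 3),
       ('symbol', '8', 4), ('end', None, 5)]
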
-def _parse(text):
-    tree, pos = parser.parser(_elements).parse(_tokenize(text))
-    if pos != len(text):
-        raise error.ParseError(b'invalid token', pos)
-    return tree
-
-def _parsedrev(symbol):
-    """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
-    if symbol.startswith(b'D') and symbol[1:].isdigit():
-        return int(symbol[1:])
-    if symbol.isdigit():
-        return int(symbol)
-
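A ``str``-based sketch of the parsing above, exercising the examples from the
docstring::

   def parsedrev(symbol):
       if symbol.startswith('D') and symbol[1:].isdigit():
           return int(symbol[1:])
       if symbol.isdigit():
           return int(symbol)

   assert parsedrev('D45') == 45
   assert parsedrev('12') == 12
   assert parsedrev('x') is None
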
-def _prefetchdrevs(tree):
-    """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
-    drevs = set()
-    ancestordrevs = set()
-    op = tree[0]
-    if op == b'symbol':
-        r = _parsedrev(tree[1])
-        if r:
-            drevs.add(r)
-    elif op == b'ancestors':
-        r, a = _prefetchdrevs(tree[1])
-        drevs.update(r)
-        ancestordrevs.update(r)
-        ancestordrevs.update(a)
-    else:
-        for t in tree[1:]:
-            r, a = _prefetchdrevs(t)
-            drevs.update(r)
-            ancestordrevs.update(a)
-    return drevs, ancestordrevs
-
-def querydrev(repo, spec):
-    """return a list of "Differential Revision" dicts
-
-    spec is a string using a simple query language, see docstring in phabread
-    for details.
-
-    A "Differential Revision dict" looks like:
-
-        {
-            "id": "2",
-            "phid": "PHID-DREV-672qvysjcczopag46qty",
-            "title": "example",
-            "uri": "https://phab.example.com/D2",
-            "dateCreated": "1499181406",
-            "dateModified": "1499182103",
-            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
-            "status": "0",
-            "statusName": "Needs Review",
-            "properties": [],
-            "branch": null,
-            "summary": "",
-            "testPlan": "",
-            "lineCount": "2",
-            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
-            "diffs": [
-              "3",
-              "4",
-            ],
-            "commits": [],
-            "reviewers": [],
-            "ccs": [],
-            "hashes": [],
-            "auxiliary": {
-              "phabricator:projects": [],
-              "phabricator:depends-on": [
-                "PHID-DREV-gbapp366kutjebt7agcd"
-              ]
-            },
-            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
-            "sourcePath": null
-        }
-    """
-    def fetch(params):
-        """params -> single drev or None"""
-        key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
-        if key in prefetched:
-            return prefetched[key]
-        drevs = callconduit(repo, b'differential.query', params)
-        # Fill prefetched with the result
-        for drev in drevs:
-            prefetched[drev[r'phid']] = drev
-            prefetched[int(drev[r'id'])] = drev
-        if key not in prefetched:
-            raise error.Abort(_(b'cannot get Differential Revision %r')
-                              % params)
-        return prefetched[key]
-
-    def getstack(topdrevids):
-        """given a top, get a stack from the bottom, [id] -> [id]"""
-        visited = set()
-        result = []
-        queue = [{r'ids': [i]} for i in topdrevids]
-        while queue:
-            params = queue.pop()
-            drev = fetch(params)
-            if drev[r'id'] in visited:
-                continue
-            visited.add(drev[r'id'])
-            result.append(int(drev[r'id']))
-            auxiliary = drev.get(r'auxiliary', {})
-            depends = auxiliary.get(r'phabricator:depends-on', [])
-            for phid in depends:
-                queue.append({b'phids': [phid]})
-        result.reverse()
-        return smartset.baseset(result)
-
-    # Initialize prefetch cache
-    prefetched = {} # {id or phid: drev}
-
-    tree = _parse(spec)
-    drevs, ancestordrevs = _prefetchdrevs(tree)
-
-    # developer config: phabricator.batchsize
-    batchsize = repo.ui.configint(b'phabricator', b'batchsize')
-
-    # Prefetch Differential Revisions in batch
-    tofetch = set(drevs)
-    for r in ancestordrevs:
-        tofetch.update(range(max(1, r - batchsize), r + 1))
-    if drevs:
-        fetch({r'ids': list(tofetch)})
-    validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
-
-    # Walk through the tree, return smartsets
-    def walk(tree):
-        op = tree[0]
-        if op == b'symbol':
-            drev = _parsedrev(tree[1])
-            if drev:
-                return smartset.baseset([drev])
-            elif tree[1] in _knownstatusnames:
-                drevs = [r for r in validids
-                         if _getstatusname(prefetched[r]) == tree[1]]
-                return smartset.baseset(drevs)
-            else:
-                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
-        elif op in {b'and_', b'add', b'sub'}:
-            assert len(tree) == 3
-            return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
-        elif op == b'group':
-            return walk(tree[1])
-        elif op == b'ancestors':
-            return getstack(walk(tree[1]))
-        else:
-            raise error.ProgrammingError(b'illegal tree: %r' % tree)
-
-    return [prefetched[r] for r in walk(tree)]
-
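The ``walk()`` helper above dispatches ``+``, ``-`` and ``&`` onto
``operator.add``, ``operator.sub`` and ``operator.and_`` over smartsets. A
minimal sketch of that dispatch trick, using lists (which also support ``+``)
in place of smartsets::

   import operator

   op = 'add'  # tree[0] for a '+' node, e.g. from the spec 'D1+D2'
   assert getattr(operator, op)([1, 2], [3]) == [1, 2, 3]
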
-def getdescfromdrev(drev):
-    """get description (commit message) from "Differential Revision"
-
-    This is similar to the differential.getcommitmessage API, but we only care
-    about a limited set of fields: title, summary, test plan, and URL.
-    """
-    title = drev[r'title']
-    summary = drev[r'summary'].rstrip()
-    testplan = drev[r'testPlan'].rstrip()
-    if testplan:
-        testplan = b'Test Plan:\n%s' % testplan
-    uri = b'Differential Revision: %s' % drev[r'uri']
-    return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
-
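A ``str`` sketch of the assembly above, using a hypothetical drev payload::

   def descfromdrev(drev):
       title = drev['title']
       summary = drev['summary'].rstrip()
       testplan = drev['testPlan'].rstrip()
       if testplan:
           testplan = 'Test Plan:\n%s' % testplan
       uri = 'Differential Revision: %s' % drev['uri']
       return '\n\n'.join(filter(None, [title, summary, testplan, uri]))

   d = {'title': 'example', 'summary': '', 'testPlan': '',
        'uri': 'https://phab.example.com/D2'}
   assert descfromdrev(d) == (
       'example\n\nDifferential Revision: https://phab.example.com/D2')
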
-def getdiffmeta(diff):
-    """get commit metadata (date, node, user, p1) from a diff object
-
-    The metadata could be "hg:meta", sent by phabsend, like:
-
-        "properties": {
-          "hg:meta": {
-            "date": "1499571514 25200",
-            "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
-            "user": "Foo Bar <foo@example.com>",
-            "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
-          }
-        }
-
-    Or converted from "local:commits", sent by "arc", like:
-
-        "properties": {
-          "local:commits": {
-            "98c08acae292b2faf60a279b4189beb6cff1414d": {
-              "author": "Foo Bar",
-              "time": 1499546314,
-              "branch": "default",
-              "tag": "",
-              "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
-              "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
-              "local": "1000",
-              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
-              "summary": "...",
-              "message": "...",
-              "authorEmail": "foo@example.com"
-            }
-          }
-        }
-
-    Note: metadata extracted from "local:commits" will lose time zone
-    information.
-    """
-    props = diff.get(r'properties') or {}
-    meta = props.get(r'hg:meta')
-    if not meta and props.get(r'local:commits'):
-        commit = sorted(props[r'local:commits'].values())[0]
-        meta = {
-            r'date': r'%d 0' % commit[r'time'],
-            r'node': commit[r'rev'],
-            r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
-        }
-        if len(commit.get(r'parents', ())) >= 1:
-            meta[r'parent'] = commit[r'parents'][0]
-    return meta or {}
-
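A sketch of the ``local:commits`` fallback above, with ``str`` keys and
abbreviated hashes::

   commit = {'time': 1499546314, 'rev': '98c08acae292',
             'author': 'Foo Bar', 'authorEmail': 'foo@example.com',
             'parents': ['6d0abad76b30']}
   meta = {
       'date': '%d 0' % commit['time'],  # time zone information is lost
       'node': commit['rev'],
       'user': '%s <%s>' % (commit['author'], commit['authorEmail']),
   }
   if len(commit.get('parents', ())) >= 1:
       meta['parent'] = commit['parents'][0]
   assert meta['date'] == '1499546314 0'
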
-def readpatch(repo, drevs, write):
-    """generate plain-text patch readable by 'hg import'
-
-    write is usually ui.write. drevs is what "querydrev" returns, results of
-    "differential.query".
-    """
-    # Prefetch hg:meta property for all diffs
-    diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
-    diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})
-
-    # Generate patch for each drev
-    for drev in drevs:
-        repo.ui.note(_(b'reading D%s\n') % drev[r'id'])
-
-        diffid = max(int(v) for v in drev[r'diffs'])
-        body = callconduit(repo, b'differential.getrawdiff',
-                           {b'diffID': diffid})
-        desc = getdescfromdrev(drev)
-        header = b'# HG changeset patch\n'
-
-        # Try to preserve metadata from hg:meta property. Write hg patch
-        # headers that can be read by the "import" command. See patchheadermap
-        # and extract in mercurial/patch.py for supported headers.
-        meta = getdiffmeta(diffs[str(diffid)])
-        for k in _metanamemap.keys():
-            if k in meta:
-                header += b'# %s %s\n' % (_metanamemap[k], meta[k])
-
-        content = b'%s%s\n%s' % (header, desc, body)
-        write(encoding.unitolocal(content))
-
-@command(b'phabread',
-         [(b'', b'stack', False, _(b'read dependencies'))],
-         _(b'DREVSPEC [OPTIONS]'))
-def phabread(ui, repo, spec, **opts):
-    """print patches from Phabricator suitable for importing
-
-    DREVSPEC could be a Differential Revision identity, like ``D123``, or just
-    the number ``123``. It could also have common operators like ``+``, ``-``,
-    ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
-    select a stack.
-
-    ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
-    could be used to filter patches by status. For performance reasons, they
-    only represent a subset of non-status selections and cannot be used alone.
-
-    For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
-    D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
-    stack up to D9.
-
-    If --stack is given, follow dependency information and read all patches.
-    It is equivalent to the ``:`` operator.
-    """
-    if opts.get(b'stack'):
-        spec = b':(%s)' % spec
-    drevs = querydrev(repo, spec)
-    readpatch(repo, drevs, ui.write)
-
-@command(b'phabupdate',
-         [(b'', b'accept', False, _(b'accept revisions')),
-          (b'', b'reject', False, _(b'reject revisions')),
-          (b'', b'abandon', False, _(b'abandon revisions')),
-          (b'', b'reclaim', False, _(b'reclaim revisions')),
-          (b'm', b'comment', b'', _(b'comment on the last revision')),
-          ], _(b'DREVSPEC [OPTIONS]'))
-def phabupdate(ui, repo, spec, **opts):
-    """update Differential Revision in batch
-
-    DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
-    """
-    flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
-    if len(flags) > 1:
-        raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
-
-    actions = []
-    for f in flags:
-        actions.append({b'type': f, b'value': b'true'})
-
-    drevs = querydrev(repo, spec)
-    for i, drev in enumerate(drevs):
-        if i + 1 == len(drevs) and opts.get(b'comment'):
-            actions.append({b'type': b'comment', b'value': opts[b'comment']})
-        if actions:
-            params = {b'objectIdentifier': drev[r'phid'],
-                      b'transactions': actions}
-            callconduit(repo, b'differential.revision.edit', params)
-
-templatekeyword = registrar.templatekeyword()
-
-@templatekeyword(b'phabreview', requires={b'ctx'})
-def template_review(context, mapping):
-    """:phabreview: Object describing the review for this changeset.
-    Has attributes `url` and `id`.
-    """
-    ctx = context.resource(mapping, b'ctx')
-    m = _differentialrevisiondescre.search(ctx.description())
-    if m:
-        return {
-            b'url': m.group(b'url'),
-            b'id': b"D{}".format(m.group(b'id')),
-        }
--- a/contrib/python-hook-examples.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-hook-examples.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,7 @@
     node = kwargs['node']
     first = repo[node].p1().node()
     if 'url' in kwargs:
-        last = repo['tip'].node()
+        last = repo.changelog.tip()
     else:
         last = node
     diff = patch.diff(repo, first, last)
--- a/contrib/python-zstandard/MANIFEST.in	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/MANIFEST.in	Mon Oct 22 14:46:06 2018 -0400
@@ -1,7 +1,10 @@
 graft c-ext
+graft debian
 graft zstd
 graft tests
 include make_cffi.py
 include setup_zstd.py
 include zstd.c
+include zstd_cffi.py
 include LICENSE
+include NEWS.rst
--- a/contrib/python-zstandard/NEWS.rst	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/NEWS.rst	Mon Oct 22 14:46:06 2018 -0400
@@ -30,6 +30,19 @@
 * Remove low-level compression parameters from ``ZstdCompressor.__init__`` and
   require use of ``CompressionParameters``.
 * Expose ``ZSTD_getFrameProgression()`` from more compressor types.
+* Support modifying compression parameters mid operation when supported by
+  zstd API.
+* Expose ``ZSTD_CLEVEL_DEFAULT`` constant.
+* Support ``ZSTD_p_forceAttachDict`` compression parameter.
+* Use ``ZSTD_CCtx_getParameter()``/``ZSTD_CCtxParam_getParameter()`` for retrieving
+  compression parameters.
+* Consider exposing ``ZSTDMT_toFlushNow()``.
+* Expose ``ZDICT_trainFromBuffer_fastCover()``,
+  ``ZDICT_optimizeTrainFromBuffer_fastCover``.
+* Expose and enforce ``ZSTD_minCLevel()`` for minimum compression level.
+* Consider a ``chunker()`` API for decompression.
+* Consider stats for ``chunker()`` API, including finding the last consumed
+  offset of input data.
 
 Other Actions Not Blocking Release
 ---------------------------------------
@@ -38,6 +51,111 @@
 * API for ensuring max memory ceiling isn't exceeded.
 * Move off nose for testing.
 
+0.10.1 (released 2018-10-08)
+============================
+
+Backwards Compatibility Notes
+-----------------------------
+
+* ``ZstdCompressor.stream_reader().closed`` is now a property instead of a
+  method (#58).
+* ``ZstdDecompressor.stream_reader().closed`` is now a property instead of a
+  method (#58).
+
+Changes
+-------
+
+* Stop attempting to package Python 3.6 for Miniconda. The latest version of
+  Miniconda is using Python 3.7. The Python 3.6 Miniconda packages were a lie
+  since they were built against Python 3.7.
+* ``ZstdCompressor.stream_reader()``'s and ``ZstdDecompressor.stream_reader()``'s
+  ``closed`` attribute is now a read-only property instead of a method. This now
+  properly matches the ``IOBase`` API and allows instances to be used in more
+  places that accept ``IOBase`` instances.
+
+0.10.0 (released 2018-10-08)
+============================
+
+Backwards Compatibility Notes
+-----------------------------
+
+* ``ZstdDecompressor.stream_reader().read()`` now consistently requires an
+  argument in both the C and CFFI backends. Before, the CFFI implementation
+  would assume a default value of ``-1``, which was later rejected.
+* The ``compress_literals`` argument and attribute has been removed from
+  ``zstd.ZstdCompressionParameters`` because it was removed by the zstd 1.3.5
+  API.
+* ``ZSTD_CCtx_setParametersUsingCCtxParams()`` is no longer called on every
+  operation performed against ``ZstdCompressor`` instances. The reason for this
+  change is that the zstd 1.3.5 API no longer allows this without calling
+  ``ZSTD_CCtx_resetParameters()`` first. But if we called
+  ``ZSTD_CCtx_resetParameters()`` on every operation, we'd have to redo
+  potentially expensive setup when using dictionaries. We now call
+  ``ZSTD_CCtx_reset()`` on every operation and don't attempt to change
+  compression parameters.
+* Objects returned by ``ZstdCompressor.stream_reader()`` no longer need to be
+  used as a context manager. The context manager interface still exists and its
+  behavior is unchanged.
+* Objects returned by ``ZstdDecompressor.stream_reader()`` no longer need to be
+  used as a context manager. The context manager interface still exists and its
+  behavior is unchanged.
+
+Bug Fixes
+---------
+
+* ``ZstdDecompressor.decompressobj().decompress()`` should now return all data
+  from internal buffers in more scenarios. Before, it was possible for data to
+  remain in internal buffers. This data would be emitted on a subsequent call
+  to ``decompress()``. The overall output stream would still be valid. But if
+  callers were expecting input data to exactly map to output data (say the
+  producer had used ``flush(COMPRESSOBJ_FLUSH_BLOCK)`` and was attempting to
+  map input chunks to output chunks), then the previous behavior would be
+  wrong. The new behavior is such that output from
+  ``flush(COMPRESSOBJ_FLUSH_BLOCK)`` fed into ``decompressobj().decompress()``
+  should produce all input fed to the compressor so far.
+* ``ZstdDecompressor.stream_reader().read()`` should no longer segfault after
+  a previous context manager resulted in error (#56).
+* ``ZstdCompressor.compressobj().flush(COMPRESSOBJ_FLUSH_BLOCK)`` now returns
+  all data necessary to flush a block. Before, it was possible for the
+  ``flush()`` to not emit all data necessary to fully represent a block. This
+  would mean decompressors wouldn't be able to decompress all data that had been
+  fed into the compressor and ``flush()``ed (#55).
+
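A minimal round-trip sketch of the block-flush behavior described above
(assuming the package is importable as ``zstd``)::

   import zstandard as zstd

   cobj = zstd.ZstdCompressor().compressobj()
   dobj = zstd.ZstdDecompressor().decompressobj()

   chunk = cobj.compress(b'hello') + cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
   # The flushed block decompresses to all input fed to the compressor so far.
   assert dobj.decompress(chunk) == b'hello'
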
+New Features
+------------
+
+* New module constants ``BLOCKSIZELOG_MAX``, ``BLOCKSIZE_MAX``,
+  ``TARGETLENGTH_MAX`` that expose constants from libzstd.
+* New ``ZstdCompressor.chunker()`` API for manually feeding data into a
+  compressor and emitting chunks of a fixed size. Like ``compressobj()``, the
+  API doesn't impose restrictions on the input or output types for the
+  data streams. Unlike ``compressobj()``, it ensures output chunks are of a
+  fixed size. This makes this API useful when the compressed output is being
+  fed into an I/O layer, where uniform write sizes are useful.
+* ``ZstdCompressor.stream_reader()`` no longer needs to be used as a context
+  manager (#34).
+* ``ZstdDecompressor.stream_reader()`` no longer needs to be used as a context
+  manager (#34).
+* Bundled zstandard library upgraded from 1.3.4 to 1.3.6.
+
+Changes
+-------
+
+* Added ``zstd_cffi.py`` and ``NEWS.rst`` to ``MANIFEST.in``.
+* ``zstandard.__version__`` is now defined (#50).
+* Upgrade pip, setuptools, wheel, and cibuildwheel packages to latest versions.
+* Upgrade various packages used in CI to latest versions. Notably tox (in
+  order to support Python 3.7).
+* Use relative paths in setup.py to appease Python 3.7 (#51).
+* Added CI for Python 3.7.
+
+0.9.1 (released 2018-06-04)
+===========================
+
+* Debian packaging support.
+* Fix typo in setup.py (#44).
+* Support building with mingw compiler (#46).
+
 0.9.0 (released 2018-04-08)
 ===========================
 
@@ -90,7 +208,7 @@
 New Features
 ------------
 
-* Bundlded zstandard library upgraded from 1.1.3 to 1.3.4. This delivers various
+* Bundled zstandard library upgraded from 1.1.3 to 1.3.4. This delivers various
   bug fixes and performance improvements. It also gives us access to newer
   features.
 * Support for negative compression levels.
--- a/contrib/python-zstandard/README.rst	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/README.rst	Mon Oct 22 14:46:06 2018 -0400
@@ -196,6 +196,17 @@
 
    with open(path, 'rb') as fh:
        cctx = zstd.ZstdCompressor()
+       reader = cctx.stream_reader(fh)
+       while True:
+           chunk = reader.read(16384)
+           if not chunk:
+               break
+
+           # Do something with compressed chunk.
+
+Instances can also be used as context managers::
+
+   with open(path, 'rb') as fh:
        with cctx.stream_reader(fh) as reader:
            while True:
                chunk = reader.read(16384)
@@ -204,9 +215,9 @@
 
                # Do something with compressed chunk.
 
-The stream can only be read within a context manager. When the context
-manager exits, the stream is closed and the underlying resource is
-released and future operations against the compression stream stream will fail.
+When the context manager exits or ``close()`` is called, the stream is closed,
+underlying resources are released, and future operations against the compression
+stream will fail.
 
 The ``source`` argument to ``stream_reader()`` can be any object with a
 ``read(size)`` method or any object implementing the *buffer protocol*.
@@ -419,6 +430,64 @@
    data = cobj.compress(b'foobar')
    data = cobj.flush()
 
+Chunker API
+^^^^^^^^^^^
+
+``chunker(size=None, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE)`` returns
+an object that can be used to iteratively feed chunks of data into a compressor
+and produce output chunks of a uniform size.
+
+The object returned by ``chunker()`` exposes the following methods:
+
+``compress(data)``
+   Feeds new input data into the compressor.
+
+``flush()``
+   Flushes all data currently in the compressor.
+
+``finish()``
+   Signals the end of input data. No new data can be compressed after this
+   method is called.
+
+``compress()``, ``flush()``, and ``finish()`` all return an iterator of
+``bytes`` instances holding compressed data. The iterator may be empty. Callers
+MUST iterate through all elements of the returned iterator before performing
+another operation on the object.
+
+All chunks emitted by ``compress()`` will have a length of ``chunk_size``.
+
+``flush()`` and ``finish()`` may return a final chunk smaller than
+``chunk_size``.
+
+Here is how the API should be used::
+
+   cctx = zstd.ZstdCompressor()
+   chunker = cctx.chunker(chunk_size=32768)
+
+   with open(path, 'rb') as fh:
+       while True:
+           in_chunk = fh.read(32768)
+           if not in_chunk:
+               break
+
+           for out_chunk in chunker.compress(in_chunk):
+               # Do something with output chunk of size 32768.
+
+       for out_chunk in chunker.finish():
+           # Do something with output chunks that finalize the zstd frame.
+
+The ``chunker()`` API is often a better alternative to ``compressobj()``.
+
+``compressobj()`` will emit output data as it is available. This results in a
+*stream* of output chunks of varying sizes. The consistency of the output chunk
+size with ``chunker()`` is more appropriate for many usages, such as sending
+compressed data to a socket.
+
+``compressobj()`` may also perform extra memory reallocations in order to
+dynamically adjust the sizes of the output chunks. Since ``chunker()`` output
+chunks are all the same size (except for flushed or final chunks), there is
+less memory allocation overhead.
+
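For long-running streams, ``flush()`` can be used between ``compress()`` calls
to force out buffered data without ending the frame. A short sketch (``zstd``
assumed imported, as in the examples above)::

   cctx = zstd.ZstdCompressor()
   chunker = cctx.chunker(chunk_size=32768)

   out = []
   for in_chunk in (b'foo' * 20000, b'bar' * 20000):
       out.extend(chunker.compress(in_chunk))
       out.extend(chunker.flush())   # may emit a chunk smaller than 32768
   out.extend(chunker.finish())      # finalizes the zstd frame
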
 Batch Compression API
 ^^^^^^^^^^^^^^^^^^^^^
 
@@ -542,17 +611,24 @@
 
    with open(path, 'rb') as fh:
        dctx = zstd.ZstdDecompressor()
-       with dctx.stream_reader(fh) as reader:
-           while True:
-               chunk = reader.read(16384)
-               if not chunk:
-                   break
+       reader = dctx.stream_reader(fh)
+       while True:
+           chunk = reader.read(16384)
+           if not chunk:
+               break
+
+           # Do something with decompressed chunk.
 
-               # Do something with decompressed chunk.
+The stream can also be used as a context manager::
 
-The stream can only be read within a context manager. When the context
-manager exits, the stream is closed and the underlying resource is
-released and future operations against the stream will fail.
+   with open(path, 'rb') as fh:
+       dctx = zstd.ZstdDecompressor()
+       with dctx.stream_reader(fh) as reader:
+           ...
+
+When used as a context manager, the stream is closed and the underlying
+resources are released when the context manager exits. Future operations against
+the stream will fail.
 
 The ``source`` argument to ``stream_reader()`` can be any object with a
 ``read(size)`` method or any object implementing the *buffer protocol*.
@@ -1077,7 +1153,6 @@
 * write_dict_id
 * job_size
 * overlap_size_log
-* compress_literals
 * force_max_window
 * enable_ldm
 * ldm_hash_log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/compressionchunker.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,360 @@
+/**
+* Copyright (c) 2018-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdCompressionChunkerIterator__doc__,
+	"Iterator of output chunks from ZstdCompressionChunker.\n"
+);
+
+static void ZstdCompressionChunkerIterator_dealloc(ZstdCompressionChunkerIterator* self) {
+	Py_XDECREF(self->chunker);
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressionChunkerIterator_iter(PyObject* self) {
+	Py_INCREF(self);
+	return self;
+}
+
+static PyObject* ZstdCompressionChunkerIterator_iternext(ZstdCompressionChunkerIterator* self) {
+	size_t zresult;
+	PyObject* chunk;
+	ZstdCompressionChunker* chunker = self->chunker;
+	ZSTD_EndDirective zFlushMode;
+
+	if (self->mode != compressionchunker_mode_normal && chunker->input.pos != chunker->input.size) {
+		PyErr_SetString(ZstdError, "input should have been fully consumed before calling flush() or finish()");
+		return NULL;
+	}
+
+	if (chunker->finished) {
+		return NULL;
+	}
+
+	/* If we have data left in the input, consume it. */
+	while (chunker->input.pos < chunker->input.size) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_compress_generic(chunker->compressor->cctx, &chunker->output,
+			&chunker->input, ZSTD_e_continue);
+		Py_END_ALLOW_THREADS
+
+		/* Input is fully consumed. */
+		if (chunker->input.pos == chunker->input.size) {
+			chunker->input.src = NULL;
+			chunker->input.pos = 0;
+			chunker->input.size = 0;
+			PyBuffer_Release(&chunker->inBuffer);
+		}
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		/* If it produced a full output chunk, emit it. */
+		if (chunker->output.pos == chunker->output.size) {
+			chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
+			if (!chunk) {
+				return NULL;
+			}
+
+			chunker->output.pos = 0;
+
+			return chunk;
+		}
+
+		/* Else continue to compress available input data. */
+	}
+
+	/* We also need this here for the special case of an empty input buffer. */
+	if (chunker->input.pos == chunker->input.size) {
+		chunker->input.src = NULL;
+		chunker->input.pos = 0;
+		chunker->input.size = 0;
+		PyBuffer_Release(&chunker->inBuffer);
+	}
+
+	/* No more input data. A partial chunk may be in chunker->output.
+	 * If we're in normal compression mode, we're done. Otherwise if we're in
+	 * flush or finish mode, we need to emit what data remains.
+	 */
+	if (self->mode == compressionchunker_mode_normal) {
+		/* We don't need to set StopIteration. */
+		return NULL;
+	}
+
+	if (self->mode == compressionchunker_mode_flush) {
+		zFlushMode = ZSTD_e_flush;
+	}
+	else if (self->mode == compressionchunker_mode_finish) {
+		zFlushMode = ZSTD_e_end;
+	}
+	else {
+		PyErr_SetString(ZstdError, "unhandled compression mode; this should never happen");
+		return NULL;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_compress_generic(chunker->compressor->cctx, &chunker->output,
+		&chunker->input, zFlushMode);
+	Py_END_ALLOW_THREADS
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "zstd compress error: %s",
+			ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	if (!zresult && chunker->output.pos == 0) {
+		return NULL;
+	}
+
+	chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
+	if (!chunk) {
+		return NULL;
+	}
+
+	chunker->output.pos = 0;
+
+	if (!zresult && self->mode == compressionchunker_mode_finish) {
+		chunker->finished = 1;
+	}
+
+	return chunk;
+}
+
+PyTypeObject ZstdCompressionChunkerIteratorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionChunkerIterator", /* tp_name */
+	sizeof(ZstdCompressionChunkerIterator), /* tp_basicsize */
+	0,                               /* tp_itemsize */
+	(destructor)ZstdCompressionChunkerIterator_dealloc, /* tp_dealloc */
+	0,                               /* tp_print */
+	0,                               /* tp_getattr */
+	0,                               /* tp_setattr */
+	0,                               /* tp_compare */
+	0,                               /* tp_repr */
+	0,                               /* tp_as_number */
+	0,                               /* tp_as_sequence */
+	0,                               /* tp_as_mapping */
+	0,                               /* tp_hash */
+	0,                               /* tp_call */
+	0,                               /* tp_str */
+	0,                               /* tp_getattro */
+	0,                               /* tp_setattro */
+	0,                               /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressionChunkerIterator__doc__, /* tp_doc */
+	0,                               /* tp_traverse */
+	0,                               /* tp_clear */
+	0,                               /* tp_richcompare */
+	0,                               /* tp_weaklistoffset */
+	ZstdCompressionChunkerIterator_iter, /* tp_iter */
+	(iternextfunc)ZstdCompressionChunkerIterator_iternext, /* tp_iternext */
+	0,                               /* tp_methods */
+	0,                               /* tp_members */
+	0,                               /* tp_getset */
+	0,                               /* tp_base */
+	0,                               /* tp_dict */
+	0,                               /* tp_descr_get */
+	0,                               /* tp_descr_set */
+	0,                               /* tp_dictoffset */
+	0,                               /* tp_init */
+	0,                               /* tp_alloc */
+	PyType_GenericNew,              /* tp_new */
+};
+
+PyDoc_STRVAR(ZstdCompressionChunker__doc__,
+	"Compress chunks iteratively into exact chunk sizes.\n"
+);
+
+static void ZstdCompressionChunker_dealloc(ZstdCompressionChunker* self) {
+	PyBuffer_Release(&self->inBuffer);
+	self->input.src = NULL;
+
+	PyMem_Free(self->output.dst);
+	self->output.dst = NULL;
+
+	Py_XDECREF(self->compressor);
+
+	PyObject_Del(self);
+}
+
+static ZstdCompressionChunkerIterator* ZstdCompressionChunker_compress(ZstdCompressionChunker* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"data",
+		NULL
+	};
+
+	ZstdCompressionChunkerIterator* result;
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "cannot call compress() after compression finished");
+		return NULL;
+	}
+
+	if (self->inBuffer.obj) {
+		PyErr_SetString(ZstdError,
+			"cannot perform operation before consuming output from previous operation");
+		return NULL;
+	}
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*:compress",
+#else
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s*:compress",
+#endif
+		kwlist, &self->inBuffer)) {
+		return NULL;
+	}
+
+	if (!PyBuffer_IsContiguous(&self->inBuffer, 'C') || self->inBuffer.ndim > 1) {
+		PyErr_SetString(PyExc_ValueError,
+			"data buffer should be contiguous and have at most one dimension");
+		PyBuffer_Release(&self->inBuffer);
+		return NULL;
+	}
+
+	result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
+	if (!result) {
+		PyBuffer_Release(&self->inBuffer);
+		return NULL;
+	}
+
+	self->input.src = self->inBuffer.buf;
+	self->input.size = self->inBuffer.len;
+	self->input.pos = 0;
+
+	result->chunker = self;
+	Py_INCREF(result->chunker);
+
+	result->mode = compressionchunker_mode_normal;
+
+	return result;
+}
+
+static ZstdCompressionChunkerIterator* ZstdCompressionChunker_finish(ZstdCompressionChunker* self) {
+	ZstdCompressionChunkerIterator* result;
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "cannot call finish() after compression finished");
+		return NULL;
+	}
+
+	if (self->inBuffer.obj) {
+		PyErr_SetString(ZstdError,
+			"cannot call finish() before consuming output from previous operation");
+		return NULL;
+	}
+
+	result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
+	if (!result) {
+		return NULL;
+	}
+
+	result->chunker = self;
+	Py_INCREF(result->chunker);
+
+	result->mode = compressionchunker_mode_finish;
+
+	return result;
+}
+
+static ZstdCompressionChunkerIterator* ZstdCompressionChunker_flush(ZstdCompressionChunker* self, PyObject* args, PyObject* kwargs) {
+	ZstdCompressionChunkerIterator* result;
+
+	if (self->finished) {
+		PyErr_SetString(ZstdError, "cannot call flush() after compression finished");
+		return NULL;
+	}
+
+	if (self->inBuffer.obj) {
+		PyErr_SetString(ZstdError,
+			"cannot call flush() before consuming output from previous operation");
+		return NULL;
+	}
+
+	result = (ZstdCompressionChunkerIterator*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerIteratorType, NULL);
+	if (!result) {
+		return NULL;
+	}
+
+	result->chunker = self;
+	Py_INCREF(result->chunker);
+
+	result->mode = compressionchunker_mode_flush;
+
+	return result;
+}
+
+static PyMethodDef ZstdCompressionChunker_methods[] = {
+	{ "compress", (PyCFunction)ZstdCompressionChunker_compress, METH_VARARGS | METH_KEYWORDS,
+	PyDoc_STR("compress data") },
+	{ "finish", (PyCFunction)ZstdCompressionChunker_finish, METH_NOARGS,
+	PyDoc_STR("finish compression operation") },
+	{ "flush", (PyCFunction)ZstdCompressionChunker_flush, METH_VARARGS | METH_KEYWORDS,
+	PyDoc_STR("finish compression operation") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdCompressionChunkerType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionChunkerType",  /* tp_name */
+	sizeof(ZstdCompressionChunker),     /* tp_basicsize */
+	0,                                  /* tp_itemsize */
+	(destructor)ZstdCompressionChunker_dealloc, /* tp_dealloc */
+	0,                                  /* tp_print */
+	0,                                  /* tp_getattr */
+	0,                                  /* tp_setattr */
+	0,                                  /* tp_compare */
+	0,                                  /* tp_repr */
+	0,                                  /* tp_as_number */
+	0,                                  /* tp_as_sequence */
+	0,                                  /* tp_as_mapping */
+	0,                                  /* tp_hash */
+	0,                                  /* tp_call */
+	0,                                  /* tp_str */
+	0,                                  /* tp_getattro */
+	0,                                  /* tp_setattro */
+	0,                                  /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+	ZstdCompressionChunker__doc__,      /* tp_doc */
+	0,                                  /* tp_traverse */
+	0,                                  /* tp_clear */
+	0,                                  /* tp_richcompare */
+	0,                                  /* tp_weaklistoffset */
+	0,                                  /* tp_iter */
+	0,                                  /* tp_iternext */
+	ZstdCompressionChunker_methods,     /* tp_methods */
+	0,                                  /* tp_members */
+	0,                                  /* tp_getset */
+	0,                                  /* tp_base */
+	0,                                  /* tp_dict */
+	0,                                  /* tp_descr_get */
+	0,                                  /* tp_descr_set */
+	0,                                  /* tp_dictoffset */
+	0,                                  /* tp_init */
+	0,                                  /* tp_alloc */
+	PyType_GenericNew,                  /* tp_new */
+};
+
+void compressionchunker_module_init(PyObject* module) {
+	Py_TYPE(&ZstdCompressionChunkerIteratorType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressionChunkerIteratorType) < 0) {
+		return;
+	}
+
+	Py_TYPE(&ZstdCompressionChunkerType) = &PyType_Type;
+	if (PyType_Ready(&ZstdCompressionChunkerType) < 0) {
+		return;
+	}
+}
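In Python terms, each ``compress()``/``flush()``/``finish()`` call on a chunker
returns one of the iterators implemented above, and it must be exhausted before
the next call. A sketch (assuming the package is importable as ``zstd``)::

   import zstandard as zstd

   chunker = zstd.ZstdCompressor().chunker(chunk_size=16384)
   it = chunker.compress(b'x' * 100000)
   # Exhaust the iterator first; otherwise the extension raises ZstdError
   # ("cannot perform operation before consuming output from previous
   # operation").
   chunks = list(it)
   chunks.extend(chunker.finish())
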
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Mon Oct 22 14:46:06 2018 -0400
@@ -39,7 +39,6 @@
 	TRY_SET_PARAMETER(params, ZSTD_p_nbWorkers, obj->threads);
 	TRY_SET_PARAMETER(params, ZSTD_p_jobSize, obj->jobSize);
 	TRY_SET_PARAMETER(params, ZSTD_p_overlapSizeLog, obj->overlapSizeLog);
-	TRY_SET_PARAMETER(params, ZSTD_p_compressLiterals, obj->compressLiterals);
 	TRY_SET_PARAMETER(params, ZSTD_p_forceMaxWindow, obj->forceMaxWindow);
 	TRY_SET_PARAMETER(params, ZSTD_p_enableLongDistanceMatching, obj->enableLongDistanceMatching);
 	TRY_SET_PARAMETER(params, ZSTD_p_ldmHashLog, obj->ldmHashLog);
@@ -88,7 +87,6 @@
 		"ldm_bucket_size_log",
 		"ldm_hash_every_log",
 		"threads",
-		"compress_literals",
 		NULL
 	};
 
@@ -114,18 +112,13 @@
 	unsigned ldmHashEveryLog = 0;
 	int threads = 0;
 
-	/* Setting value 0 has the effect of disabling. So we use -1 as a default
-	 * to detect whether to set. Then we automatically derive the expected value
-	 * based on the level, just like zstandard does itself. */
-	int compressLiterals = -1;
-
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
-		"|IiIIIIIIIIIIIIIIIIIIii:CompressionParameters",
+		"|IiIIIIIIIIIIIIIIIIIIi:CompressionParameters",
 		kwlist, &format, &compressionLevel, &windowLog, &hashLog, &chainLog,
 		&searchLog, &minMatch, &targetLength, &compressionStrategy,
 		&contentSizeFlag, &checksumFlag, &dictIDFlag, &jobSize, &overlapSizeLog,
 		&forceMaxWindow, &enableLDM, &ldmHashLog, &ldmMinMatch, &ldmBucketSizeLog,
-		&ldmHashEveryLog, &threads, &compressLiterals)) {
+		&ldmHashEveryLog, &threads)) {
 		return -1;
 	}
 
@@ -133,10 +126,6 @@
 		threads = cpu_count();
 	}
 
-	if (compressLiterals < 0) {
-		compressLiterals = compressionLevel >= 0;
-	}
-
 	self->format = format;
 	self->compressionLevel = compressionLevel;
 	self->windowLog = windowLog;
@@ -152,7 +141,6 @@
 	self->threads = threads;
 	self->jobSize = jobSize;
 	self->overlapSizeLog = overlapSizeLog;
-	self->compressLiterals = compressLiterals;
 	self->forceMaxWindow = forceMaxWindow;
 	self->enableLongDistanceMatching = enableLDM;
 	self->ldmHashLog = ldmHashLog;
@@ -299,16 +287,6 @@
 		Py_DECREF(val);
 	}
 
-	val = PyDict_GetItemString(kwargs, "compress_literals");
-	if (!val) {
-		val = PyLong_FromLong(level >= 0 ? 1 : 0);
-		if (!val) {
-			goto cleanup;
-		}
-		PyDict_SetItemString(kwargs, "compress_literals", val);
-		Py_DECREF(val);
-	}
-
 	result = PyObject_New(ZstdCompressionParametersObject, &ZstdCompressionParametersType);
 	if (!result) {
 		goto cleanup;
@@ -420,9 +398,6 @@
 	{ "overlap_size_log", T_UINT,
 	  offsetof(ZstdCompressionParametersObject, overlapSizeLog), READONLY,
 	  "Size of previous input reloaded at the beginning of each job" },
-	{ "compress_literals", T_UINT,
-	  offsetof(ZstdCompressionParametersObject, compressLiterals), READONLY,
-	  "whether Huffman compression of literals is in use" },
 	{ "force_max_window", T_UINT,
 	  offsetof(ZstdCompressionParametersObject, forceMaxWindow), READONLY,
 	  "force back references to remain smaller than window size" },
--- a/contrib/python-zstandard/c-ext/compressionreader.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/compressionreader.c	Mon Oct 22 14:46:06 2018 -0400
@@ -43,20 +43,11 @@
 }
 
 static ZstdCompressionReader* reader_enter(ZstdCompressionReader* self) {
-	size_t zresult;
-
 	if (self->entered) {
 		PyErr_SetString(PyExc_ValueError, "cannot __enter__ multiple times");
 		return NULL;
 	}
 
-	zresult = ZSTD_CCtx_setPledgedSrcSize(self->compressor->cctx, self->sourceSize);
-	if (ZSTD_isError(zresult)) {
-		PyErr_Format(ZstdError, "error setting source size: %s",
-			ZSTD_getErrorName(zresult));
-		return NULL;
-	}
-
 	self->entered = 1;
 
 	Py_INCREF(self);
@@ -132,15 +123,6 @@
 	Py_RETURN_NONE;
 }
 
-static PyObject* reader_closed(ZstdCompressionReader* self) {
-	if (self->closed) {
-		Py_RETURN_TRUE;
-	}
-	else {
-		Py_RETURN_FALSE;
-	}
-}
-
 static PyObject* reader_tell(ZstdCompressionReader* self) {
 	/* TODO should this raise OSError since stream isn't seekable? */
 	return PyLong_FromUnsignedLongLong(self->bytesCompressed);
@@ -159,11 +141,6 @@
 	size_t zresult;
 	size_t oldPos;
 
-	if (!self->entered) {
-		PyErr_SetString(ZstdError, "read() must be called from an active context manager");
-		return NULL;
-	}
-
 	if (self->closed) {
 		PyErr_SetString(PyExc_ValueError, "stream is closed");
 		return NULL;
@@ -333,8 +310,6 @@
 	PyDoc_STR("Exit a compression context") },
 	{ "close", (PyCFunction)reader_close, METH_NOARGS,
 	PyDoc_STR("Close the stream so it cannot perform any more operations") },
-	{ "closed", (PyCFunction)reader_closed, METH_NOARGS,
-	PyDoc_STR("Whether stream is closed") },
 	{ "flush", (PyCFunction)reader_flush, METH_NOARGS, PyDoc_STR("no-ops") },
 	{ "isatty", (PyCFunction)reader_isatty, METH_NOARGS, PyDoc_STR("Returns False") },
 	{ "readable", (PyCFunction)reader_readable, METH_NOARGS,
@@ -354,6 +329,12 @@
 	{ NULL, NULL }
 };
 
+static PyMemberDef reader_members[] = {
+	{ "closed", T_BOOL, offsetof(ZstdCompressionReader, closed),
+	  READONLY, "whether stream is closed" },
+	{ NULL }
+};
+
 PyTypeObject ZstdCompressionReaderType = {
 	PyVarObject_HEAD_INIT(NULL, 0)
 	"zstd.ZstdCompressionReader", /* tp_name */
@@ -383,7 +364,7 @@
 	reader_iter, /* tp_iter */
 	reader_iternext, /* tp_iternext */
 	reader_methods, /* tp_methods */
-	0, /* tp_members */
+	reader_members, /* tp_members */
 	0, /* tp_getset */
 	0, /* tp_base */
 	0, /* tp_dict */
--- a/contrib/python-zstandard/c-ext/compressionwriter.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Mon Oct 22 14:46:06 2018 -0400
@@ -222,10 +222,6 @@
 			return NULL;
 		}
 
-		if (!output.pos) {
-			break;
-		}
-
 		/* Copy data from output buffer to writer. */
 		if (output.pos) {
 #if PY_MAJOR_VERSION >= 3
@@ -238,7 +234,12 @@
 			totalWrite += output.pos;
 			self->bytesCompressed += output.pos;
 		}
+
 		output.pos = 0;
+
+		if (!zresult) {
+			break;
+		}
 	}
 
 	PyMem_Free(output.dst);
--- a/contrib/python-zstandard/c-ext/compressobj.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Mon Oct 22 14:46:06 2018 -0400
@@ -115,6 +115,7 @@
 	PyObject* result = NULL;
 	Py_ssize_t resultSize = 0;
 	ZSTD_inBuffer input;
+	ZSTD_EndDirective zFlushMode;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:flush", kwlist, &flushMode)) {
 		return NULL;
@@ -130,52 +131,34 @@
 		return NULL;
 	}
 
+	switch (flushMode) {
+		case compressorobj_flush_block:
+			zFlushMode = ZSTD_e_flush;
+			break;
+
+		case compressorobj_flush_finish:
+			zFlushMode = ZSTD_e_end;
+			self->finished = 1;
+			break;
+
+		default:
+			PyErr_SetString(ZstdError, "unhandled flush mode");
+			return NULL;
+	}
+
 	assert(self->output.pos == 0);
 
 	input.src = NULL;
 	input.size = 0;
 	input.pos = 0;
 
-	if (flushMode == compressorobj_flush_block) {
-		/* The output buffer is of size ZSTD_CStreamOutSize(), which is 
-		   guaranteed to hold a full block. */
+	while (1) {
 		Py_BEGIN_ALLOW_THREADS
-			zresult = ZSTD_compress_generic(self->compressor->cctx, &self->output,
-				&input, ZSTD_e_flush);
+		zresult = ZSTD_compress_generic(self->compressor->cctx, &self->output,
+			&input, zFlushMode);
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
-			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
-			return NULL;
-		}
-
-		/* Output buffer is guaranteed to hold full block. */
-		assert(zresult == 0);
-
-		if (self->output.pos) {
-			result = PyBytes_FromStringAndSize(self->output.dst, self->output.pos);
-			if (!result) {
-				return NULL;
-			}
-		}
-
-		self->output.pos = 0;
-
-		if (result) {
-			return result;
-		}
-		else {
-			return PyBytes_FromString("");
-		}
-	}
-
-	assert(flushMode == compressorobj_flush_finish);
-	self->finished = 1;
-
-	while (1) {
-		zresult = ZSTD_compress_generic(self->compressor->cctx, &self->output,
-			&input, ZSTD_e_end);
-		if (ZSTD_isError(zresult)) {
 			PyErr_Format(ZstdError, "error ending compression stream: %s",
 				ZSTD_getErrorName(zresult));
 			return NULL;
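The rewrite above folds the two separate flush paths into one loop driven by a ZSTD_EndDirective. A sketch of the observable Python behavior, using the constants exercised by the tests further down:

    import zstd

    cctx = zstd.ZstdCompressor()
    cobj = cctx.compressobj()

    cobj.compress(b'foo')
    block = cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)  # ZSTD_e_flush: emit a block, keep the frame open
    cobj.compress(b'bar')
    tail = cobj.flush()  # default COMPRESSOBJ_FLUSH_FINISH -> ZSTD_e_end: closes the frame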
--- a/contrib/python-zstandard/c-ext/compressor.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/compressor.c	Mon Oct 22 14:46:06 2018 -0400
@@ -11,15 +11,13 @@
 
 extern PyObject* ZstdError;
 
-int ensure_cctx(ZstdCompressor* compressor) {
+int setup_cctx(ZstdCompressor* compressor) {
 	size_t zresult;
 
 	assert(compressor);
 	assert(compressor->cctx);
 	assert(compressor->params);
 
-	ZSTD_CCtx_reset(compressor->cctx);
-
 	zresult = ZSTD_CCtx_setParametersUsingCCtxParams(compressor->cctx, compressor->params);
 	if (ZSTD_isError(zresult)) {
 		PyErr_Format(ZstdError, "could not set compression parameters: %s",
@@ -237,9 +235,9 @@
 		Py_INCREF(dict);
 	}
 
-	if (ensure_cctx(self)) {
-		return -1;
-	}
+	if (setup_cctx(self)) {
+		return -1;
+	}
 
 	return 0;
 }
@@ -346,9 +344,7 @@
 		return NULL;
 	}
 
-	if (ensure_cctx(self)) {
-		return NULL;
-	}
+	ZSTD_CCtx_reset(self->cctx);
 
 	zresult = ZSTD_CCtx_setPledgedSrcSize(self->cctx, sourceSize);
 	if (ZSTD_isError(zresult)) {
@@ -489,6 +485,7 @@
 	unsigned long long sourceSize = ZSTD_CONTENTSIZE_UNKNOWN;
 	size_t readSize = ZSTD_CStreamInSize();
 	ZstdCompressionReader* result = NULL;
+	size_t zresult;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Kk:stream_reader", kwlist,
 		&source, &sourceSize, &readSize)) {
@@ -520,13 +517,17 @@
 		goto except;
 	}
 
-	if (ensure_cctx(self)) {
+	ZSTD_CCtx_reset(self->cctx);
+
+	zresult = ZSTD_CCtx_setPledgedSrcSize(self->cctx, sourceSize);
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "error setting source size: %s",
+			ZSTD_getErrorName(zresult));
 		goto except;
 	}
 
 	result->compressor = self;
 	Py_INCREF(self);
-	result->sourceSize = sourceSize;
 
 	return result;
 
@@ -576,9 +577,7 @@
 		goto finally;
 	}
 
-	if (ensure_cctx(self)) {
-		goto finally;
-	}
+	ZSTD_CCtx_reset(self->cctx);
 
 	destSize = ZSTD_compressBound(source.len);
 	output = PyBytes_FromStringAndSize(NULL, destSize);
@@ -652,9 +651,7 @@
 		return NULL;
 	}
 
-	if (ensure_cctx(self)) {
-		return NULL;
-	}
+	ZSTD_CCtx_reset(self->cctx);
 
 	zresult = ZSTD_CCtx_setPledgedSrcSize(self->cctx, inSize);
 	if (ZSTD_isError(zresult)) {
@@ -743,9 +740,7 @@
 		goto except;
 	}
 
-	if (ensure_cctx(self)) {
-		return NULL;
-	}
+	ZSTD_CCtx_reset(self->cctx);
 
 	zresult = ZSTD_CCtx_setPledgedSrcSize(self->cctx, sourceSize);
 	if (ZSTD_isError(zresult)) {
@@ -817,9 +812,7 @@
 		return NULL;
 	}
 
-	if (ensure_cctx(self)) {
-		return NULL;
-	}
+	ZSTD_CCtx_reset(self->cctx);
 
 	result = (ZstdCompressionWriter*)PyObject_CallObject((PyObject*)&ZstdCompressionWriterType, NULL);
 	if (!result) {
@@ -839,6 +832,58 @@
 	return result;
 }
 
+PyDoc_STRVAR(ZstdCompressor_chunker__doc__,
+"Create an object for iterative compressing to same-sized chunks.\n"
+);
+
+static ZstdCompressionChunker* ZstdCompressor_chunker(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"size",
+		"chunk_size",
+		NULL
+	};
+
+	unsigned long long sourceSize = ZSTD_CONTENTSIZE_UNKNOWN;
+	size_t chunkSize = ZSTD_CStreamOutSize();
+	ZstdCompressionChunker* chunker;
+	size_t zresult;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Kk:chunker", kwlist,
+		&sourceSize, &chunkSize)) {
+		return NULL;
+	}
+
+	ZSTD_CCtx_reset(self->cctx);
+
+	zresult = ZSTD_CCtx_setPledgedSrcSize(self->cctx, sourceSize);
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "error setting source size: %s",
+			ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	chunker = (ZstdCompressionChunker*)PyObject_CallObject((PyObject*)&ZstdCompressionChunkerType, NULL);
+	if (!chunker) {
+		return NULL;
+	}
+
+	chunker->output.dst = PyMem_Malloc(chunkSize);
+	if (!chunker->output.dst) {
+		PyErr_NoMemory();
+		Py_DECREF(chunker);
+		return NULL;
+	}
+	chunker->output.size = chunkSize;
+	chunker->output.pos = 0;
+
+	chunker->compressor = self;
+	Py_INCREF(chunker->compressor);
+
+	chunker->chunkSize = chunkSize;
+
+	return chunker;
+}
+
 typedef struct {
 	void* sourceData;
 	size_t sourceSize;
@@ -1524,6 +1569,8 @@
 }
 
 static PyMethodDef ZstdCompressor_methods[] = {
+	{ "chunker", (PyCFunction)ZstdCompressor_chunker,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_chunker__doc__ },
 	{ "compress", (PyCFunction)ZstdCompressor_compress,
 	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_compress__doc__ },
 	{ "compressobj", (PyCFunction)ZstdCompressor_compressobj,
--- a/contrib/python-zstandard/c-ext/constants.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/constants.c	Mon Oct 22 14:46:06 2018 -0400
@@ -27,7 +27,6 @@
 #else
 	version = PyString_FromString(PYTHON_ZSTANDARD_VERSION);
 #endif
-	Py_INCREF(version);
 	PyModule_AddObject(mod, "__version__", version);
 
 	ZstdError = PyErr_NewException("zstd.ZstdError", NULL, NULL);
@@ -41,7 +40,6 @@
 	PyTuple_SetItem(zstdVersion, 0, PyLong_FromLong(ZSTD_VERSION_MAJOR));
 	PyTuple_SetItem(zstdVersion, 1, PyLong_FromLong(ZSTD_VERSION_MINOR));
 	PyTuple_SetItem(zstdVersion, 2, PyLong_FromLong(ZSTD_VERSION_RELEASE));
-	Py_INCREF(zstdVersion);
 	PyModule_AddObject(mod, "ZSTD_VERSION", zstdVersion);
 
 	frameHeader = PyBytes_FromStringAndSize(frame_header, sizeof(frame_header));
@@ -68,6 +66,8 @@
 		(long)ZSTD_DStreamOutSize());
 
 	PyModule_AddIntConstant(mod, "MAGIC_NUMBER", ZSTD_MAGICNUMBER);
+	PyModule_AddIntConstant(mod, "BLOCKSIZELOG_MAX", ZSTD_BLOCKSIZELOG_MAX);
+	PyModule_AddIntConstant(mod, "BLOCKSIZE_MAX", ZSTD_BLOCKSIZE_MAX);
 	PyModule_AddIntConstant(mod, "WINDOWLOG_MIN", ZSTD_WINDOWLOG_MIN);
 	PyModule_AddIntConstant(mod, "WINDOWLOG_MAX", ZSTD_WINDOWLOG_MAX);
 	PyModule_AddIntConstant(mod, "CHAINLOG_MIN", ZSTD_CHAINLOG_MIN);
@@ -80,6 +80,7 @@
 	PyModule_AddIntConstant(mod, "SEARCHLENGTH_MIN", ZSTD_SEARCHLENGTH_MIN);
 	PyModule_AddIntConstant(mod, "SEARCHLENGTH_MAX", ZSTD_SEARCHLENGTH_MAX);
 	PyModule_AddIntConstant(mod, "TARGETLENGTH_MIN", ZSTD_TARGETLENGTH_MIN);
+	PyModule_AddIntConstant(mod, "TARGETLENGTH_MAX", ZSTD_TARGETLENGTH_MAX);
 	PyModule_AddIntConstant(mod, "LDM_MINMATCH_MIN", ZSTD_LDM_MINMATCH_MIN);
 	PyModule_AddIntConstant(mod, "LDM_MINMATCH_MAX", ZSTD_LDM_MINMATCH_MAX);
 	PyModule_AddIntConstant(mod, "LDM_BUCKETSIZELOG_MAX", ZSTD_LDM_BUCKETSIZELOG_MAX);
--- a/contrib/python-zstandard/c-ext/decompressionreader.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/decompressionreader.c	Mon Oct 22 14:46:06 2018 -0400
@@ -47,10 +47,6 @@
 		return NULL;
 	}
 
-	if (ensure_dctx(self->decompressor, 1)) {
-		return NULL;
-	}
-
 	self->entered = 1;
 
 	Py_INCREF(self);
@@ -98,15 +94,6 @@
 	Py_RETURN_NONE;
 }
 
-static PyObject* reader_closed(ZstdDecompressionReader* self) {
-	if (self->closed) {
-		Py_RETURN_TRUE;
-	}
-	else {
-		Py_RETURN_FALSE;
-	}
-}
-
 static PyObject* reader_flush(PyObject* self) {
 	Py_RETURN_NONE;
 }
@@ -128,11 +115,6 @@
 	ZSTD_outBuffer output;
 	size_t zresult;
 
-	if (!self->entered) {
-		PyErr_SetString(ZstdError, "read() must be called from an active context manager");
-		return NULL;
-	}
-
 	if (self->closed) {
 		PyErr_SetString(PyExc_ValueError, "stream is closed");
 		return NULL;
@@ -281,11 +263,6 @@
 	unsigned long long readAmount = 0;
 	size_t defaultOutSize = ZSTD_DStreamOutSize();
 
-	if (!self->entered) {
-		PyErr_SetString(ZstdError, "seek() must be called from an active context manager");
-		return NULL;
-	}
-
 	if (self->closed) {
 		PyErr_SetString(PyExc_ValueError, "stream is closed");
 		return NULL;
@@ -384,8 +361,6 @@
 	PyDoc_STR("Exit a compression context") },
 	{ "close", (PyCFunction)reader_close, METH_NOARGS,
 	PyDoc_STR("Close the stream so it cannot perform any more operations") },
-	{ "closed", (PyCFunction)reader_closed, METH_NOARGS,
-	PyDoc_STR("Whether stream is closed") },
 	{ "flush", (PyCFunction)reader_flush, METH_NOARGS, PyDoc_STR("no-ops") },
 	{ "isatty", (PyCFunction)reader_isatty, METH_NOARGS, PyDoc_STR("Returns False") },
 	{ "readable", (PyCFunction)reader_readable, METH_NOARGS,
@@ -407,6 +382,12 @@
 	{ NULL, NULL }
 };
 
+static PyMemberDef reader_members[] = {
+	{ "closed", T_BOOL, offsetof(ZstdDecompressionReader, closed),
+	  READONLY, "whether stream is closed" },
+	{ NULL }
+};
+
 PyTypeObject ZstdDecompressionReaderType = {
 	PyVarObject_HEAD_INIT(NULL, 0)
 	"zstd.ZstdDecompressionReader", /* tp_name */
@@ -436,7 +417,7 @@
 	reader_iter, /* tp_iter */
 	reader_iternext, /* tp_iternext */
 	reader_methods, /* tp_methods */
-	0, /* tp_members */
+	reader_members, /* tp_members */
 	0, /* tp_getset */
 	0, /* tp_base */
 	0, /* tp_dict */
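Symmetrically to the compression reader, the decompression reader now works outside a context manager and exposes closed as an attribute; the ensure_dctx() call moves to stream_reader() construction (see decompressor.c below). A sketch based on test_no_context_manager:

    import zstd

    frame = zstd.ZstdCompressor().compress(b'foobar' * 60)
    reader = zstd.ZstdDecompressor().stream_reader(frame)

    assert reader.read(6) == b'foobar'  # no with-block needed
    reader.close()
    assert reader.closed
    # any further read()/seek() raises ValueError('stream is closed')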
--- a/contrib/python-zstandard/c-ext/decompressobj.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Mon Oct 22 14:46:06 2018 -0400
@@ -33,6 +33,8 @@
 	PyObject* result = NULL;
 	Py_ssize_t resultSize = 0;
 
+	output.dst = NULL;
+
 	if (self->finished) {
 		PyErr_SetString(ZstdError, "cannot use a decompressobj multiple times");
 		return NULL;
@@ -53,6 +55,12 @@
 		goto finally;
 	}
 
+	/* Special case of empty input. Output will always be empty. */
+	if (source.len == 0) {
+		result = PyBytes_FromString("");
+		goto finally;
+	}
+
 	input.src = source.buf;
 	input.size = source.len;
 	input.pos = 0;
@@ -65,8 +73,7 @@
 	output.size = self->outSize;
 	output.pos = 0;
 
-	/* Read input until exhausted. */
-	while (input.pos < input.size) {
+	while (1) {
 		Py_BEGIN_ALLOW_THREADS
 		zresult = ZSTD_decompress_generic(self->decompressor->dctx, &output, &input);
 		Py_END_ALLOW_THREADS
@@ -98,9 +105,13 @@
 					goto except;
 				}
 			}
+		}
 
-			output.pos = 0;
+		if (zresult == 0 || (input.pos == input.size && output.pos == 0)) {
+			break;
 		}
+
+		output.pos = 0;
 	}
 
 	if (!result) {
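Two behavior changes fall out of this hunk: empty input now short-circuits to empty output, and the loop keeps draining zstd's output buffer even after the input is exhausted. A sketch under those assumptions:

    import zstd

    dobj = zstd.ZstdDecompressor().decompressobj()
    assert dobj.decompress(b'') == b''  # special case: never calls into zstd

    frame = zstd.ZstdCompressor().compress(b'foo' * 1024)
    assert dobj.decompress(frame) == b'foo' * 1024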
--- a/contrib/python-zstandard/c-ext/decompressor.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Mon Oct 22 14:46:06 2018 -0400
@@ -575,6 +575,10 @@
 		return NULL;
 	}
 
+	if (ensure_dctx(self, 1)) {
+		return NULL;
+	}
+
 	result = (ZstdDecompressionReader*)PyObject_CallObject((PyObject*)&ZstdDecompressionReaderType, NULL);
 	if (NULL == result) {
 		return NULL;
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Mon Oct 22 14:46:06 2018 -0400
@@ -15,7 +15,8 @@
 #include <zstd.h>
 #include <zdict.h>
 
-#define PYTHON_ZSTANDARD_VERSION "0.9.0"
+/* Remember to change the string in zstandard/__init__ as well */
+#define PYTHON_ZSTANDARD_VERSION "0.10.1"
 
 typedef enum {
 	compressorobj_flush_finish,
@@ -45,7 +46,6 @@
 	unsigned threads;
 	unsigned jobSize;
 	unsigned overlapSizeLog;
-	unsigned compressLiterals;
 	unsigned forceMaxWindow;
 	unsigned enableLongDistanceMatching;
 	unsigned ldmHashLog;
@@ -162,7 +162,6 @@
 	ZstdCompressor* compressor;
 	PyObject* reader;
 	Py_buffer buffer;
-	unsigned long long sourceSize;
 	size_t readSize;
 
 	int entered;
@@ -181,6 +180,34 @@
 typedef struct {
 	PyObject_HEAD
 
+	ZstdCompressor* compressor;
+	ZSTD_inBuffer input;
+	ZSTD_outBuffer output;
+	Py_buffer inBuffer;
+	int finished;
+	size_t chunkSize;
+} ZstdCompressionChunker;
+
+extern PyTypeObject ZstdCompressionChunkerType;
+
+typedef enum {
+	compressionchunker_mode_normal,
+	compressionchunker_mode_flush,
+	compressionchunker_mode_finish,
+} CompressionChunkerMode;
+
+typedef struct {
+	PyObject_HEAD
+
+	ZstdCompressionChunker* chunker;
+	CompressionChunkerMode mode;
+} ZstdCompressionChunkerIterator;
+
+extern PyTypeObject ZstdCompressionChunkerIteratorType;
+
+typedef struct {
+	PyObject_HEAD
+
 	ZSTD_DCtx* dctx;
 	ZstdCompressionDict* dict;
 	size_t maxWindowSize;
--- a/contrib/python-zstandard/make_cffi.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/make_cffi.py	Mon Oct 22 14:46:06 2018 -0400
@@ -17,6 +17,7 @@
 HERE = os.path.abspath(os.path.dirname(__file__))
 
 SOURCES = ['zstd/%s' % p for p in (
+    'common/debug.c',
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
@@ -25,6 +26,7 @@
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
+    'compress/hist.c',
     'compress/huf_compress.c',
     'compress/zstd_compress.c',
     'compress/zstd_double_fast.c',
@@ -36,6 +38,7 @@
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
     'dictBuilder/cover.c',
+    'dictBuilder/fastcover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
--- a/contrib/python-zstandard/setup_zstd.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/setup_zstd.py	Mon Oct 22 14:46:06 2018 -0400
@@ -6,12 +6,12 @@
 
 import distutils.ccompiler
 import os
-import sys
 
 from distutils.extension import Extension
 
 
 zstd_sources = ['zstd/%s' % p for p in (
+    'common/debug.c',
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
@@ -20,6 +20,7 @@
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
+    'compress/hist.c',
     'compress/huf_compress.c',
     'compress/zstd_compress.c',
     'compress/zstd_double_fast.c',
@@ -32,6 +33,7 @@
     'decompress/zstd_decompress.c',
     'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
+    'dictBuilder/fastcover.c',
     'dictBuilder/zdict.c',
 )]
 
@@ -75,6 +77,7 @@
     'c-ext/compressobj.c',
     'c-ext/compressor.c',
     'c-ext/compressoriterator.c',
+    'c-ext/compressionchunker.c',
     'c-ext/compressionparams.c',
     'c-ext/compressionreader.c',
     'c-ext/compressionwriter.c',
@@ -93,25 +96,45 @@
 
 
 def get_c_extension(support_legacy=False, system_zstd=False, name='zstd',
-                    warnings_as_errors=False):
-    """Obtain a distutils.extension.Extension for the C extension."""
-    root = os.path.abspath(os.path.dirname(__file__))
+                    warnings_as_errors=False, root=None):
+    """Obtain a distutils.extension.Extension for the C extension.
+
+    ``support_legacy`` controls whether to compile in legacy zstd format support.
+
+    ``system_zstd`` controls whether to compile against the system zstd library.
+    For this to work, the system zstd library and headers must match what
+    python-zstandard is coded against exactly.
+
+    ``name`` is the module name of the C extension to produce.
+
+    ``warnings_as_errors`` controls whether compiler warnings are turned into
+    compiler errors.
 
-    sources = set([os.path.join(root, p) for p in ext_sources])
+    ``root`` defines a root path that sources should be computed relative
+    to. This should be the directory containing the main ``setup.py`` that is
+    being invoked. If not defined, paths will be relative to this file.
+    """
+    actual_root = os.path.abspath(os.path.dirname(__file__))
+    root = root or actual_root
+
+    sources = set([os.path.join(actual_root, p) for p in ext_sources])
     if not system_zstd:
-        sources.update([os.path.join(root, p) for p in zstd_sources])
+        sources.update([os.path.join(actual_root, p) for p in zstd_sources])
         if support_legacy:
-            sources.update([os.path.join(root, p) for p in zstd_sources_legacy])
+            sources.update([os.path.join(actual_root, p)
+                            for p in zstd_sources_legacy])
     sources = list(sources)
 
-    include_dirs = set([os.path.join(root, d) for d in ext_includes])
+    include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(root, d) for d in zstd_includes])
+        include_dirs.update([os.path.join(actual_root, d)
+                             for d in zstd_includes])
         if support_legacy:
-            include_dirs.update([os.path.join(root, d) for d in zstd_includes_legacy])
+            include_dirs.update([os.path.join(actual_root, d)
+                                 for d in zstd_includes_legacy])
     include_dirs = list(include_dirs)
 
-    depends = [os.path.join(root, p) for p in zstd_depends]
+    depends = [os.path.join(actual_root, p) for p in zstd_depends]
 
     compiler = distutils.ccompiler.new_compiler()
 
@@ -152,6 +175,11 @@
 
     libraries = ['zstd'] if system_zstd else []
 
+    # Python 3.7 doesn't like absolute paths. So normalize to relative.
+    sources = [os.path.relpath(p, root) for p in sources]
+    include_dirs = [os.path.relpath(p, root) for p in include_dirs]
+    depends = [os.path.relpath(p, root) for p in depends]
+
     # TODO compile with optimizations.
     return Extension(name, sources,
                      include_dirs=include_dirs,
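The new root keyword lets a parent project embed the extension while emitting source paths relative to its own setup.py (Python 3.7 doesn't accept absolute paths, per the comment in the diff above). A hypothetical downstream caller:

    # hypothetical setup.py in a project vendoring python-zstandard
    import os
    from setup_zstd import get_c_extension

    here = os.path.abspath(os.path.dirname(__file__))
    ext = get_c_extension(name='zstd', root=here)  # sources become relative to `here`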
--- a/contrib/python-zstandard/tests/test_compressor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_compressor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -153,7 +153,7 @@
         no_params = zstd.get_frame_parameters(no_dict_id)
         with_params = zstd.get_frame_parameters(with_dict_id)
         self.assertEqual(no_params.dict_id, 0)
-        self.assertEqual(with_params.dict_id, 1387616518)
+        self.assertEqual(with_params.dict_id, 1880053135)
 
     def test_compress_dict_multiple(self):
         samples = []
@@ -216,7 +216,7 @@
         self.assertEqual(params.dict_id, d.dict_id())
 
         self.assertEqual(result,
-                         b'\x28\xb5\x2f\xfd\x23\x06\x59\xb5\x52\x03\x19\x00\x00'
+                         b'\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00'
                          b'\x66\x6f\x6f')
 
     def test_multithreaded_compression_params(self):
@@ -336,7 +336,9 @@
                          b'\x28\xb5\x2f\xfd\x00\x48\x18\x00\x00foo')
         self.assertEqual(cobj.compress(b'bar'), b'')
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(), b'\x19\x00\x00bar')
+        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK),
+                         b'\x18\x00\x00bar')
+        self.assertEqual(cobj.flush(), b'\x01\x00\x00')
 
     def test_flush_empty_block(self):
         cctx = zstd.ZstdCompressor(write_checksum=True)
@@ -576,15 +578,23 @@
     def test_context_manager(self):
         cctx = zstd.ZstdCompressor()
 
-        reader = cctx.stream_reader(b'foo' * 60)
-        with self.assertRaisesRegexp(zstd.ZstdError, 'read\(\) must be called from an active'):
-            reader.read(10)
-
         with cctx.stream_reader(b'foo') as reader:
             with self.assertRaisesRegexp(ValueError, 'cannot __enter__ multiple times'):
                 with reader as reader2:
                     pass
 
+    def test_no_context_manager(self):
+        cctx = zstd.ZstdCompressor()
+
+        reader = cctx.stream_reader(b'foo')
+        reader.read(4)
+        self.assertFalse(reader.closed)
+
+        reader.close()
+        self.assertTrue(reader.closed)
+        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            reader.read(1)
+
     def test_not_implemented(self):
         cctx = zstd.ZstdCompressor()
 
@@ -619,13 +629,18 @@
             self.assertFalse(reader.writable())
             self.assertFalse(reader.seekable())
             self.assertFalse(reader.isatty())
+            self.assertFalse(reader.closed)
             self.assertIsNone(reader.flush())
+            self.assertFalse(reader.closed)
+
+        self.assertTrue(reader.closed)
 
     def test_read_closed(self):
         cctx = zstd.ZstdCompressor()
 
         with cctx.stream_reader(b'foo' * 60) as reader:
             reader.close()
+            self.assertTrue(reader.closed)
             with self.assertRaisesRegexp(ValueError, 'stream is closed'):
                 reader.read(10)
 
@@ -715,7 +730,7 @@
             while reader.read(8192):
                 pass
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'read\(\) must be called from an active'):
+        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
             reader.read(10)
 
     def test_bad_size(self):
@@ -792,7 +807,7 @@
         d = zstd.train_dictionary(8192, samples)
 
         h = hashlib.sha1(d.as_bytes()).hexdigest()
-        self.assertEqual(h, '3040faa0ddc37d50e71a4dd28052cb8db5d9d027')
+        self.assertEqual(h, '2b3b6428da5bf2c9cc9d4bb58ba0bc5990dd0e79')
 
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=9, dict_data=d)
@@ -808,9 +823,16 @@
         self.assertEqual(params.window_size, 2097152)
         self.assertEqual(params.dict_id, d.dict_id())
         self.assertFalse(params.has_checksum)
-        self.assertEqual(compressed,
-                         b'\x28\xb5\x2f\xfd\x03\x58\x06\x59\xb5\x52\x5d\x00'
-                         b'\x00\x00\x02\xfc\x3d\x3f\xd9\xb0\x51\x03\x45\x89')
+
+        h = hashlib.sha1(compressed).hexdigest()
+        self.assertEqual(h, '23f88344263678478f5f82298e0a5d1833125786')
+
+        source = b'foo' + b'bar' + (b'foo' * 16384)
+
+        dctx = zstd.ZstdDecompressor(dict_data=d)
+
+        self.assertEqual(dctx.decompress(compressed, max_output_size=len(source)),
+                         source)
 
     def test_compression_params(self):
         params = zstd.ZstdCompressionParameters(
@@ -1157,6 +1179,181 @@
         b''.join(cctx.read_to_iter(source))
 
 
+@make_cffi
+class TestCompressor_chunker(unittest.TestCase):
+    def test_empty(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        chunker = cctx.chunker()
+
+        it = chunker.compress(b'')
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        it = chunker.finish()
+
+        self.assertEqual(next(it), b'\x28\xb5\x2f\xfd\x00\x50\x01\x00\x00')
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+    def test_simple_input(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker()
+
+        it = chunker.compress(b'foobar')
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        it = chunker.compress(b'baz' * 30)
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        it = chunker.finish()
+
+        self.assertEqual(next(it),
+                         b'\x28\xb5\x2f\xfd\x00\x50\x7d\x00\x00\x48\x66\x6f'
+                         b'\x6f\x62\x61\x72\x62\x61\x7a\x01\x00\xe4\xe4\x8e')
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+    def test_input_size(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker(size=1024)
+
+        it = chunker.compress(b'x' * 1000)
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        it = chunker.compress(b'y' * 24)
+
+        with self.assertRaises(StopIteration):
+            next(it)
+
+        chunks = list(chunker.finish())
+
+        self.assertEqual(chunks, [
+            b'\x28\xb5\x2f\xfd\x60\x00\x03\x65\x00\x00\x18\x78\x78\x79\x02\x00'
+            b'\xa0\x16\xe3\x2b\x80\x05'
+        ])
+
+        dctx = zstd.ZstdDecompressor()
+
+        self.assertEqual(dctx.decompress(b''.join(chunks)),
+                         (b'x' * 1000) + (b'y' * 24))
+
+    def test_small_chunk_size(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker(chunk_size=1)
+
+        chunks = list(chunker.compress(b'foo' * 1024))
+        self.assertEqual(chunks, [])
+
+        chunks = list(chunker.finish())
+        self.assertTrue(all(len(chunk) == 1 for chunk in chunks))
+
+        self.assertEqual(
+            b''.join(chunks),
+            b'\x28\xb5\x2f\xfd\x00\x50\x55\x00\x00\x18\x66\x6f\x6f\x01\x00'
+            b'\xfa\xd3\x77\x43')
+
+        dctx = zstd.ZstdDecompressor()
+        self.assertEqual(dctx.decompress(b''.join(chunks),
+                                         max_output_size=10000),
+                         b'foo' * 1024)
+
+    def test_input_types(self):
+        cctx = zstd.ZstdCompressor()
+
+        mutable_array = bytearray(3)
+        mutable_array[:] = b'foo'
+
+        sources = [
+            memoryview(b'foo'),
+            bytearray(b'foo'),
+            mutable_array,
+        ]
+
+        for source in sources:
+            chunker = cctx.chunker()
+
+            self.assertEqual(list(chunker.compress(source)), [])
+            self.assertEqual(list(chunker.finish()), [
+                b'\x28\xb5\x2f\xfd\x00\x50\x19\x00\x00\x66\x6f\x6f'
+            ])
+
+    def test_flush(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker()
+
+        self.assertEqual(list(chunker.compress(b'foo' * 1024)), [])
+        self.assertEqual(list(chunker.compress(b'bar' * 1024)), [])
+
+        chunks1 = list(chunker.flush())
+
+        self.assertEqual(chunks1, [
+            b'\x28\xb5\x2f\xfd\x00\x50\x8c\x00\x00\x30\x66\x6f\x6f\x62\x61\x72'
+            b'\x02\x00\xfa\x03\xfe\xd0\x9f\xbe\x1b\x02'
+        ])
+
+        self.assertEqual(list(chunker.flush()), [])
+        self.assertEqual(list(chunker.flush()), [])
+
+        self.assertEqual(list(chunker.compress(b'baz' * 1024)), [])
+
+        chunks2 = list(chunker.flush())
+        self.assertEqual(len(chunks2), 1)
+
+        chunks3 = list(chunker.finish())
+        self.assertEqual(len(chunks3), 1)
+
+        dctx = zstd.ZstdDecompressor()
+
+        self.assertEqual(dctx.decompress(b''.join(chunks1 + chunks2 + chunks3),
+                                         max_output_size=10000),
+                         (b'foo' * 1024) + (b'bar' * 1024) + (b'baz' * 1024))
+
+    def test_compress_after_finish(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker()
+
+        list(chunker.compress(b'foo'))
+        list(chunker.finish())
+
+        with self.assertRaisesRegexp(
+                zstd.ZstdError,
+                'cannot call compress\(\) after compression finished'):
+            list(chunker.compress(b'foo'))
+
+    def test_flush_after_finish(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker()
+
+        list(chunker.compress(b'foo'))
+        list(chunker.finish())
+
+        with self.assertRaisesRegexp(
+                zstd.ZstdError,
+                'cannot call flush\(\) after compression finished'):
+            list(chunker.flush())
+
+    def test_finish_after_finish(self):
+        cctx = zstd.ZstdCompressor()
+        chunker = cctx.chunker()
+
+        list(chunker.compress(b'foo'))
+        list(chunker.finish())
+
+        with self.assertRaisesRegexp(
+                zstd.ZstdError,
+                'cannot call finish\(\) after compression finished'):
+            list(chunker.finish())
+
+
 class TestCompressor_multi_compress_to_buffer(unittest.TestCase):
     def test_invalid_inputs(self):
         cctx = zstd.ZstdCompressor()
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Mon Oct 22 14:46:06 2018 -0400
@@ -135,6 +135,51 @@
 
         self.assertEqual(b''.join(chunks), ref_frame)
 
+    @hypothesis.settings(
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      chunk_sizes=strategies.data(),
+                      flushes=strategies.data())
+    def test_flush_block(self, original, level, chunk_sizes, flushes):
+        cctx = zstd.ZstdCompressor(level=level)
+        cobj = cctx.compressobj()
+
+        dctx = zstd.ZstdDecompressor()
+        dobj = dctx.decompressobj()
+
+        compressed_chunks = []
+        decompressed_chunks = []
+        i = 0
+        while True:
+            input_size = chunk_sizes.draw(strategies.integers(1, 4096))
+            source = original[i:i + input_size]
+            if not source:
+                break
+
+            i += input_size
+
+            chunk = cobj.compress(source)
+            compressed_chunks.append(chunk)
+            decompressed_chunks.append(dobj.decompress(chunk))
+
+            if not flushes.draw(strategies.booleans()):
+                continue
+
+            chunk = cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
+            compressed_chunks.append(chunk)
+            decompressed_chunks.append(dobj.decompress(chunk))
+
+            self.assertEqual(b''.join(decompressed_chunks), original[0:i])
+
+        chunk = cobj.flush(zstd.COMPRESSOBJ_FLUSH_FINISH)
+        compressed_chunks.append(chunk)
+        decompressed_chunks.append(dobj.decompress(chunk))
+
+        self.assertEqual(dctx.decompress(b''.join(compressed_chunks),
+                                         max_output_size=len(original)),
+                         original)
+        self.assertEqual(b''.join(decompressed_chunks), original)
 
 @unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
 @make_cffi
@@ -186,3 +231,90 @@
 
         for i, frame in enumerate(result):
             self.assertEqual(dctx.decompress(frame), original[i])
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestCompressor_chunker_fuzzing(unittest.TestCase):
+    @hypothesis.settings(
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      chunk_size=strategies.integers(
+                          min_value=1,
+                          max_value=32 * 1048576),
+                      input_sizes=strategies.data())
+    def test_random_input_sizes(self, original, level, chunk_size, input_sizes):
+        cctx = zstd.ZstdCompressor(level=level)
+        chunker = cctx.chunker(chunk_size=chunk_size)
+
+        chunks = []
+        i = 0
+        while True:
+            input_size = input_sizes.draw(strategies.integers(1, 4096))
+            source = original[i:i + input_size]
+            if not source:
+                break
+
+            chunks.extend(chunker.compress(source))
+            i += input_size
+
+        chunks.extend(chunker.finish())
+
+        dctx = zstd.ZstdDecompressor()
+
+        self.assertEqual(dctx.decompress(b''.join(chunks),
+                                         max_output_size=len(original)),
+                         original)
+
+        self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
+
+    @hypothesis.settings(
+        suppress_health_check=[hypothesis.HealthCheck.large_base_example])
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      chunk_size=strategies.integers(
+                          min_value=1,
+                          max_value=32 * 1048576),
+                      input_sizes=strategies.data(),
+                      flushes=strategies.data())
+    def test_flush_block(self, original, level, chunk_size, input_sizes,
+                         flushes):
+        cctx = zstd.ZstdCompressor(level=level)
+        chunker = cctx.chunker(chunk_size=chunk_size)
+
+        dctx = zstd.ZstdDecompressor()
+        dobj = dctx.decompressobj()
+
+        compressed_chunks = []
+        decompressed_chunks = []
+        i = 0
+        while True:
+            input_size = input_sizes.draw(strategies.integers(1, 4096))
+            source = original[i:i + input_size]
+            if not source:
+                break
+
+            i += input_size
+
+            chunks = list(chunker.compress(source))
+            compressed_chunks.extend(chunks)
+            decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+
+            if not flushes.draw(strategies.booleans()):
+                continue
+
+            chunks = list(chunker.flush())
+            compressed_chunks.extend(chunks)
+            decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+
+            self.assertEqual(b''.join(decompressed_chunks), original[0:i])
+
+        chunks = list(chunker.finish())
+        compressed_chunks.extend(chunks)
+        decompressed_chunks.append(dobj.decompress(b''.join(chunks)))
+
+        self.assertEqual(dctx.decompress(b''.join(compressed_chunks),
+                                         max_output_size=len(original)),
+                         original)
+        self.assertEqual(b''.join(decompressed_chunks), original)
\ No newline at end of file
--- a/contrib/python-zstandard/tests/test_data_structures.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Mon Oct 22 14:46:06 2018 -0400
@@ -24,6 +24,7 @@
                                        hash_log=zstd.HASHLOG_MAX,
                                        search_log=zstd.SEARCHLOG_MAX,
                                        min_match=zstd.SEARCHLENGTH_MAX - 1,
+                                       target_length=zstd.TARGETLENGTH_MAX,
                                        compression_strategy=zstd.STRATEGY_BTULTRA)
 
     def test_from_level(self):
@@ -34,7 +35,6 @@
 
         p = zstd.ZstdCompressionParameters.from_level(-4)
         self.assertEqual(p.window_log, 19)
-        self.assertEqual(p.compress_literals, 0)
 
     def test_members(self):
         p = zstd.ZstdCompressionParameters(window_log=10,
@@ -64,19 +64,11 @@
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_size_log, 6)
 
-        p = zstd.ZstdCompressionParameters(compression_level=2)
-        self.assertEqual(p.compress_literals, 1)
-
-        p = zstd.ZstdCompressionParameters(compress_literals=False)
-        self.assertEqual(p.compress_literals, 0)
-
         p = zstd.ZstdCompressionParameters(compression_level=-1)
         self.assertEqual(p.compression_level, -1)
-        self.assertEqual(p.compress_literals, 0)
 
-        p = zstd.ZstdCompressionParameters(compression_level=-2, compress_literals=True)
+        p = zstd.ZstdCompressionParameters(compression_level=-2)
         self.assertEqual(p.compression_level, -2)
-        self.assertEqual(p.compress_literals, 1)
 
         p = zstd.ZstdCompressionParameters(force_max_window=True)
         self.assertEqual(p.force_max_window, 1)
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Mon Oct 22 14:46:06 2018 -0400
@@ -27,7 +27,7 @@
 s_searchlength = strategies.integers(min_value=zstd.SEARCHLENGTH_MIN,
                                      max_value=zstd.SEARCHLENGTH_MAX)
 s_targetlength = strategies.integers(min_value=zstd.TARGETLENGTH_MIN,
-                                     max_value=2**32)
+                                     max_value=zstd.TARGETLENGTH_MAX)
 s_strategy = strategies.sampled_from((zstd.STRATEGY_FAST,
                                         zstd.STRATEGY_DFAST,
                                         zstd.STRATEGY_GREEDY,
--- a/contrib/python-zstandard/tests/test_decompressor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -293,10 +293,6 @@
     def test_context_manager(self):
         dctx = zstd.ZstdDecompressor()
 
-        reader = dctx.stream_reader(b'foo')
-        with self.assertRaisesRegexp(zstd.ZstdError, 'read\(\) must be called from an active'):
-            reader.read(1)
-
         with dctx.stream_reader(b'foo') as reader:
             with self.assertRaisesRegexp(ValueError, 'cannot __enter__ multiple times'):
                 with reader as reader2:
@@ -331,17 +327,23 @@
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(b'foo') as reader:
+            self.assertFalse(reader.closed)
             self.assertTrue(reader.readable())
             self.assertFalse(reader.writable())
             self.assertTrue(reader.seekable())
             self.assertFalse(reader.isatty())
+            self.assertFalse(reader.closed)
             self.assertIsNone(reader.flush())
+            self.assertFalse(reader.closed)
+
+        self.assertTrue(reader.closed)
 
     def test_read_closed(self):
         dctx = zstd.ZstdDecompressor()
 
         with dctx.stream_reader(b'foo') as reader:
             reader.close()
+            self.assertTrue(reader.closed)
             with self.assertRaisesRegexp(ValueError, 'stream is closed'):
                 reader.read(1)
 
@@ -372,10 +374,10 @@
             self.assertEqual(reader.tell(), len(source))
 
             # Read after EOF should return empty bytes.
-            self.assertEqual(reader.read(), b'')
+            self.assertEqual(reader.read(1), b'')
             self.assertEqual(reader.tell(), len(result))
 
-        self.assertTrue(reader.closed())
+        self.assertTrue(reader.closed)
 
     def test_read_buffer_small_chunks(self):
         cctx = zstd.ZstdCompressor()
@@ -408,8 +410,11 @@
             chunk = reader.read(8192)
             self.assertEqual(chunk, source)
             self.assertEqual(reader.tell(), len(source))
-            self.assertEqual(reader.read(), b'')
+            self.assertEqual(reader.read(1), b'')
             self.assertEqual(reader.tell(), len(source))
+            self.assertFalse(reader.closed)
+
+        self.assertTrue(reader.closed)
 
     def test_read_stream_small_chunks(self):
         cctx = zstd.ZstdCompressor()
@@ -440,7 +445,9 @@
             while reader.read(16):
                 pass
 
-        with self.assertRaisesRegexp(zstd.ZstdError, 'read\(\) must be called from an active'):
+        self.assertTrue(reader.closed)
+
+        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
             reader.read(10)
 
     def test_illegal_seeks(self):
@@ -474,8 +481,7 @@
             with self.assertRaisesRegexp(ValueError, 'stream is closed'):
                 reader.seek(4, os.SEEK_SET)
 
-        with self.assertRaisesRegexp(
-            zstd.ZstdError, 'seek\(\) must be called from an active context'):
+        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
             reader.seek(0)
 
     def test_seek(self):
@@ -492,6 +498,39 @@
             reader.seek(4, os.SEEK_CUR)
             self.assertEqual(reader.read(2), b'ar')
 
+    def test_no_context_manager(self):
+        source = b'foobar' * 60
+        cctx = zstd.ZstdCompressor()
+        frame = cctx.compress(source)
+
+        dctx = zstd.ZstdDecompressor()
+        reader = dctx.stream_reader(frame)
+
+        self.assertEqual(reader.read(6), b'foobar')
+        self.assertEqual(reader.read(18), b'foobar' * 3)
+        self.assertFalse(reader.closed)
+
+        # Calling close prevents subsequent use.
+        reader.close()
+        self.assertTrue(reader.closed)
+
+        with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+            reader.read(6)
+
+    def test_read_after_error(self):
+        source = io.BytesIO(b'')
+        dctx = zstd.ZstdDecompressor()
+
+        reader = dctx.stream_reader(source)
+
+        with reader:
+            with self.assertRaises(TypeError):
+                reader.read()
+
+        with reader:
+            with self.assertRaisesRegexp(ValueError, 'stream is closed'):
+                reader.read(100)
+
 
 @make_cffi
 class TestDecompressor_decompressobj(unittest.TestCase):
--- a/contrib/python-zstandard/tests/test_module_attributes.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Mon Oct 22 14:46:06 2018 -0400
@@ -12,7 +12,9 @@
 @make_cffi
 class TestModuleAttributes(unittest.TestCase):
     def test_version(self):
-        self.assertEqual(zstd.ZSTD_VERSION, (1, 3, 4))
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 3, 6))
+
+        self.assertEqual(zstd.__version__, '0.10.1')
 
     def test_constants(self):
         self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
@@ -27,6 +29,8 @@
             'DECOMPRESSION_RECOMMENDED_INPUT_SIZE',
             'DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE',
             'MAGIC_NUMBER',
+            'BLOCKSIZELOG_MAX',
+            'BLOCKSIZE_MAX',
             'WINDOWLOG_MIN',
             'WINDOWLOG_MAX',
             'CHAINLOG_MIN',
@@ -39,6 +43,7 @@
             'SEARCHLENGTH_MIN',
             'SEARCHLENGTH_MAX',
             'TARGETLENGTH_MIN',
+            'TARGETLENGTH_MAX',
             'LDM_MINMATCH_MIN',
             'LDM_MINMATCH_MAX',
             'LDM_BUCKETSIZELOG_MAX',
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Mon Oct 22 14:46:06 2018 -0400
@@ -57,7 +57,8 @@
         d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1,
                                   d=16)
 
-        self.assertEqual(d.k, 50)
+        # This varies by platform.
+        self.assertIn(d.k, (50, 2000))
         self.assertEqual(d.d, 16)
 
 @make_cffi
--- a/contrib/python-zstandard/zstandard/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstandard/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -60,3 +60,6 @@
 else:
     raise ImportError('unknown module import policy: %s; use default, cffi_fallback, '
                       'cext, or cffi' % _module_policy)
+
+# Keep this in sync with python-zstandard.h.
+__version__ = '0.10.1'
--- a/contrib/python-zstandard/zstd.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd.c	Mon Oct 22 14:46:06 2018 -0400
@@ -182,6 +182,7 @@
 void compressor_module_init(PyObject* mod);
 void compressionparams_module_init(PyObject* mod);
 void constants_module_init(PyObject* mod);
+void compressionchunker_module_init(PyObject* mod);
 void compressiondict_module_init(PyObject* mod);
 void compressionreader_module_init(PyObject* mod);
 void compressionwriter_module_init(PyObject* mod);
@@ -209,7 +210,7 @@
 	   We detect this mismatch here and refuse to load the module if this
 	   scenario is detected.
 	*/
-	if (ZSTD_VERSION_NUMBER != 10304 || ZSTD_versionNumber() != 10304) {
+	if (ZSTD_VERSION_NUMBER != 10306 || ZSTD_versionNumber() != 10306) {
 		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
 		return;
 	}
@@ -219,6 +220,7 @@
 	compressiondict_module_init(m);
 	compressobj_module_init(m);
 	compressor_module_init(m);
+	compressionchunker_module_init(m);
 	compressionreader_module_init(m);
 	compressionwriter_module_init(m);
 	compressoriterator_module_init(m);
--- a/contrib/python-zstandard/zstd/common/bitstream.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/bitstream.h	Mon Oct 22 14:46:06 2018 -0400
@@ -1,8 +1,7 @@
 /* ******************************************************************
    bitstream
    Part of FSE library
-   header file (to include)
-   Copyright (C) 2013-2017, Yann Collet.
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -49,21 +48,10 @@
 *  Dependencies
 ******************************************/
 #include "mem.h"            /* unaligned access routines */
+#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
 #include "error_private.h"  /* error codes and messages */
 
 
-/*-*************************************
-*  Debug
-***************************************/
-#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
-#  include <assert.h>
-#else
-#  ifndef assert
-#    define assert(condition) ((void)0)
-#  endif
-#endif
-
-
 /*=========================================
 *  Target specific
 =========================================*/
@@ -83,8 +71,7 @@
  * A critical property of these streams is that they encode and decode in **reverse** direction.
  * So the first bit sequence you add will be the last to be read, like a LIFO stack.
  */
-typedef struct
-{
+typedef struct {
     size_t bitContainer;
     unsigned bitPos;
     char*  startPtr;
@@ -118,8 +105,7 @@
 /*-********************************************
 *  bitStream decoding API (read backward)
 **********************************************/
-typedef struct
-{
+typedef struct {
     size_t   bitContainer;
     unsigned bitsConsumed;
     const char* ptr;
@@ -236,7 +222,8 @@
 }
 
 /*! BIT_addBitsFast() :
- *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
+ *  works only if `value` is _clean_,
+ *  meaning all high bits above nbBits are 0 */
 MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
                                 size_t value, unsigned nbBits)
 {
--- a/contrib/python-zstandard/zstd/common/compiler.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/compiler.h	Mon Oct 22 14:46:06 2018 -0400
@@ -77,9 +77,9 @@
  * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
  */
 #ifndef DYNAMIC_BMI2
-  #if (defined(__clang__) && __has_attribute(__target__)) \
+  #if ((defined(__clang__) && __has_attribute(__target__)) \
       || (defined(__GNUC__) \
-          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \
+          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
       && (defined(__x86_64__) || defined(_M_X86)) \
       && !defined(__BMI2__)
   #  define DYNAMIC_BMI2 1
@@ -88,15 +88,37 @@
   #endif
 #endif
 
-/* prefetch */
-#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
-#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
-#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
-#elif defined(__GNUC__)
-#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH macro
+ * All prefetch invocations use a single default locality 2,
+ * generating instruction prefetcht1,
+ * which, according to Intel, means "load data into L2 cache".
+ * This is a good enough "middle ground" for the time being,
+ * though in theory, it would be better to specialize locality depending on data being prefetched.
+ * Tests could not determine any sensible difference based on locality value. */
+#if defined(NO_PREFETCH)
+#  define PREFETCH(ptr)     (void)(ptr)  /* disabled */
 #else
-#  define PREFETCH(ptr)   /* disabled */
-#endif
+#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
+#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#    define PREFETCH(ptr)   _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
+#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+#    define PREFETCH(ptr)   __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#  else
+#    define PREFETCH(ptr)   (void)(ptr)  /* disabled */
+#  endif
+#endif  /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s)  {            \
+    const char* const _ptr = (const char*)(p);  \
+    size_t const _size = (size_t)(s);     \
+    size_t _pos;                          \
+    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
+        PREFETCH(_ptr + _pos);            \
+    }                                     \
+}
 
 /* disable warnings */
 #ifdef _MSC_VER    /* Visual Studio */
--- a/contrib/python-zstandard/zstd/common/cpu.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/cpu.h	Mon Oct 22 14:46:06 2018 -0400
@@ -36,7 +36,7 @@
     U32 f1d = 0;
     U32 f7b = 0;
     U32 f7c = 0;
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
     int reg[4];
     __cpuid((int*)reg, 0);
     {
@@ -72,8 +72,7 @@
           "cpuid\n\t"
           "popl %%ebx\n\t"
           : "=a"(f1a), "=c"(f1c), "=d"(f1d)
-          : "a"(1)
-          :);
+          : "a"(1));
     }
     if (n >= 7) {
       __asm__(
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/debug.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,44 @@
+/* ******************************************************************
+   debug
+   Part of FSE library
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/debug.h	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,123 @@
+/* ******************************************************************
+   debug
+   Part of FSE library
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting at level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2,
+ * and is a global variable, not multi-thread protected (use with care)
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact,
+ * but can only work with compile-time constants.
+ * This variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+#  define DEBUGLEVEL 0
+#endif
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : no debug, all run-time functions disabled
+ * 1 : no display, enables assert() only
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In that case, it's possible to selectively enable higher verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+#  include <assert.h>
+#else
+#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
+#    define assert(condition) ((void)0)   /* disable assert (default) */
+#  endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+#  include <stdio.h>
+extern int g_debuglevel; /* here, this variable is only declared,
+                           it actually lives in debug.c,
+                           and is shared by the whole process.
+                           It's typically used to enable very verbose levels
+                           on selective conditions (such as position in src) */
+
+#  define RAWLOG(l, ...) {                                      \
+                if (l<=g_debuglevel) {                          \
+                    fprintf(stderr, __VA_ARGS__);               \
+            }   }
+#  define DEBUGLOG(l, ...) {                                    \
+                if (l<=g_debuglevel) {                          \
+                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+                    fprintf(stderr, " \n");                     \
+            }   }
+#else
+#  define RAWLOG(l, ...)      {}    /* disabled */
+#  define DEBUGLOG(l, ...)    {}    /* disabled */
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* DEBUG_H_12987983217 */
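
In practice the macros above are used as follows. A minimal sketch, assuming the
vendored debug.h is on the include path and the unit is compiled with e.g.
-DDEBUGLEVEL=2 (which also requires linking debug.c, where g_debuglevel lives);
the function and message are illustrative:

    /* example.c : illustrative use of the macros declared above */
    #include "debug.h"   /* DEBUG_STATIC_ASSERT, DEBUGLOG, assert */

    static size_t halve(size_t n)
    {
        DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4);  /* compile-time check, no runtime cost */
        assert(n > 0);                             /* enabled when DEBUGLEVEL >= 1 */
        DEBUGLOG(2, "halving %u", (unsigned)n);    /* printed when DEBUGLEVEL >= 2
                                                    * and 2 <= g_debuglevel */
        return n / 2;
    }
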
--- a/contrib/python-zstandard/zstd/common/entropy_common.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/entropy_common.c	Mon Oct 22 14:46:06 2018 -0400
@@ -72,7 +72,21 @@
     unsigned charnum = 0;
     int previous0 = 0;
 
-    if (hbSize < 4) return ERROR(srcSize_wrong);
+    if (hbSize < 4) {
+        /* This function only works when hbSize >= 4 */
+        char buffer[4];
+        memset(buffer, 0, sizeof(buffer));
+        memcpy(buffer, headerBuffer, hbSize);
+        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+                                                    buffer, sizeof(buffer));
+            if (FSE_isError(countSize)) return countSize;
+            if (countSize > hbSize) return ERROR(corruption_detected);
+            return countSize;
+    }   }
+    assert(hbSize >= 4);
+
+    /* init */
+    memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
     bitStream = MEM_readLE32(ip);
     nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
     if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
@@ -105,6 +119,7 @@
             if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
             while (charnum < n0) normalizedCounter[charnum++] = 0;
             if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                assert((bitCount >> 3) <= 3); /* For first condition to work */
                 ip += bitCount>>3;
                 bitCount &= 7;
                 bitStream = MEM_readLE32(ip) >> bitCount;
--- a/contrib/python-zstandard/zstd/common/fse.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/fse.h	Mon Oct 22 14:46:06 2018 -0400
@@ -72,6 +72,7 @@
 #define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
 FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
 
+
 /*-****************************************
 *  FSE simple functions
 ******************************************/
@@ -129,7 +130,7 @@
 ******************************************/
 /*!
 FSE_compress() does the following:
-1. count symbol occurrence from source[] into table count[]
+1. count symbol occurrences from source[] into table count[] (see hist.h)
 2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
 3. save normalized counters to memory buffer using writeNCount()
 4. build encoding table 'CTable' from normalized counters
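
In terms of the advanced API declared below, these steps decompose as in the
following minimal sketch (error handling abbreviated; HIST_count_simple is the
simple counter from the new hist module referenced in step 1; the 8-bit
alphabet and the function name are illustrative):

    #include "fse.h"
    #include "hist.h"

    /* sketch : FSE_compress() decomposed into its advanced-API steps */
    size_t fse_compress_sketch(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
    {
        unsigned count[256];
        short norm[256];
        unsigned maxSymbolValue = 255;
        unsigned tableLog;
        size_t hSize, cSize;
        FSE_CTable* ct;
        unsigned char* const ostart = (unsigned char*)dst;

        HIST_count_simple(count, &maxSymbolValue, src, srcSize);            /* 1. count */
        tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
        FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue); /* 2. normalize */
        hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSymbolValue, tableLog);  /* 3. save */
        if (FSE_isError(hSize)) return hSize;
        ct = FSE_createCTable(maxSymbolValue, tableLog);
        FSE_buildCTable(ct, norm, maxSymbolValue, tableLog);                /* 4. build CTable */
        cSize = FSE_compress_usingCTable(ostart + hSize, dstCapacity - hSize,
                                         src, srcSize, ct);                 /* encode */
        FSE_freeCTable(ct);
        if (FSE_isError(cSize)) return cSize;
        return hSize + cSize;
    }
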
@@ -147,15 +148,6 @@
 
 /* *** COMPRESSION *** */
 
-/*! FSE_count():
-    Provides the precise count of each byte within a table 'count'.
-    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
-    *maxSymbolValuePtr will be updated if detected smaller than initial value.
-    @return : the count of the most frequent symbol (which is not identified).
-              if return == srcSize, there is only one symbol.
-              Can also return an error code, which can be tested with FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
 /*! FSE_optimalTableLog():
     dynamically downsize 'tableLog' when conditions are met.
     It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
@@ -167,7 +159,8 @@
     'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
     @return : tableLog,
               or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
 
 /*! FSE_NCountWriteBound():
     Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
@@ -178,8 +171,9 @@
     Compactly save 'normalizedCounter' into 'buffer'.
     @return : size of the compressed table,
               or an errorCode, which can be tested using FSE_isError(). */
-FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
-
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+                                 const short* normalizedCounter,
+                                 unsigned maxSymbolValue, unsigned tableLog);
 
 /*! Constructor and Destructor of FSE_CTable.
     Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
@@ -250,7 +244,9 @@
     @return : size read from 'rBuffer',
               or an errorCode, which can be tested using FSE_isError().
               maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
-FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+                           const void* rBuffer, size_t rBuffSize);
 
 /*! Constructor and Destructor of FSE_DTable.
     Note that its size depends on 'tableLog' */
@@ -325,33 +321,8 @@
 
 
 /* *****************************************
-*  FSE advanced API
-*******************************************/
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned
- */
-size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* source, size_t sourceSize, unsigned* workSpace);
-
-/** FSE_countFast() :
- *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
- */
-size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` must be a table of minimum `1024` unsigned
- */
-size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
-
-/*! FSE_count_simple() :
- * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
- * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
-*/
-size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
-
-
+ *  FSE advanced API
+ ***************************************** */
 
 unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
 /**< same as FSE_optimalTableLog(), which uses `minus==2` */
@@ -576,6 +547,39 @@
 }
 
 
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional values get rounded up (i.e. : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+    U32 const threshold = (minNbBits+1) << 16;
+    assert(tableLog < 16);
+    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
+    {   U32 const tableSize = 1 << tableLog;
+        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
+        U32 const bitMultiplier = 1 << accuracyLog;
+        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+        assert(normalizedDeltaFromThreshold <= bitMultiplier);
+        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+    }
+}
+
+
 /* ======    Decompression    ====== */
 
 typedef struct {
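
With accuracyLog == 8, the fixed-point value returned by FSE_bitCost() above
carries 8 fractional bits, so dividing by 256 converts it to bits per symbol;
this is the same conversion the symbol-cost debug trace in fse_compress.c
applies further below. A minimal illustrative helper, assuming symbolTT and
tableLog were obtained from a built CTable:

    #define FSE_STATIC_LINKING_ONLY
    #include "fse.h"

    /* fractional symbol cost, in bits; purely illustrative */
    static double fse_symbol_cost_bits(const void* symbolTT,
                                       unsigned tableLog, unsigned symbol)
    {
        return (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256.0;
    }
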
--- a/contrib/python-zstandard/zstd/common/fse_decompress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/fse_decompress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -49,7 +49,7 @@
 *  Error Management
 ****************************************************************/
 #define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
 
 /* check and forward error code */
 #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
--- a/contrib/python-zstandard/zstd/common/huf.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/huf.h	Mon Oct 22 14:46:06 2018 -0400
@@ -1,7 +1,7 @@
 /* ******************************************************************
-   Huffman coder, part of New Generation Entropy library
-   header file
-   Copyright (C) 2013-2016, Yann Collet.
+   huff0 huffman codec,
+   part of Finite State Entropy library
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -163,25 +163,25 @@
 /* static allocation of HUF's DTable */
 typedef U32 HUF_DTable;
 #define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
-#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
         HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
-#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
         HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
 
 
 /* ****************************************
 *  Advanced decompression functions
 ******************************************/
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
 
 size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
 size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
 size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
 
 
 /* ****************************************
@@ -208,7 +208,7 @@
 typedef enum {
    HUF_repeat_none,  /**< Cannot use the previous table */
    HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
-   HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
+   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
  } HUF_repeat;
 /** HUF_compress4X_repeat() :
  *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
@@ -227,7 +227,9 @@
  */
 #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
 #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+                       const U32* count, U32 maxSymbolValue, U32 maxNbBits,
+                             void* workSpace, size_t wkspSize);
 
 /*! HUF_readStats() :
  *  Read compact Huffman tree, saved by HUF_writeCTable().
@@ -242,10 +244,15 @@
  *  Loading a CTable saved with HUF_writeCTable() */
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
 
+/** HUF_getNbBits() :
+ *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ *  Note 1 : is not inlined, as HUF_CElt definition is private
+ *  Note 2 : const void* used, so that the caller can provide a statically allocated table as argument (which uses type U32) */
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
 
 /*
  * HUF_decompress() does the following:
- * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
  * 2. build Huffman table from save, using HUF_readDTableX?()
  * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
  */
@@ -253,13 +260,13 @@
 /** HUF_selectDecoder() :
  *  Tells which decoder is likely to decode faster,
  *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
  *  Assumption : 0 < dstSize <= 128 KB */
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
 
 /**
  *  The minimum workspace size for the `workSpace` used in
- *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
+ *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
  *
  *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
 *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
@@ -270,14 +277,14 @@
 #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
 #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
 
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
 size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
-size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
 
 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
 size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
 
 
 /* ====================== */
@@ -298,25 +305,25 @@
                        void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
                        HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
 
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
-size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
 
 size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
 size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
-size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
-size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
 
 size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
 size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
-size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
 
 /* BMI2 variants.
  * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
  */
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
 
--- a/contrib/python-zstandard/zstd/common/mem.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/mem.h	Mon Oct 22 14:46:06 2018 -0400
@@ -57,11 +57,23 @@
   typedef  uint64_t U64;
   typedef   int64_t S64;
 #else
+# include <limits.h>
+#if CHAR_BIT != 8
+#  error "this implementation requires char to be exactly 8-bit type"
+#endif
   typedef unsigned char      BYTE;
+#if USHRT_MAX != 65535
+#  error "this implementation requires short to be exactly 16-bit type"
+#endif
   typedef unsigned short      U16;
   typedef   signed short      S16;
+#if UINT_MAX != 4294967295
+#  error "this implementation requires int to be exactly 32-bit type"
+#endif
   typedef unsigned int        U32;
   typedef   signed int        S32;
+/* note : there are no limits defined for the long long type in C90.
+ * Limits exist in C99; however, in that case, <stdint.h> is preferred */
   typedef unsigned long long  U64;
   typedef   signed long long  S64;
 #endif
--- a/contrib/python-zstandard/zstd/common/pool.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/pool.c	Mon Oct 22 14:46:06 2018 -0400
@@ -10,9 +10,10 @@
 
 
 /* ======   Dependencies   ======= */
-#include <stddef.h>  /* size_t */
+#include <stddef.h>    /* size_t */
+#include "debug.h"     /* assert */
+#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
 #include "pool.h"
-#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
 
 /* ======   Compiler specifics   ====== */
 #if defined(_MSC_VER)
@@ -33,8 +34,9 @@
 struct POOL_ctx_s {
     ZSTD_customMem customMem;
     /* Keep track of the threads */
-    ZSTD_pthread_t *threads;
-    size_t numThreads;
+    ZSTD_pthread_t* threads;
+    size_t threadCapacity;
+    size_t threadLimit;
 
     /* The queue is a circular buffer */
     POOL_job *queue;
@@ -58,10 +60,10 @@
 };
 
 /* POOL_thread() :
-   Work thread for the thread pool.
-   Waits for jobs and executes them.
-   @returns : NULL on failure else non-null.
-*/
+ * Work thread for the thread pool.
+ * Waits for jobs and executes them.
+ * @returns : NULL on failure else non-null.
+ */
 static void* POOL_thread(void* opaque) {
     POOL_ctx* const ctx = (POOL_ctx*)opaque;
     if (!ctx) { return NULL; }
@@ -69,14 +71,17 @@
         /* Lock the mutex and wait for a non-empty queue or until shutdown */
         ZSTD_pthread_mutex_lock(&ctx->queueMutex);
 
-        while (ctx->queueEmpty && !ctx->shutdown) {
+        while ( ctx->queueEmpty
+            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
+            if (ctx->shutdown) {
+                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
+                 * a few threads will be shut down while !queueEmpty,
+                 * but enough threads will remain active to finish the queue */
+                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+                return opaque;
+            }
             ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
         }
-        /* empty => shutting down: so stop */
-        if (ctx->queueEmpty) {
-            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-            return opaque;
-        }
         /* Pop a job off the queue */
         {   POOL_job const job = ctx->queue[ctx->queueHead];
             ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
@@ -89,30 +94,32 @@
             job.function(job.opaque);
 
             /* If the intended queue size was 0, signal after finishing job */
+            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+            ctx->numThreadsBusy--;
             if (ctx->queueSize == 1) {
-                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-                ctx->numThreadsBusy--;
-                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                 ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-        }   }
+            }
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+        }
     }  /* for (;;) */
-    /* Unreachable */
+    assert(0);  /* Unreachable */
 }
 
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem) {
     POOL_ctx* ctx;
-    /* Check the parameters */
+    /* Check parameters */
     if (!numThreads) { return NULL; }
     /* Allocate the context and zero initialize */
     ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
     if (!ctx) { return NULL; }
     /* Initialize the job queue.
-     * It needs one extra space since one space is wasted to differentiate empty
-     * and full queues.
+     * It needs one extra space since one space is wasted to differentiate
+     * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
     ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
@@ -126,7 +133,7 @@
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
     ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
-    ctx->numThreads = 0;
+    ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
     if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@@ -134,11 +141,12 @@
     {   size_t i;
         for (i = 0; i < numThreads; ++i) {
             if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
-                ctx->numThreads = i;
+                ctx->threadCapacity = i;
                 POOL_free(ctx);
                 return NULL;
         }   }
-        ctx->numThreads = numThreads;
+        ctx->threadCapacity = numThreads;
+        ctx->threadLimit = numThreads;
     }
     return ctx;
 }
@@ -156,8 +164,8 @@
     ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
     /* Join all of the threads */
     {   size_t i;
-        for (i = 0; i < ctx->numThreads; ++i) {
-            ZSTD_pthread_join(ctx->threads[i], NULL);
+        for (i = 0; i < ctx->threadCapacity; ++i) {
+            ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
     }   }
 }
 
@@ -172,24 +180,68 @@
     ZSTD_free(ctx, ctx->customMem);
 }
 
+
+
 size_t POOL_sizeof(POOL_ctx *ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
-        + ctx->numThreads * sizeof(ZSTD_pthread_t);
+        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
+}
+
+
+/* @return : 0 on success, 1 on error */
+static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
+{
+    if (numThreads <= ctx->threadCapacity) {
+        if (!numThreads) return 1;
+        ctx->threadLimit = numThreads;
+        return 0;
+    }
+    /* numThreads > threadCapacity */
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+        if (!threadPool) return 1;
+        /* replace existing thread pool */
+        memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+        ZSTD_free(ctx->threads, ctx->customMem);
+        ctx->threads = threadPool;
+        /* Initialize additional threads */
+        {   size_t threadId;
+            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
+                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
+                    ctx->threadCapacity = threadId;
+                    return 1;
+            }   }
+    }   }
+    /* successfully expanded */
+    ctx->threadCapacity = numThreads;
+    ctx->threadLimit = numThreads;
+    return 0;
+}
+
+/* @return : 0 on success, 1 on error */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads)
+{
+    int result;
+    if (ctx==NULL) return 1;
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    result = POOL_resize_internal(ctx, numThreads);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    return result;
 }
 
 /**
  * Returns 1 if the queue is full and 0 otherwise.
  *
- * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
- * then a queue is empty if there is a thread free and no job is waiting.
+ * When queueSize is 1 (pool was created with an intended queueSize of 0),
+ * then a queue is empty if there is a thread free _and_ no job is waiting.
  */
 static int isQueueFull(POOL_ctx const* ctx) {
     if (ctx->queueSize > 1) {
         return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
     } else {
-        return ctx->numThreadsBusy == ctx->numThreads ||
+        return (ctx->numThreadsBusy == ctx->threadLimit) ||
                !ctx->queueEmpty;
     }
 }
@@ -263,6 +315,11 @@
     (void)ctx;
 }
 
+int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
+    (void)ctx; (void)numThreads;
+    return 0;
+}
+
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
     (void)ctx;
     function(opaque);
--- a/contrib/python-zstandard/zstd/common/pool.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/pool.h	Mon Oct 22 14:46:06 2018 -0400
@@ -30,40 +30,50 @@
 */
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem);
 
 /*! POOL_free() :
-    Free a thread pool returned by POOL_create().
-*/
+ *  Free a thread pool returned by POOL_create().
+ */
 void POOL_free(POOL_ctx* ctx);
 
+/*! POOL_resize() :
+ *  Expands or shrinks pool's number of threads.
+ *  This is more efficient than releasing + creating a new context,
+ *  since it tries to preserve and re-use existing threads.
+ * `numThreads` must be at least 1.
+ * @return : 0 when resize was successful,
+ *           !0 (typically 1) if there is an error.
+ *    note : only numThreads can be resized, queueSize remains unchanged.
+ */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads);
+
 /*! POOL_sizeof() :
-    return memory usage of pool returned by POOL_create().
-*/
+ * @return threadpool memory usage
+ *  note : compatible with NULL (returns 0 in this case)
+ */
 size_t POOL_sizeof(POOL_ctx* ctx);
 
 /*! POOL_function :
-    The function type that can be added to a thread pool.
-*/
+ *  The function type that can be added to a thread pool.
+ */
 typedef void (*POOL_function)(void*);
-/*! POOL_add_function :
-    The function type for a generic thread pool add function.
-*/
-typedef void (*POOL_add_function)(void*, POOL_function, void*);
 
 /*! POOL_add() :
-    Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
-    Possibly blocks until there is room in the queue.
-    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
-*/
+ *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+ *  Possibly blocks until there is room in the queue.
+ *  Note : The function may be executed asynchronously,
+ *         therefore, `opaque` must live until the function has completed.
+ */
 void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
 /*! POOL_tryAdd() :
-    Add the job `function(opaque)` to the thread pool if a worker is available.
-    return immediately otherwise.
-   @return : 1 if successful, 0 if not.
-*/
+ *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
+ *  Returns immediately even if not (does not block).
+ * @return : 1 if successful, 0 if not.
+ */
 int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
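
Combined with POOL_add(), POOL_resize() lets a caller grow a pool mid-stream
rather than destroying and recreating it. A minimal sketch; printJob and the
job counts are illustrative:

    #include <stdio.h>
    #include "pool.h"

    /* hypothetical job payload, for illustration only */
    static void printJob(void* opaque) { printf("job %d\n", *(int*)opaque); }

    void pool_resize_sketch(void)
    {
        int ids[8]; int i;
        POOL_ctx* const pool = POOL_create(2 /* threads */, 4 /* queueSize */);
        if (pool == NULL) return;
        for (i = 0; i < 4; i++) { ids[i] = i; POOL_add(pool, printJob, &ids[i]); }
        /* more work arrives : raise the thread limit, re-using live threads */
        if (POOL_resize(pool, 4)) { POOL_free(pool); return; }   /* !0 means error */
        for (i = 4; i < 8; i++) { ids[i] = i; POOL_add(pool, printJob, &ids[i]); }
        POOL_free(pool);   /* remaining threads drain the queue before exiting */
    }
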
 
 
--- a/contrib/python-zstandard/zstd/common/xxhash.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/xxhash.c	Mon Oct 22 14:46:06 2018 -0400
@@ -98,6 +98,7 @@
 /* Modify the local functions below should you wish to use some other memory routines */
 /* for malloc(), free() */
 #include <stdlib.h>
+#include <stddef.h>     /* size_t */
 static void* XXH_malloc(size_t s) { return malloc(s); }
 static void  XXH_free  (void* p)  { free(p); }
 /* for memcpy() */
--- a/contrib/python-zstandard/zstd/common/zstd_common.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_common.c	Mon Oct 22 14:46:06 2018 -0400
@@ -46,11 +46,6 @@
  *  provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
 
-/*! g_debuglog_enable :
- *  turn on/off debug traces (global switch) */
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 2)
-int g_debuglog_enable = 1;
-#endif
 
 
 /*=**************************************************************
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Mon Oct 22 14:46:06 2018 -0400
@@ -21,6 +21,7 @@
 ***************************************/
 #include "compiler.h"
 #include "mem.h"
+#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
 #include "error_private.h"
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
@@ -38,43 +39,8 @@
 extern "C" {
 #endif
 
-
-/*-*************************************
-*  Debug
-***************************************/
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
-#  include <assert.h>
-#else
-#  ifndef assert
-#    define assert(condition) ((void)0)
-#  endif
-#endif
-
-#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
-
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
-#  include <stdio.h>
-extern int g_debuglog_enable;
-/* recommended values for ZSTD_DEBUG display levels :
- * 1 : no display, enables assert() only
- * 2 : reserved for currently active debug path
- * 3 : events once per object lifetime (CCtx, CDict, etc.)
- * 4 : events once per frame
- * 5 : events once per block
- * 6 : events once per sequence (*very* verbose) */
-#  define RAWLOG(l, ...) {                                      \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __VA_ARGS__);               \
-            }   }
-#  define DEBUGLOG(l, ...) {                                    \
-                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
-                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
-                    fprintf(stderr, " \n");                     \
-            }   }
-#else
-#  define RAWLOG(l, ...)      {}    /* disabled */
-#  define DEBUGLOG(l, ...)    {}    /* disabled */
-#endif
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
 
 
 /*-*************************************
@@ -113,8 +79,7 @@
 static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
 static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
 
-#define ZSTD_FRAMEIDSIZE 4
-static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE;  /* magic number size */
+#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
 
 #define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */
 static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
@@ -227,6 +192,8 @@
     BYTE* llCode;
     BYTE* mlCode;
     BYTE* ofCode;
+    size_t maxNbSeq;
+    size_t maxNbLit;
     U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
     U32   longLengthPos;
 } seqStore_t;
--- a/contrib/python-zstandard/zstd/compress/fse_compress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/fse_compress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,6 @@
 /* ******************************************************************
    FSE : Finite State Entropy encoder
-   Copyright (C) 2013-2015, Yann Collet.
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -37,9 +37,11 @@
 ****************************************************************/
 #include <stdlib.h>     /* malloc, free, qsort */
 #include <string.h>     /* memcpy, memset */
-#include <stdio.h>      /* printf (debug) */
+#include "compiler.h"
+#include "mem.h"        /* U32, U16, etc. */
+#include "debug.h"      /* assert, DEBUGLOG */
+#include "hist.h"       /* HIST_count_wksp */
 #include "bitstream.h"
-#include "compiler.h"
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
 #include "error_private.h"
@@ -49,7 +51,6 @@
 *  Error Management
 ****************************************************************/
 #define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
 
 
 /* **************************************************************
@@ -82,7 +83,9 @@
  * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
  * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
  */
-size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                            void* workSpace, size_t wkspSize)
 {
     U32 const tableSize = 1 << tableLog;
     U32 const tableMask = tableSize - 1;
@@ -100,9 +103,14 @@
     if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
     tableU16[-2] = (U16) tableLog;
     tableU16[-1] = (U16) maxSymbolValue;
+    assert(tableLog < 16);   /* required for threshold strategy to work */
 
     /* For explanations on how to distribute symbol values over the table :
-    *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+     #ifdef __clang_analyzer__
+     memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
+     #endif
 
     /* symbol start positions */
     {   U32 u;
@@ -122,13 +130,15 @@
         U32 symbol;
         for (symbol=0; symbol<=maxSymbolValue; symbol++) {
             int nbOccurences;
-            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
+            int const freq = normalizedCounter[symbol];
+            for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
                 tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
                 position = (position + step) & tableMask;
-                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
+                while (position > highThreshold)
+                    position = (position + step) & tableMask;   /* Low proba area */
         }   }
 
-        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
+        assert(position==0);  /* Must have initialized all positions */
     }
 
     /* Build table */
@@ -143,7 +153,10 @@
         for (s=0; s<=maxSymbolValue; s++) {
             switch (normalizedCounter[s])
             {
-            case  0: break;
+            case  0:
+                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+                break;
 
             case -1:
             case  1:
@@ -160,6 +173,18 @@
                     total +=  normalizedCounter[s];
     }   }   }   }
 
+#if 0  /* debug : symbol costs */
+    DEBUGLOG(5, "\n --- table statistics : ");
+    {   U32 symbol;
+        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
+                symbol, normalizedCounter[symbol],
+                FSE_getMaxNbBits(symbolTT, symbol),
+                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+        }
+    }
+#endif
+
     return 0;
 }
 
@@ -174,8 +199,9 @@
 
 #ifndef FSE_COMMONDEFS_ONLY
 
+
 /*-**************************************************************
-*  FSE NCount encoding-decoding
+*  FSE NCount encoding
 ****************************************************************/
 size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
 {
@@ -183,9 +209,10 @@
     return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
 }
 
-static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
-                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
-                                       unsigned writeIsSafe)
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                         unsigned writeIsSafe)
 {
     BYTE* const ostart = (BYTE*) header;
     BYTE* out = ostart;
@@ -194,13 +221,12 @@
     const int tableSize = 1 << tableLog;
     int remaining;
     int threshold;
-    U32 bitStream;
-    int bitCount;
-    unsigned charnum = 0;
-    int previous0 = 0;
+    U32 bitStream = 0;
+    int bitCount = 0;
+    unsigned symbol = 0;
+    unsigned const alphabetSize = maxSymbolValue + 1;
+    int previousIs0 = 0;
 
-    bitStream = 0;
-    bitCount  = 0;
     /* Table Size */
     bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
     bitCount  += 4;
@@ -210,48 +236,53 @@
     threshold = tableSize;
     nbBits = tableLog+1;
 
-    while (remaining>1) {  /* stops at 1 */
-        if (previous0) {
-            unsigned start = charnum;
-            while (!normalizedCounter[charnum]) charnum++;
-            while (charnum >= start+24) {
+    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
+        if (previousIs0) {
+            unsigned start = symbol;
+            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+            if (symbol == alphabetSize) break;   /* incorrect distribution */
+            while (symbol >= start+24) {
                 start+=24;
                 bitStream += 0xFFFFU << bitCount;
-                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                if ((!writeIsSafe) && (out > oend-2))
+                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                 out[0] = (BYTE) bitStream;
                 out[1] = (BYTE)(bitStream>>8);
                 out+=2;
                 bitStream>>=16;
             }
-            while (charnum >= start+3) {
+            while (symbol >= start+3) {
                 start+=3;
                 bitStream += 3 << bitCount;
                 bitCount += 2;
             }
-            bitStream += (charnum-start) << bitCount;
+            bitStream += (symbol-start) << bitCount;
             bitCount += 2;
             if (bitCount>16) {
-                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                if ((!writeIsSafe) && (out > oend - 2))
+                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
                 out[0] = (BYTE)bitStream;
                 out[1] = (BYTE)(bitStream>>8);
                 out += 2;
                 bitStream >>= 16;
                 bitCount -= 16;
         }   }
-        {   int count = normalizedCounter[charnum++];
-            int const max = (2*threshold-1)-remaining;
+        {   int count = normalizedCounter[symbol++];
+            int const max = (2*threshold-1) - remaining;
             remaining -= count < 0 ? -count : count;
             count++;   /* +1 for extra accuracy */
-            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+            if (count>=threshold)
+                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
             bitStream += count << bitCount;
             bitCount  += nbBits;
             bitCount  -= (count<max);
-            previous0  = (count==1);
+            previousIs0  = (count==1);
             if (remaining<1) return ERROR(GENERIC);
             while (remaining<threshold) { nbBits--; threshold>>=1; }
         }
         if (bitCount>16) {
-            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+            if ((!writeIsSafe) && (out > oend - 2))
+                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
             out[0] = (BYTE)bitStream;
             out[1] = (BYTE)(bitStream>>8);
             out += 2;
@@ -259,19 +290,23 @@
             bitCount -= 16;
     }   }
 
+    if (remaining != 1)
+        return ERROR(GENERIC);  /* incorrect normalized distribution */
+    assert(symbol <= alphabetSize);
+
     /* flush remaining bitStream */
-    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+    if ((!writeIsSafe) && (out > oend - 2))
+        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
     out[0] = (BYTE)bitStream;
     out[1] = (BYTE)(bitStream>>8);
     out+= (bitCount+7) /8;
 
-    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
-
     return (out-ostart);
 }
 
 
-size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
 {
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
     if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
@@ -279,179 +314,13 @@
     if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
         return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
 
-    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
-}
-
-
-
-/*-**************************************************************
-*  Counting histogram
-****************************************************************/
-/*! FSE_count_simple
-    This function counts byte values within `src`, and store the histogram into table `count`.
-    It doesn't use any additional memory.
-    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
-    For this reason, prefer using a table `count` with 256 elements.
-    @return : count of most numerous element.
-*/
-size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
-                        const void* src, size_t srcSize)
-{
-    const BYTE* ip = (const BYTE*)src;
-    const BYTE* const end = ip + srcSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned max=0;
-
-    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
-    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
-
-    while (ip<end) {
-        assert(*ip <= maxSymbolValue);
-        count[*ip++]++;
-    }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-
-    { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
-
-    return (size_t)max;
+    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
 }
 
 
-/* FSE_count_parallel_wksp() :
- * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
- * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
- * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
-static size_t FSE_count_parallel_wksp(
-                                unsigned* count, unsigned* maxSymbolValuePtr,
-                                const void* source, size_t sourceSize,
-                                unsigned checkMax, unsigned* const workSpace)
-{
-    const BYTE* ip = (const BYTE*)source;
-    const BYTE* const iend = ip+sourceSize;
-    unsigned maxSymbolValue = *maxSymbolValuePtr;
-    unsigned max=0;
-    U32* const Counting1 = workSpace;
-    U32* const Counting2 = Counting1 + 256;
-    U32* const Counting3 = Counting2 + 256;
-    U32* const Counting4 = Counting3 + 256;
-
-    memset(workSpace, 0, 4*256*sizeof(unsigned));
-
-    /* safety checks */
-    if (!sourceSize) {
-        memset(count, 0, maxSymbolValue + 1);
-        *maxSymbolValuePtr = 0;
-        return 0;
-    }
-    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
-
-    /* by stripes of 16 bytes */
-    {   U32 cached = MEM_read32(ip); ip += 4;
-        while (ip < iend-15) {
-            U32 c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-            c = cached; cached = MEM_read32(ip); ip += 4;
-            Counting1[(BYTE) c     ]++;
-            Counting2[(BYTE)(c>>8) ]++;
-            Counting3[(BYTE)(c>>16)]++;
-            Counting4[       c>>24 ]++;
-        }
-        ip-=4;
-    }
-
-    /* finish last symbols */
-    while (ip<iend) Counting1[*ip++]++;
-
-    if (checkMax) {   /* verify stats will fit into destination table */
-        U32 s; for (s=255; s>maxSymbolValue; s--) {
-            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
-            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
-    }   }
-
-    {   U32 s;
-        if (maxSymbolValue > 255) maxSymbolValue = 255;
-        for (s=0; s<=maxSymbolValue; s++) {
-            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
-            if (count[s] > max) max = count[s];
-    }   }
-
-    while (!count[maxSymbolValue]) maxSymbolValue--;
-    *maxSymbolValuePtr = maxSymbolValue;
-    return (size_t)max;
-}
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                          const void* source, size_t sourceSize,
-                          unsigned* workSpace)
-{
-    if (sourceSize < 1500) /* heuristic threshold */
-        return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
-    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
-}
-
-/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
-size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
-                     const void* source, size_t sourceSize)
-{
-    unsigned tmpCounters[1024];
-    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
-}
-
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* source, size_t sourceSize, unsigned* workSpace)
-{
-    if (*maxSymbolValuePtr < 255)
-        return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
-    *maxSymbolValuePtr = 255;
-    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
-}
-
-size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
-                 const void* src, size_t srcSize)
-{
-    unsigned tmpCounters[1024];
-    return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
-}
-
-
-
 /*-**************************************************************
 *  FSE Compression Code
 ****************************************************************/
-/*! FSE_sizeof_CTable() :
-    FSE_CTable is a variable size structure which contains :
-    `U16 tableLog;`
-    `U16 maxSymbolValue;`
-    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
-    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
-Allocation is manual (C standard does not support variable-size structures).
-*/
-size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
-{
-    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
-    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-}
 
 FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
 {
@@ -466,7 +335,7 @@
 /* provides the minimum logSize to safely represent a distribution */
 static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
 {
-    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
     U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
     U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
     assert(srcSize > 1); /* Not supported, RLE should be used instead */
@@ -529,6 +398,9 @@
     }
     ToDistribute = (1 << tableLog) - distributed;
 
+    if (ToDistribute == 0)
+        return 0;
+
     if ((total / ToDistribute) > lowOne) {
         /* risk of rounding to zero */
         lowOne = (U32)((total * 3) / (ToDistribute * 2));
@@ -629,11 +501,11 @@
         U32 s;
         U32 nTotal = 0;
         for (s=0; s<=maxSymbolValue; s++)
-            printf("%3i: %4i \n", s, normalizedCounter[s]);
+            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
         for (s=0; s<=maxSymbolValue; s++)
             nTotal += abs(normalizedCounter[s]);
         if (nTotal != (1U<<tableLog))
-            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
         getchar();
     }
 #endif
@@ -800,7 +672,7 @@
     if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
+    {   CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
         if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
         if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
         if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
@@ -835,7 +707,7 @@
 size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
 {
     fseWkspMax_t scratchBuffer;
-    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
+    DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* a compilation failure here means scratchBuffer is not large enough */
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
     return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/hist.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,195 @@
+/* ******************************************************************
+   hist : Histogram functions
+   part of Finite State Entropy project
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "mem.h"             /* U32, BYTE, etc. */
+#include "debug.h"           /* assert, DEBUGLOG */
+#include "error_private.h"   /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ *  Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const end = ip + srcSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned largestCount=0;
+
+    memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+    while (ip<end) {
+        assert(*ip <= maxSymbolValue);
+        count[*ip++]++;
+    }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+
+    {   U32 s;
+        for (s=0; s<=maxSymbolValue; s++)
+            if (count[s] > largestCount) largestCount = count[s];
+    }
+
+    return largestCount;
+}
+
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ *           or an error code (notably when a symbol value exceeds *maxSymbolValuePtr). */
+static size_t HIST_count_parallel_wksp(
+                                unsigned* count, unsigned* maxSymbolValuePtr,
+                                const void* source, size_t sourceSize,
+                                unsigned checkMax,
+                                unsigned* const workSpace)
+{
+    const BYTE* ip = (const BYTE*)source;
+    const BYTE* const iend = ip+sourceSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+    U32* const Counting1 = workSpace;
+    U32* const Counting2 = Counting1 + 256;
+    U32* const Counting3 = Counting2 + 256;
+    U32* const Counting4 = Counting3 + 256;
+
+    memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+    /* safety checks */
+    if (!sourceSize) {
+        memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));  /* zero the full table of unsigned */
+        *maxSymbolValuePtr = 0;
+        return 0;
+    }
+    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
+
+    /* by stripes of 16 bytes */
+    {   U32 cached = MEM_read32(ip); ip += 4;
+        while (ip < iend-15) {
+            U32 c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+        }
+        ip-=4;
+    }
+
+    /* finish last symbols */
+    while (ip<iend) Counting1[*ip++]++;
+
+    if (checkMax) {   /* verify stats will fit into destination table */
+        U32 s; for (s=255; s>maxSymbolValue; s--) {
+            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+    }   }
+
+    {   U32 s;
+        if (maxSymbolValue > 255) maxSymbolValue = 255;
+        for (s=0; s<=maxSymbolValue; s++) {
+            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+            if (count[s] > max) max = count[s];
+    }   }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+    return (size_t)max;
+}
+
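The striping above is the point of the comment on HIST_count_parallel_wksp(): four private tables break the store-to-load dependency a single count[] array would impose, letting an out-of-order core overlap the increments. A minimal toy sketch of the same idea, independent of this file (toy_hist4 is an illustrative name, not a library function):

    #include <stddef.h>

    static void toy_hist4(unsigned dst[256], const unsigned char* p, size_t n)
    {
        unsigned t[4][256] = {{0}};    /* four independent counting tables */
        size_t i;
        for (i = 0; i + 4 <= n; i += 4) {
            t[0][p[i  ]]++;            /* the four increments touch disjoint */
            t[1][p[i+1]]++;            /* tables, so they carry no dependency */
            t[2][p[i+2]]++;            /* on each other and can retire        */
            t[3][p[i+3]]++;            /* out of order                        */
        }
        for (; i < n; i++) t[0][p[i]]++;   /* tail */
        {   int s;
            for (s = 0; s < 256; s++)      /* recombine at the end */
                dst[s] = t[0][s] + t[1][s] + t[2][s] + t[3][s];
        }
    }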
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                          const void* source, size_t sourceSize,
+                          unsigned* workSpace)
+{
+    if (sourceSize < 1500) /* heuristic threshold */
+        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                     const void* source, size_t sourceSize)
+{
+    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* source, size_t sourceSize, unsigned* workSpace)
+{
+    if (*maxSymbolValuePtr < 255)
+        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
+    *maxSymbolValuePtr = 255;
+    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
+}
+
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* src, size_t srcSize)
+{
+    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
+}
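For orientation, a hedged sketch of a caller driving the workspace variant above; count_bytes and its buffers are illustrative, not part of the library:

    #include "hist.h"

    static size_t count_bytes(unsigned count[256], const void* src, size_t srcSize)
    {
        unsigned maxSymbolValue = 255;        /* in/out: updated to the actual max */
        unsigned wksp[HIST_WKSP_SIZE_U32];    /* caller-provided scratch space */
        size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
                                               src, srcSize, wksp);
        /* on success, count[0..maxSymbolValue] holds the byte histogram and
         * largest is the frequency of the most common byte */
        return largest;
    }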
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/hist.h	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,92 @@
+/* ******************************************************************
+   hist : Histogram functions
+   part of Finite State Entropy project
+   Copyright (C) 2013-present, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include <stddef.h>   /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ *  Provides the precise count of each byte within a table 'count'.
+ *  'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified),
+ *            or an error code, which can be tested using HIST_isError().
+ *            note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                  const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+/** HIST_count_wksp() :
+ *  Same as HIST_count(), but using an externally provided scratch buffer.
+ *  The benefit is that this function uses very little stack space.
+ * `workSpace` must be a table of unsigned of size >= HIST_WKSP_SIZE_U32
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                       const void* src, size_t srcSize,
+                       unsigned* workSpace);
+
+/** HIST_countFast() :
+ *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                      const void* src, size_t srcSize);
+
+/** HIST_countFast_wksp() :
+ *  Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of unsigned of size >= HIST_WKSP_SIZE_U32
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize,
+                           unsigned* workSpace);
+
+/*! HIST_count_simple() :
+ *  Like HIST_countFast(), this function is unsafe,
+ *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ *  It is also a bit slower for large inputs.
+ *  However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ *  Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                           const void* src, size_t srcSize);
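To make the safe/unsafe split documented above concrete, a hedged usage sketch (data and dataSize are assumed to be in scope):

    unsigned count[256];
    unsigned maxSym = 255;
    /* HIST_count() validates symbols against maxSym and can fail: */
    size_t r = HIST_count(count, &maxSym, data, dataSize);
    if (HIST_isError(r)) { /* e.g. a byte value exceeded maxSym */ }
    /* HIST_countFast() skips that check; it is only correct when every
     * byte of data is already known to be <= maxSym: */
    r = HIST_countFast(count, &maxSym, data, dataSize);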
--- a/contrib/python-zstandard/zstd/compress/huf_compress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/huf_compress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -45,8 +45,9 @@
 ****************************************************************/
 #include <string.h>     /* memcpy, memset */
 #include <stdio.h>      /* printf (debug) */
+#include "compiler.h"
 #include "bitstream.h"
-#include "compiler.h"
+#include "hist.h"
 #define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
 #include "fse.h"        /* header compression */
 #define HUF_STATIC_LINKING_ONLY
@@ -58,7 +59,7 @@
 *  Error Management
 ****************************************************************/
 #define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
 #define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
 #define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
 
@@ -81,7 +82,7 @@
  * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
  */
 #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
 {
     BYTE* const ostart = (BYTE*) dst;
     BYTE* op = ostart;
@@ -100,9 +101,9 @@
     if (wtSize <= 1) return 0;  /* Not compressible */
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+    {   unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
         if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
-        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
+        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
     }
 
     tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
@@ -216,6 +217,13 @@
     return readSize;
 }
 
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
+{
+    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
+    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+    return table[symbolValue].nbBits;
+}
+
 
 typedef struct nodeElt_s {
     U32 count;
@@ -660,9 +668,9 @@
     }
 
     /* Scan input and build symbol stats */
-    {   CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
+    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
         if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
-        if (largest <= (srcSize >> 7)+1) return 0;   /* heuristic : probably not compressible enough */
+        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
     }
 
     /* Check validity of previous table */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -8,21 +8,13 @@
  * You may select, at your option, one of the above-listed licenses.
  */
 
-
-/*-*************************************
-*  Tuning parameters
-***************************************/
-#ifndef ZSTD_CLEVEL_DEFAULT
-#  define ZSTD_CLEVEL_DEFAULT 3
-#endif
-
-
 /*-*************************************
 *  Dependencies
 ***************************************/
 #include <string.h>         /* memset */
 #include "cpu.h"
 #include "mem.h"
+#include "hist.h"           /* HIST_countFast_wksp */
 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
 #include "fse.h"
 #define HUF_STATIC_LINKING_ONLY
@@ -54,7 +46,6 @@
     size_t workspaceSize;
     ZSTD_matchState_t matchState;
     ZSTD_compressedBlockState_t cBlockState;
-    ZSTD_compressionParameters cParams;
     ZSTD_customMem customMem;
     U32 dictID;
 };  /* typedef'd to ZSTD_CDict within "zstd.h" */
@@ -64,17 +55,26 @@
     return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
 }
 
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+    assert(cctx != NULL);
+    memset(cctx, 0, sizeof(*cctx));
+    cctx->customMem = memManager;
+    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+    {   size_t const err = ZSTD_CCtx_resetParameters(cctx);
+        assert(!ZSTD_isError(err));
+        (void)err;
+    }
+}
+
 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
 {
     ZSTD_STATIC_ASSERT(zcss_init==0);
     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
-    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
+    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
         if (!cctx) return NULL;
-        cctx->customMem = customMem;
-        cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
-        cctx->requestedParams.fParams.contentSizeFlag = 1;
-        cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+        ZSTD_initCCtx(cctx, customMem);
         return cctx;
     }
 }
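For context, a minimal sketch of a caller handing ZSTD_createCCtx_advanced() a custom allocator; the counting wrappers are illustrative, while ZSTD_customMem itself is the public struct from zstd.h:

    #include <stdlib.h>
    #include "zstd.h"

    static void* counting_alloc(void* opaque, size_t size)
    {   *(size_t*)opaque += size; return malloc(size); }

    static void counting_free(void* opaque, void* address)
    {   (void)opaque; free(address); }

    size_t allocated = 0;
    ZSTD_customMem const cmem = { counting_alloc, counting_free, &allocated };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
    /* ... use cctx ...; allocated now tracks total bytes requested */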
@@ -102,17 +102,24 @@
     return cctx;
 }
 
-size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
 {
-    if (cctx==NULL) return 0;   /* support free on NULL */
-    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
+    assert(cctx != NULL);
+    assert(cctx->staticSize == 0);
     ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
     ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
 #ifdef ZSTD_MULTITHREAD
     ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
 #endif
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support free on NULL */
+    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
+    ZSTD_freeCCtxContent(cctx);
     ZSTD_free(cctx, cctx->customMem);
-    return 0;   /* reserved as a potential error code in the future */
+    return 0;
 }
 
 
@@ -143,21 +150,6 @@
 /* private API call, for dictBuilder only */
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
 
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
-        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
-{
-    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
-    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
-    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
-    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
-    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
-    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
-    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
-    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
-    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
-    return cParams;
-}
-
 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
         ZSTD_compressionParameters cParams)
 {
@@ -251,7 +243,6 @@
     case ZSTD_p_minMatch:
     case ZSTD_p_targetLength:
     case ZSTD_p_compressionStrategy:
-    case ZSTD_p_compressLiterals:
         return 1;
 
     case ZSTD_p_format:
@@ -268,6 +259,7 @@
     case ZSTD_p_ldmMinMatch:
     case ZSTD_p_ldmBucketSizeLog:
     case ZSTD_p_ldmHashEveryLog:
+    case ZSTD_p_forceAttachDict:
     default:
         return 0;
     }
@@ -302,7 +294,6 @@
         if (cctx->cdict) return ERROR(stage_wrong);
         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
 
-    case ZSTD_p_compressLiterals:
     case ZSTD_p_contentSizeFlag:
     case ZSTD_p_checksumFlag:
     case ZSTD_p_dictIDFlag:
@@ -313,6 +304,9 @@
                                    * default : 0 when using a CDict, 1 when using a Prefix */
         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
 
+    case ZSTD_p_forceAttachDict:
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
     case ZSTD_p_nbWorkers:
         if ((value>0) && cctx->staticSize) {
             return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
@@ -351,7 +345,6 @@
         int cLevel = (int)value;  /* cast expected to restore negative sign */
         if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
         if (cLevel) {  /* 0 : does not change current level */
-            CCtxParams->disableLiteralCompression = (cLevel<0);  /* negative levels disable huffman */
             CCtxParams->compressionLevel = cLevel;
         }
         if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
@@ -399,10 +392,6 @@
         CCtxParams->cParams.strategy = (ZSTD_strategy)value;
         return (size_t)CCtxParams->cParams.strategy;
 
-    case ZSTD_p_compressLiterals:
-        CCtxParams->disableLiteralCompression = !value;
-        return !CCtxParams->disableLiteralCompression;
-
     case ZSTD_p_contentSizeFlag :
         /* Content size written in frame header _when known_ (default:1) */
         DEBUGLOG(4, "set content size flag = %u", (value>0));
@@ -423,6 +412,12 @@
         CCtxParams->forceWindow = (value > 0);
         return CCtxParams->forceWindow;
 
+    case ZSTD_p_forceAttachDict :
+        CCtxParams->attachDictPref = value ?
+                                    (value > 0 ? ZSTD_dictForceAttach : ZSTD_dictForceCopy) :
+                                     ZSTD_dictDefaultAttach;
+        return CCtxParams->attachDictPref;
+
     case ZSTD_p_nbWorkers :
 #ifndef ZSTD_MULTITHREAD
         if (value>0) return ERROR(parameter_unsupported);
@@ -477,6 +472,98 @@
     }
 }
 
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value)
+{
+    return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParam_getParameter(
+        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned* value)
+{
+    switch(param)
+    {
+    case ZSTD_p_format :
+        *value = CCtxParams->format;
+        break;
+    case ZSTD_p_compressionLevel :
+        *value = CCtxParams->compressionLevel;
+        break;
+    case ZSTD_p_windowLog :
+        *value = CCtxParams->cParams.windowLog;
+        break;
+    case ZSTD_p_hashLog :
+        *value = CCtxParams->cParams.hashLog;
+        break;
+    case ZSTD_p_chainLog :
+        *value = CCtxParams->cParams.chainLog;
+        break;
+    case ZSTD_p_searchLog :
+        *value = CCtxParams->cParams.searchLog;
+        break;
+    case ZSTD_p_minMatch :
+        *value = CCtxParams->cParams.searchLength;
+        break;
+    case ZSTD_p_targetLength :
+        *value = CCtxParams->cParams.targetLength;
+        break;
+    case ZSTD_p_compressionStrategy :
+        *value = (unsigned)CCtxParams->cParams.strategy;
+        break;
+    case ZSTD_p_contentSizeFlag :
+        *value = CCtxParams->fParams.contentSizeFlag;
+        break;
+    case ZSTD_p_checksumFlag :
+        *value = CCtxParams->fParams.checksumFlag;
+        break;
+    case ZSTD_p_dictIDFlag :
+        *value = !CCtxParams->fParams.noDictIDFlag;
+        break;
+    case ZSTD_p_forceMaxWindow :
+        *value = CCtxParams->forceWindow;
+        break;
+    case ZSTD_p_forceAttachDict :
+        *value = CCtxParams->attachDictPref;
+        break;
+    case ZSTD_p_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+        assert(CCtxParams->nbWorkers == 0);
+#endif
+        *value = CCtxParams->nbWorkers;
+        break;
+    case ZSTD_p_jobSize :
+#ifndef ZSTD_MULTITHREAD
+        return ERROR(parameter_unsupported);
+#else
+        *value = CCtxParams->jobSize;
+        break;
+#endif
+    case ZSTD_p_overlapSizeLog :
+#ifndef ZSTD_MULTITHREAD
+        return ERROR(parameter_unsupported);
+#else
+        *value = CCtxParams->overlapSizeLog;
+        break;
+#endif
+    case ZSTD_p_enableLongDistanceMatching :
+        *value = CCtxParams->ldmParams.enableLdm;
+        break;
+    case ZSTD_p_ldmHashLog :
+        *value = CCtxParams->ldmParams.hashLog;
+        break;
+    case ZSTD_p_ldmMinMatch :
+        *value = CCtxParams->ldmParams.minMatchLength;
+        break;
+    case ZSTD_p_ldmBucketSizeLog :
+        *value = CCtxParams->ldmParams.bucketSizeLog;
+        break;
+    case ZSTD_p_ldmHashEveryLog :
+        *value = CCtxParams->ldmParams.hashEveryLog;
+        break;
+    default: return ERROR(parameter_unsupported);
+    }
+    return 0;
+}
+
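A hedged sketch of the new getter paired with the existing setter (assumes a live cctx and <assert.h>):

    unsigned level = 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
    if (!ZSTD_isError(ZSTD_CCtx_getParameter(cctx, ZSTD_p_compressionLevel, &level)))
        assert(level == 19);   /* reads back the requested value */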
 /** ZSTD_CCtx_setParametersUsingCCtxParams() :
  *  just applies `params` into `cctx`
  *  no action is performed, parameters are merely stored.
@@ -487,6 +574,7 @@
 size_t ZSTD_CCtx_setParametersUsingCCtxParams(
         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
 {
+    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
     if (cctx->cdict) return ERROR(stage_wrong);
 
@@ -565,18 +653,19 @@
     return 0;
 }
 
-static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
+/*! ZSTD_CCtx_reset() :
+ *  Also dumps dictionary */
+void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
 {
     cctx->streamStage = zcss_init;
     cctx->pledgedSrcSizePlusOne = 0;
 }
 
-/*! ZSTD_CCtx_reset() :
- *  Also dumps dictionary */
-void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
+size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx)
 {
-    ZSTD_startNewCompression(cctx);
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
     cctx->cdict = NULL;
+    return ZSTD_CCtxParams_reset(&cctx->requestedParams);
 }
 
 /** ZSTD_checkCParams() :
@@ -589,8 +678,9 @@
     CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
     CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
     CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
-    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN)
-        return ERROR(parameter_unsupported);
+    ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0);
+    if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX)
+        return ERROR(parameter_outOfBound);
     if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
         return ERROR(parameter_unsupported);
     return 0;
@@ -599,7 +689,8 @@
 /** ZSTD_clampCParams() :
  *  make CParam values within valid range.
  *  @return : valid CParams */
-static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
 {
 #   define CLAMP(val,min,max) {      \
         if (val<min) val=min;        \
@@ -610,8 +701,10 @@
     CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
     CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
     CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
-    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) cParams.targetLength = ZSTD_TARGETLENGTH_MIN;
-    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
+    ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0);
+    if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX)
+        cParams.targetLength = ZSTD_TARGETLENGTH_MAX;
+    CLAMP(cParams.strategy, ZSTD_fast, ZSTD_btultra);
     return cParams;
 }
 
@@ -627,8 +720,11 @@
     optimize `cPar` for a given input (`srcSize` and `dictSize`).
     mostly downsizing to reduce memory consumption and initialization latency.
     Both `srcSize` and `dictSize` are optional (use 0 if unknown).
-    Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
-ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+    Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+                            unsigned long long srcSize,
+                            size_t dictSize)
 {
     static const U64 minSrcSize = 513; /* (1<<9) + 1 */
     static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
@@ -648,7 +744,7 @@
                             ZSTD_highbit32(tSize-1) + 1;
         if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
     }
-    if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
     {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
         if (cycleLog > cPar.windowLog)
             cPar.chainLog -= (cycleLog - cPar.windowLog);
@@ -660,13 +756,34 @@
     return cPar;
 }
 
-ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+                   unsigned long long srcSize,
+                   size_t dictSize)
 {
     cPar = ZSTD_clampCParams(cPar);
     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
 }
 
-static size_t ZSTD_sizeof_matchState(ZSTD_compressionParameters const* cParams, const U32 forCCtx)
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+{
+    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
+    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
+    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
+    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
+    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
+    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
+    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+    assert(!ZSTD_checkCParams(cParams));
+    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+}
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+                       const U32 forCCtx)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
@@ -693,7 +810,7 @@
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
         U32    const divider = (cParams.searchLength==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
         size_t const entropySpace = HUF_WORKSPACE_SIZE;
         size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
         size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
@@ -752,12 +869,14 @@
     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
 }
 
-static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) {
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
     return ZSTD_estimateCStreamSize_usingCParams(cParams);
 }
 
-size_t ZSTD_estimateCStreamSize(int compressionLevel) {
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
     int level;
     size_t memBudget = 0;
     for (level=1; level<=compressionLevel; level++) {
@@ -786,9 +905,27 @@
         fp.ingested = cctx->consumedSrcSize + buffered;
         fp.consumed = cctx->consumedSrcSize;
         fp.produced = cctx->producedCSize;
+        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within the streaming output buffer */
+        fp.currentJobID = 0;
+        fp.nbActiveWorkers = 0;
         return fp;
 }   }
 
+/*! ZSTD_toFlushNow()
+ *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+    if (cctx->appliedParams.nbWorkers > 0) {
+        return ZSTDMT_toFlushNow(cctx->mtctx);
+    }
+#endif
+    (void)cctx;
+    return 0;   /* over-simplification; could also check whether the context is currently running in streaming mode, in which case it could report how many bytes remain to be flushed within the output buffer */
+}
+
+
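A hedged sketch of how a streaming caller might poll this; only informative when nbWorkers >= 1, per the comment above:

    /* inside a flush loop: */
    size_t const pending = ZSTD_toFlushNow(cctx);
    if (pending > 0) {
        /* workers still hold data internally; keep flushing */
    }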
 
 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
                                   ZSTD_compressionParameters cParams2)
@@ -799,6 +936,20 @@
          & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
 }
 
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+                                    ZSTD_compressionParameters cParams2)
+{
+    (void)cParams1;
+    (void)cParams2;
+    assert(cParams1.windowLog    == cParams2.windowLog);
+    assert(cParams1.chainLog     == cParams2.chainLog);
+    assert(cParams1.hashLog      == cParams2.hashLog);
+    assert(cParams1.searchLog    == cParams2.searchLog);
+    assert(cParams1.searchLength == cParams2.searchLength);
+    assert(cParams1.targetLength == cParams2.targetLength);
+    assert(cParams1.strategy     == cParams2.strategy);
+}
+
 /** The parameters are equivalent if ldm is not enabled in both sets or
  *  all the parameters are equivalent. */
 static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
@@ -817,33 +968,51 @@
 /* ZSTD_sufficientBuff() :
  * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
  * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
+                            size_t maxNbLit1,
                             ZSTD_buffered_policy_e buffPol2,
                             ZSTD_compressionParameters cParams2,
                             U64 pledgedSrcSize)
 {
     size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
     size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+    size_t const maxNbSeq2 = blockSize2 / ((cParams2.searchLength == 3) ? 3 : 4);
+    size_t const maxNbLit2 = blockSize2;
     size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
-                (U32)windowSize2, cParams2.windowLog);
-    DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
-                (U32)blockSize2, (U32)blockSize1);
-    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
+                (U32)neededBufferSize2, (U32)bufferSize1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
+                (U32)maxNbSeq2, (U32)maxNbSeq1);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
+                (U32)maxNbLit2, (U32)maxNbLit1);
+    return (maxNbLit2 <= maxNbLit1)
+         & (maxNbSeq2 <= maxNbSeq1)
          & (neededBufferSize2 <= bufferSize1);
 }
 
 /** Equivalence for resetCCtx purposes */
 static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                  ZSTD_CCtx_params params2,
-                                 size_t buffSize1, size_t blockSize1,
+                                 size_t buffSize1,
+                                 size_t maxNbSeq1, size_t maxNbLit1,
                                  ZSTD_buffered_policy_e buffPol2,
                                  U64 pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
-    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
-           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
-           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
+    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
+      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
+      return 0;
+    }
+    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
+      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
+      return 0;
+    }
+    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
+                             params2.cParams, pledgedSrcSize)) {
+      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
+      return 0;
+    }
+    return 1;
 }
 
 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
@@ -851,10 +1020,10 @@
     int i;
     for (i = 0; i < ZSTD_REP_NUM; ++i)
         bs->rep[i] = repStartValue[i];
-    bs->entropy.hufCTable_repeatMode = HUF_repeat_none;
-    bs->entropy.offcode_repeatMode = FSE_repeat_none;
-    bs->entropy.matchlength_repeatMode = FSE_repeat_none;
-    bs->entropy.litlength_repeatMode = FSE_repeat_none;
+    bs->entropy.huf.repeatMode = HUF_repeat_none;
+    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
 }
 
 /*! ZSTD_invalidateMatchState()
@@ -866,8 +1035,10 @@
     ZSTD_window_clear(&ms->window);
 
     ms->nextToUpdate = ms->window.dictLimit + 1;
+    ms->nextToUpdate3 = ms->window.dictLimit + 1;
     ms->loadedDictEnd = 0;
     ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
+    ms->dictMatchState = NULL;
 }
 
 /*! ZSTD_continueCCtx() :
@@ -880,6 +1051,7 @@
 
     cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
     cctx->appliedParams = params;
+    cctx->blockState.matchState.cParams = params.cParams;
     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
     cctx->consumedSrcSize = 0;
     cctx->producedCSize = 0;
@@ -900,7 +1072,11 @@
 
 typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
 
-static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compressionParameters const* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+static void*
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+                      void* ptr,
+                const ZSTD_compressionParameters* cParams,
+                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
 {
     size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
     size_t const hSize = ((size_t)1) << cParams->hashLog;
@@ -912,6 +1088,9 @@
 
     ms->hashLog3 = hashLog3;
     memset(&ms->window, 0, sizeof(ms->window));
+    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
+    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
+    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
     ZSTD_invalidateMatchState(ms);
 
     /* opt parser space */
@@ -937,14 +1116,24 @@
     ms->hashTable3 = ms->chainTable + chainSize;
     ptr = ms->hashTable3 + h3Size;
 
+    ms->cParams = *cParams;
+
     assert(((size_t)ptr & 3) == 0);
     return ptr;
 }
 
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when the workspace has been too large
+                                         * for at least this many consecutive resets,
+                                         * the context's memory usage is considered wasteful,
+                                         * because it is sized for a worst case that rarely happens.
+                                         * In that case, resize it down to free some memory */
+
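Restated as a standalone model of the policy these two macros implement; should_resize and oversizedFor are illustrative names, not library symbols:

    static int should_resize(size_t wsSize, size_t needed, unsigned* oversizedFor)
    {
        int const tooSmall = wsSize < needed;
        int const tooLarge = wsSize > ZSTD_WORKSPACETOOLARGE_FACTOR * needed;
        *oversizedFor = tooLarge ? *oversizedFor + 1 : 0;   /* consecutive streak */
        return tooSmall
            || (tooLarge && *oversizedFor > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
    }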
 /*! ZSTD_resetCCtx_internal() :
     note : `params` are assumed fully validated at this stage */
 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
-                                      ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                      ZSTD_CCtx_params params,
+                                      U64 pledgedSrcSize,
                                       ZSTD_compResetPolicy_e const crp,
                                       ZSTD_buffered_policy_e const zbuff)
 {
@@ -954,34 +1143,35 @@
 
     if (crp == ZSTDcrp_continue) {
         if (ZSTD_equivalentParams(zc->appliedParams, params,
-                                zc->inBuffSize, zc->blockSize,
-                                zbuff, pledgedSrcSize)) {
-            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)",
-                        zc->appliedParams.cParams.windowLog, (U32)zc->blockSize);
-            return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
+                                  zc->inBuffSize,
+                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
+                                  zbuff, pledgedSrcSize)) {
+            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
+                        zc->appliedParams.cParams.windowLog, zc->blockSize);
+            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
+            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
+                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
     }   }
     DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
 
     if (params.ldmParams.enableLdm) {
         /* Adjust long distance matching parameters */
-        params.ldmParams.windowLog = params.cParams.windowLog;
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
         assert(params.ldmParams.hashEveryLog < 32);
-        zc->ldmState.hashPower =
-                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+        zc->ldmState.hashPower = ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
     }
 
     {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
         U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
         size_t const maxNbSeq = blockSize / divider;
-        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
         size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
         size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
-        void* ptr;
+        void* ptr;   /* used to partition workSpace */
 
         /* Check if workSpace is large enough, alloc a new one if needed */
         {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
@@ -993,14 +1183,20 @@
             size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
                                        ldmSeqSpace + matchStateSize + tokenSpace +
                                        bufferSpace;
-            DEBUGLOG(4, "Need %uKB workspace, including %uKB for match state, and %uKB for buffers",
-                        (U32)(neededSpace>>10), (U32)(matchStateSize>>10), (U32)(bufferSpace>>10));
-            DEBUGLOG(4, "windowSize: %u - blockSize: %u", (U32)windowSize, (U32)blockSize);
-
-            if (zc->workSpaceSize < neededSpace) {  /* too small : resize */
-                DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK",
-                            (unsigned)(zc->workSpaceSize>>10),
-                            (unsigned)(neededSpace>>10));
+
+            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
+            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
+            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
+            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
+
+            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
+                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
+            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+            if (workSpaceTooSmall || workSpaceWasteful) {
+                DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
+                            zc->workSpaceSize >> 10,
+                            neededSpace >> 10);
                 /* static cctx : no resize, error out */
                 if (zc->staticSize) return ERROR(memory_allocation);
 
@@ -1009,9 +1205,11 @@
                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                 if (zc->workSpace == NULL) return ERROR(memory_allocation);
                 zc->workSpaceSize = neededSpace;
-                ptr = zc->workSpace;
-
-                /* Statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+                zc->workSpaceOversizedDuration = 0;
+
+                /* Statically sized space.
+                 * entropyWorkspace never moves,
+                 * though prev/next block swap places */
                 assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                 assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
                 zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
@@ -1022,6 +1220,7 @@
 
         /* init params */
         zc->appliedParams = params;
+        zc->blockState.matchState.cParams = params.cParams;
         zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
         zc->consumedSrcSize = 0;
         zc->producedCSize = 0;
@@ -1058,13 +1257,18 @@
         ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
 
         /* sequences storage */
+        zc->seqStore.maxNbSeq = maxNbSeq;
         zc->seqStore.sequencesStart = (seqDef*)ptr;
         ptr = zc->seqStore.sequencesStart + maxNbSeq;
         zc->seqStore.llCode = (BYTE*) ptr;
         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
-        ptr = zc->seqStore.litStart + blockSize;
+        /* ZSTD_wildcopy() is used to copy into the literals buffer,
+         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+         */
+        zc->seqStore.maxNbLit = blockSize;
+        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
 
         /* ldm bucketOffsets table */
         if (params.ldmParams.enableLdm) {
@@ -1098,28 +1302,110 @@
     assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
 }
 
-static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[(unsigned)ZSTD_btultra+1] = {
+    8 KB, /* unused */
+    8 KB, /* ZSTD_fast */
+    16 KB, /* ZSTD_dfast */
+    32 KB, /* ZSTD_greedy */
+    32 KB, /* ZSTD_lazy */
+    32 KB, /* ZSTD_lazy2 */
+    32 KB, /* ZSTD_btlazy2 */
+    32 KB, /* ZSTD_btopt */
+    8 KB /* ZSTD_btultra */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+                                 ZSTD_CCtx_params params,
+                                 U64 pledgedSrcSize)
+{
+    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+    return ( pledgedSrcSize <= cutoff
+          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+          || params.attachDictPref == ZSTD_dictForceAttach )
+        && params.attachDictPref != ZSTD_dictForceCopy
+        && !params.forceWindow; /* dictMatchState isn't correctly
+                                 * handled in _enforceMaxDist */
+}
+
+static size_t ZSTD_resetCCtx_byAttachingCDict(
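Callers can override this heuristic through the new ZSTD_p_forceAttachDict parameter; a hedged sketch (assumes a live cctx):

    /* always reference the dictionary tables in place, even for large inputs: */
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_forceAttachDict, 1);
    /* a value of 0 restores the size-based default decided above */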
+    ZSTD_CCtx* cctx,
+    const ZSTD_CDict* cdict,
+    ZSTD_CCtx_params params,
+    U64 pledgedSrcSize,
+    ZSTD_buffered_policy_e zbuff)
+{
+    {
+        const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+        unsigned const windowLog = params.cParams.windowLog;
+        assert(windowLog != 0);
+        /* Resize working context table params for input only, since the dict
+         * has its own tables. */
+        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
+        params.cParams.windowLog = windowLog;
+        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                ZSTDcrp_continue, zbuff);
+        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+    }
+
+    {
+        const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+                                  - cdict->matchState.window.base);
+        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+        if (cdictLen == 0) {
+            /* don't even attach dictionaries with no contents */
+            DEBUGLOG(4, "skipping attaching empty dictionary");
+        } else {
+            DEBUGLOG(4, "attaching dictionary into context");
+            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+            /* prep working match state so dict matches never have negative indices
+             * when they are translated to the working context's index space. */
+            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+                cctx->blockState.matchState.window.nextSrc =
+                    cctx->blockState.matchState.window.base + cdictEnd;
+                ZSTD_window_clear(&cctx->blockState.matchState.window);
+            }
+            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+        }
+    }
+
+    cctx->dictID = cdict->dictID;
+
+    /* copy block state */
+    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+    return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                             const ZSTD_CDict* cdict,
-                            unsigned windowLog,
-                            ZSTD_frameParameters fParams,
+                            ZSTD_CCtx_params params,
                             U64 pledgedSrcSize,
                             ZSTD_buffered_policy_e zbuff)
 {
-    {   ZSTD_CCtx_params params = cctx->requestedParams;
+    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+    DEBUGLOG(4, "copying dictionary into context");
+
+    {   unsigned const windowLog = params.cParams.windowLog;
+        assert(windowLog != 0);
         /* Copy only compression parameters related to tables. */
-        params.cParams = cdict->cParams;
-        if (windowLog) params.cParams.windowLog = windowLog;
-        params.fParams = fParams;
+        params.cParams = *cdict_cParams;
+        params.cParams.windowLog = windowLog;
         ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                 ZSTDcrp_noMemset, zbuff);
-        assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy);
-        assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog);
-        assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog);
+        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
     }
 
     /* copy tables */
-    {   size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict->cParams.chainLog);
-        size_t const hSize =  (size_t)1 << cdict->cParams.hashLog;
+    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
         size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
         assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
         assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
@@ -1127,6 +1413,7 @@
         assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
         memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
     }
+
     /* Zero the hashTable3, since the cdict never fills it */
     {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
         assert(cdict->matchState.hashLog3 == 0);
@@ -1134,14 +1421,14 @@
     }
 
     /* copy dictionary offsets */
-    {
-        ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
         ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
         dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
         dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
     }
+
     cctx->dictID = cdict->dictID;
 
     /* copy block state */
@@ -1150,6 +1437,27 @@
     return 0;
 }
 
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+                            const ZSTD_CDict* cdict,
+                            ZSTD_CCtx_params params,
+                            U64 pledgedSrcSize,
+                            ZSTD_buffered_policy_e zbuff)
+{
+
+    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+
+    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+        return ZSTD_resetCCtx_byAttachingCDict(
+            cctx, cdict, params, pledgedSrcSize, zbuff);
+    } else {
+        return ZSTD_resetCCtx_byCopyingCDict(
+            cctx, cdict, params, pledgedSrcSize, zbuff);
+    }
+}
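
For orientation, the attach-versus-copy decision above is reached through zstd's public dictionary API. A minimal sketch using the one-shot CDict entry points (real public API; buffer names illustrative):

    #include <zstd.h>

    /* Compress src with a prebuilt dictionary. Whether the internal reset
     * attaches or copies the CDict tables is decided per compression by
     * ZSTD_shouldAttachDict() above. */
    static size_t compress_with_cdict(void* dst, size_t dstCap,
                                      const void* src, size_t srcSize,
                                      const void* dictBuf, size_t dictSize)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                      src, srcSize, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return cSize;   /* compressed size, or an error code (check ZSTD_isError) */
    }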
+
 /*! ZSTD_copyCCtx_internal() :
  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
@@ -1192,7 +1500,7 @@
 
     /* copy dictionary offsets */
     {
-        ZSTD_matchState_t const* srcMatchState = &srcCCtx->blockState.matchState;
+        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
         ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
         dstMatchState->window       = srcMatchState->window;
         dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
@@ -1294,15 +1602,15 @@
 
 /* See doc/zstd_compression_format.md for detailed format description */
 
-size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
 {
+    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
     if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    MEM_writeLE24(dst, cBlockHeader24);
     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
-    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
-    return ZSTD_blockHeaderSize+srcSize;
+    return ZSTD_blockHeaderSize + srcSize;
 }
 
-
 static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
     BYTE* const ostart = (BYTE* const)dst;
@@ -1356,16 +1664,24 @@
 }
 
 
-static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
-
-static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
-                                     ZSTD_entropyCTables_t* nextEntropy,
+/* ZSTD_minGain() :
+ * minimum compression required
+ * to generate a compressed block or a compressed literals section.
+ * note : use same formula for both situations */
+static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+    U32 const minlog = (strat==ZSTD_btultra) ? 7 : 6;
+    return (srcSize >> minlog) + 2;
+}
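
Concretely, the higher shift for btultra halves the savings a block must achieve before it is emitted as compressed:

    /* for a 64 KB input:
     *   default strategies : (65536 >> 6) + 2 == 1026 bytes of required gain
     *   ZSTD_btultra       : (65536 >> 7) + 2 ==  514 bytes of required gain */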
+
+static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+                                     ZSTD_hufCTables_t* nextHuf,
                                      ZSTD_strategy strategy, int disableLiteralCompression,
                                      void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                      U32* workspace, const int bmi2)
 {
-    size_t const minGain = ZSTD_minGain(srcSize);
+    size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
     BYTE*  const ostart = (BYTE*)dst;
     U32 singleStream = srcSize < 256;
@@ -1376,27 +1692,25 @@
                 disableLiteralCompression);
 
     /* Prepare nextEntropy assuming reuse of the existing table */
-    nextEntropy->hufCTable_repeatMode = prevEntropy->hufCTable_repeatMode;
-    memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable,
-           sizeof(prevEntropy->hufCTable));
+    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
 
     if (disableLiteralCompression)
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
     /* small ? don't even attempt compression (speed opt) */
 #   define COMPRESS_LITERALS_SIZE_MIN 63
-    {   size_t const minLitSize = (prevEntropy->hufCTable_repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
         if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     }
 
     if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
-    {   HUF_repeat repeat = prevEntropy->hufCTable_repeatMode;
+    {   HUF_repeat repeat = prevHuf->repeatMode;
         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
         cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2)
+                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
                                 : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
-                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2);
+                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
         if (repeat != HUF_repeat_none) {
             /* reused the existing table */
             hType = set_repeat;
@@ -1404,17 +1718,17 @@
     }
 
     if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
-        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     }
     if (cLitSize==1) {
-        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
     }
 
     if (hType == set_compressed) {
         /* using a newly constructed table */
-        nextEntropy->hufCTable_repeatMode = HUF_repeat_check;
+        nextHuf->repeatMode = HUF_repeat_check;
     }
 
     /* Build header */
@@ -1451,6 +1765,7 @@
     BYTE* const mlCodeTable = seqStorePtr->mlCode;
     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     U32 u;
+    assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
         U32 const mlv = sequences[u].matchLength;
@@ -1464,61 +1779,234 @@
         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
 }
 
+
+/**
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabiltyLog256[256] = {
+    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
+    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
+    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
+    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
+    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
+    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
+    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
+    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
+    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
+    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
+    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
+    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
+    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
+    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
+    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
+    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
+    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
+    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
+    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
+    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
+    5,    4,    2,    1,
+};
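
Entries can be spot-checked against the stated formula floor(-log2(x / 256) * 256); a reference computation (illustrative helper, not part of the patch):

    #include <math.h>
    static unsigned refInvLog256(unsigned x)
    {
        return x ? (unsigned)(-log2(x / 256.0) * 256) : 0;
    }
    /* refInvLog256(64) == 512 and refInvLog256(128) == 256,
     * matching kInverseProbabiltyLog256[64] and [128] above. */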
+
+
+/**
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+    unsigned cost = 0;
+    unsigned s;
+    for (s = 0; s <= max; ++s) {
+        unsigned norm = (unsigned)((256 * count[s]) / total);
+        if (count[s] != 0 && norm == 0)
+            norm = 1;
+        assert(count[s] < total);
+        cost += count[s] * kInverseProbabiltyLog256[norm];
+    }
+    return cost >> 8;
+}
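
Since the table stores -log2 probabilities in 1/256-bit units, the final >> 8 converts the accumulated sum to whole bits. A worked case:

    /* count = {128, 64, 64}, total = 256 (norm values 128, 64, 64):
     *   cost = 128*256 + 64*512 + 64*512 = 98304
     *   98304 >> 8 = 384 bits, i.e. the Shannon bound of
     *   1.5 bits/symbol over 256 symbols. */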
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed to be >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                                    unsigned const* count, unsigned const max)
+{
+    unsigned const shift = 8 - accuracyLog;
+    size_t cost = 0;
+    unsigned s;
+    assert(accuracyLog <= 8);
+    for (s = 0; s <= max; ++s) {
+        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+        unsigned const norm256 = normAcc << shift;
+        assert(norm256 > 0);
+        assert(norm256 < 256);
+        cost += count[s] * kInverseProbabiltyLog256[norm256];
+    }
+    return cost >> 8;
+}
+
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+  void const* ptr = ctable;
+  U16 const* u16ptr = (U16 const*)ptr;
+  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+  return maxSymbolValue;
+}
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+static size_t ZSTD_fseBitCost(
+    FSE_CTable const* ctable,
+    unsigned const* count,
+    unsigned const max)
+{
+    unsigned const kAccuracyLog = 8;
+    size_t cost = 0;
+    unsigned s;
+    FSE_CState_t cstate;
+    FSE_initCState(&cstate, ctable);
+    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+                    ZSTD_getFSEMaxSymbolValue(ctable), max);
+        return ERROR(GENERIC);
+    }
+    for (s = 0; s <= max; ++s) {
+        unsigned const tableLog = cstate.stateLog;
+        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+        if (count[s] == 0)
+            continue;
+        if (bitCost >= badCost) {
+            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+            return ERROR(GENERIC);
+        }
+        cost += count[s] * bitCost;
+    }
+    return cost >> kAccuracyLog;
+}
+
+/**
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+                              size_t const nbSeq, unsigned const FSELog)
+{
+    BYTE wksp[FSE_NCOUNTBOUND];
+    S16 norm[MaxSeq + 1];
+    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+    CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+
 typedef enum {
     ZSTD_defaultDisallowed = 0,
     ZSTD_defaultAllowed = 1
 } ZSTD_defaultPolicy_e;
 
-MEM_STATIC
-symbolEncodingType_e ZSTD_selectEncodingType(
-        FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
-        U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
+MEM_STATIC symbolEncodingType_e
+ZSTD_selectEncodingType(
+        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+        FSE_CTable const* prevCTable,
+        short const* defaultNorm, U32 defaultNormLog,
+        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_strategy const strategy)
 {
-#define MIN_SEQ_FOR_DYNAMIC_FSE   64
-#define MAX_SEQ_FOR_STATIC_FSE  1000
     ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
-    if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
+    if (mostFrequent == nbSeq) {
+        *repeatMode = FSE_repeat_none;
+        if (isDefaultAllowed && nbSeq <= 2) {
+            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+             * If basic encoding isn't possible, always choose RLE.
+             */
+            DEBUGLOG(5, "Selected set_basic");
+            return set_basic;
+        }
         DEBUGLOG(5, "Selected set_rle");
-        /* Prefer set_basic over set_rle when there are 2 or less symbols,
-         * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
-         * If basic encoding isn't possible, always choose RLE.
-         */
-        *repeatMode = FSE_repeat_check;
         return set_rle;
     }
-    if ( isDefaultAllowed
-      && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
-        DEBUGLOG(5, "Selected set_repeat");
-        return set_repeat;
-    }
-    if ( isDefaultAllowed
-      && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) {
-        DEBUGLOG(5, "Selected set_basic");
-        /* The format allows default tables to be repeated, but it isn't useful.
-         * When using simple heuristics to select encoding type, we don't want
-         * to confuse these tables with dictionaries. When running more careful
-         * analysis, we don't need to waste time checking both repeating tables
-         * and default tables.
-         */
-        *repeatMode = FSE_repeat_none;
-        return set_basic;
+    if (strategy < ZSTD_lazy) {
+        if (isDefaultAllowed) {
+            size_t const staticFse_nbSeq_max = 1000;
+            size_t const mult = 10 - strategy;
+            size_t const baseLog = 3;
+            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
+            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
+            assert(mult <= 9 && mult >= 7);
+            if ( (*repeatMode == FSE_repeat_valid)
+              && (nbSeq < staticFse_nbSeq_max) ) {
+                DEBUGLOG(5, "Selected set_repeat");
+                return set_repeat;
+            }
+            if ( (nbSeq < dynamicFse_nbSeq_min)
+              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+                DEBUGLOG(5, "Selected set_basic");
+                /* The format allows default tables to be repeated, but it isn't useful.
+                 * When using simple heuristics to select encoding type, we don't want
+                 * to confuse these tables with dictionaries. When running more careful
+                 * analysis, we don't need to waste time checking both repeating tables
+                 * and default tables.
+                 */
+                *repeatMode = FSE_repeat_none;
+                return set_basic;
+            }
+        }
+    } else {
+        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+        if (isDefaultAllowed) {
+            assert(!ZSTD_isError(basicCost));
+            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+        }
+        assert(!ZSTD_isError(NCountCost));
+        assert(compressedCost < ERROR(maxCode));
+        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+                    (U32)basicCost, (U32)repeatCost, (U32)compressedCost);
+        if (basicCost <= repeatCost && basicCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_basic");
+            assert(isDefaultAllowed);
+            *repeatMode = FSE_repeat_none;
+            return set_basic;
+        }
+        if (repeatCost <= compressedCost) {
+            DEBUGLOG(5, "Selected set_repeat");
+            assert(!ZSTD_isError(repeatCost));
+            return set_repeat;
+        }
+        assert(compressedCost < basicCost && compressedCost < repeatCost);
     }
     DEBUGLOG(5, "Selected set_compressed");
     *repeatMode = FSE_repeat_check;
     return set_compressed;
 }
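
For strategies at ZSTD_lazy and above, the choice reduces to a three-way minimum over estimated bit costs; disallowed options are priced as ERROR(GENERIC), a huge size_t, so they lose the comparison automatically, and ties break toward set_basic, then set_repeat. The ordering in isolation (names hypothetical):

    typedef enum { PICK_BASIC, PICK_REPEAT, PICK_COMPRESSED } pick_e;

    static pick_e pickCheapest(size_t basicCost, size_t repeatCost,
                               size_t compressedCost)
    {
        if (basicCost <= repeatCost && basicCost <= compressedCost)
            return PICK_BASIC;        /* ties prefer the default tables */
        if (repeatCost <= compressedCost)
            return PICK_REPEAT;       /* then reusing the previous table */
        return PICK_COMPRESSED;      /* otherwise pay for a fresh NCount header */
    }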
 
-MEM_STATIC
-size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
-        FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
-        U32* count, U32 max,
-        BYTE const* codeTable, size_t nbSeq,
-        S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
-        FSE_CTable const* prevCTable, size_t prevCTableSize,
-        void* workspace, size_t workspaceSize)
+MEM_STATIC size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                U32* count, U32 max,
+                const BYTE* codeTable, size_t nbSeq,
+                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+                const FSE_CTable* prevCTable, size_t prevCTableSize,
+                void* workspace, size_t workspaceSize)
 {
     BYTE* op = (BYTE*)dst;
-    BYTE const* const oend = op + dstCapacity;
+    const BYTE* const oend = op + dstCapacity;
 
     switch (type) {
     case set_rle:
@@ -1674,7 +2162,7 @@
 
 #endif
 
-size_t ZSTD_encodeSequences(
+static size_t ZSTD_encodeSequences(
             void* dst, size_t dstCapacity,
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
@@ -1706,10 +2194,11 @@
                               const int bmi2)
 {
     const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
     U32 count[MaxSeq+1];
-    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
-    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
-    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
     U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
     const seqDef* const sequences = seqStorePtr->sequencesStart;
     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
@@ -1720,15 +2209,17 @@
     BYTE* op = ostart;
     size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
     BYTE* seqHead;
+    BYTE* lastNCount = NULL;
 
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
 
     /* Compress literals */
     {   const BYTE* const literals = seqStorePtr->litStart;
         size_t const litSize = seqStorePtr->lit - literals;
+        int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
         size_t const cSize = ZSTD_compressLiterals(
-                                    prevEntropy, nextEntropy,
-                                    cctxParams->cParams.strategy, cctxParams->disableLiteralCompression,
+                                    &prevEntropy->huf, &nextEntropy->huf,
+                                    cctxParams->cParams.strategy, disableLiteralCompression,
                                     op, dstCapacity,
                                     literals, litSize,
                                     workspace, bmi2);
@@ -1747,13 +2238,9 @@
     else
         op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
     if (nbSeq==0) {
-      memcpy(nextEntropy->litlengthCTable, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable));
-      nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
-      memcpy(nextEntropy->offcodeCTable, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable));
-      nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
-      memcpy(nextEntropy->matchlengthCTable, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable));
-      nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
-      return op - ostart;
+        /* Copy the old tables over as if we repeated them */
+        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+        return op - ostart;
     }
 
     /* seqHead : flags for FSE encoding type */
@@ -1763,43 +2250,53 @@
     ZSTD_seqToCodes(seqStorePtr);
     /* build CTable for Literal Lengths */
     {   U32 max = MaxLL;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);   /* can't fail */
         DEBUGLOG(5, "Building LL table");
-        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
-        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
+        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
+        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy);
+        assert(set_basic < set_compressed && set_rle < set_compressed);
+        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
-                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
-                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
+                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
+                                                    workspace, HUF_WORKSPACE_SIZE);
             if (ZSTD_isError(countSize)) return countSize;
+            if (LLtype == set_compressed)
+                lastNCount = op;
             op += countSize;
     }   }
     /* build CTable for Offsets */
     {   U32 max = MaxOff;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);  /* can't fail */
         /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
         ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
         DEBUGLOG(5, "Building OF table");
-        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
-        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
+        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
+        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy);
+        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
-                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
-                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
+                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
+                                                    workspace, HUF_WORKSPACE_SIZE);
             if (ZSTD_isError(countSize)) return countSize;
+            if (Offtype == set_compressed)
+                lastNCount = op;
             op += countSize;
     }   }
     /* build CTable for MatchLengths */
     {   U32 max = MaxML;
-        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
+        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);   /* can't fail */
         DEBUGLOG(5, "Building ML table");
-        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
-        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
+        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
+        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy);
+        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
-                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
-                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
-                    workspace, HUF_WORKSPACE_SIZE);
+                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
+                                                    workspace, HUF_WORKSPACE_SIZE);
             if (ZSTD_isError(countSize)) return countSize;
+            if (MLtype == set_compressed)
+                lastNCount = op;
             op += countSize;
     }   }
 
@@ -1814,21 +2311,37 @@
                                         longOffsets, bmi2);
         if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
         op += bitstreamSize;
+        /* zstd versions <= 1.3.4 mistakenly report corruption when
+         * FSE_readNCount() receives a buffer < 4 bytes.
+         * Fixed by https://github.com/facebook/zstd/pull/1146.
+         * This can happen when the last set_compressed table present is 2
+         * bytes and the bitstream is only one byte.
+         * In this exceedingly rare case, we will simply emit an uncompressed
+         * block, since it isn't worth optimizing.
+         */
+        if (lastNCount && (op - lastNCount) < 4) {
+            /* NCountSize >= 2 && bitstreamSize > 0 ==> op - lastNCount == 3 */
+            assert(op - lastNCount == 3);
+            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+                        "emitting an uncompressed block.");
+            return 0;
+        }
     }
 
     return op - ostart;
 }
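
The minimal shape that triggers the workaround above: the last set_compressed NCount takes 2 bytes and the bitstream 1 byte, so op - lastNCount == 3 and a <= 1.3.4 decoder's FSE_readNCount() would be handed a buffer under 4 bytes. Returning 0 makes the caller emit the block uncompressed instead (see ZSTD_noCompressBlock() in the block-emission path below):

    /* failing layout, in bytes:
     *   [ ... | last NCount (2) | bitstream (1) ]  ->  op - lastNCount == 3 < 4 */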
 
 MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
-                              ZSTD_entropyCTables_t const* prevEntropy,
+                        const ZSTD_entropyCTables_t* prevEntropy,
                               ZSTD_entropyCTables_t* nextEntropy,
-                              ZSTD_CCtx_params const* cctxParams,
+                        const ZSTD_CCtx_params* cctxParams,
                               void* dst, size_t dstCapacity,
                               size_t srcSize, U32* workspace, int bmi2)
 {
     size_t const cSize = ZSTD_compressSequences_internal(
             seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity,
             workspace, bmi2);
+    if (cSize == 0) return 0;
     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
      * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
      */
@@ -1837,40 +2350,55 @@
     if (ZSTD_isError(cSize)) return cSize;
 
     /* Check compressibility */
-    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize);  /* note : fixed formula, maybe should depend on compression level, or strategy */
+    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
         if (cSize >= maxCSize) return 0;  /* block not compressed */
     }
 
-    /* We check that dictionaries have offset codes available for the first
-     * block. After the first block, the offcode table might not have large
-     * enough codes to represent the offsets in the data.
-     */
-    if (nextEntropy->offcode_repeatMode == FSE_repeat_valid)
-        nextEntropy->offcode_repeatMode = FSE_repeat_check;
-
     return cSize;
 }
 
 /* ZSTD_selectBlockCompressor() :
  * Not static, but internal use only (used by long distance matcher)
  * assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
 {
-    static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
+    static const ZSTD_blockCompressor blockCompressor[3][(unsigned)ZSTD_btultra+1] = {
         { ZSTD_compressBlock_fast  /* default for 0 */,
-          ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
-          ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
-          ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
+          ZSTD_compressBlock_fast,
+          ZSTD_compressBlock_doubleFast,
+          ZSTD_compressBlock_greedy,
+          ZSTD_compressBlock_lazy,
+          ZSTD_compressBlock_lazy2,
+          ZSTD_compressBlock_btlazy2,
+          ZSTD_compressBlock_btopt,
+          ZSTD_compressBlock_btultra },
         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
-          ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
-          ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
-          ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
+          ZSTD_compressBlock_fast_extDict,
+          ZSTD_compressBlock_doubleFast_extDict,
+          ZSTD_compressBlock_greedy_extDict,
+          ZSTD_compressBlock_lazy_extDict,
+          ZSTD_compressBlock_lazy2_extDict,
+          ZSTD_compressBlock_btlazy2_extDict,
+          ZSTD_compressBlock_btopt_extDict,
+          ZSTD_compressBlock_btultra_extDict },
+        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
+          ZSTD_compressBlock_fast_dictMatchState,
+          ZSTD_compressBlock_doubleFast_dictMatchState,
+          ZSTD_compressBlock_greedy_dictMatchState,
+          ZSTD_compressBlock_lazy_dictMatchState,
+          ZSTD_compressBlock_lazy2_dictMatchState,
+          ZSTD_compressBlock_btlazy2_dictMatchState,
+          ZSTD_compressBlock_btopt_dictMatchState,
+          ZSTD_compressBlock_btultra_dictMatchState }
     };
+    ZSTD_blockCompressor selectedCompressor;
     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
 
     assert((U32)strat >= (U32)ZSTD_fast);
     assert((U32)strat <= (U32)ZSTD_btultra);
-    return blockCompressor[extDict!=0][(U32)strat];
+    selectedCompressor = blockCompressor[(int)dictMode][(U32)strat];
+    assert(selectedCompressor != NULL);
+    return selectedCompressor;
 }
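
Selection is now a dense 3 x 9 table indexed by dict mode and strategy. Assuming ZSTD_dictMode_e orders its values noDict = 0, extDict = 1, dictMatchState = 2 (matching the three rows above):

    /* illustrative: greedy search against a live dictionary match state */
    ZSTD_blockCompressor const bc =
        ZSTD_selectBlockCompressor(ZSTD_greedy, ZSTD_dictMatchState);
    /* bc == ZSTD_compressBlock_greedy_dictMatchState */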
 
 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
@@ -1880,7 +2408,7 @@
     seqStorePtr->lit += lastLLSize;
 }
 
-static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
 {
     ssPtr->lit = ssPtr->litStart;
     ssPtr->sequences = ssPtr->sequencesStart;
@@ -1892,24 +2420,38 @@
                                         const void* src, size_t srcSize)
 {
     ZSTD_matchState_t* const ms = &zc->blockState.matchState;
-    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
-                (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+    size_t cSize;
+    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%zu, dictLimit=%u, nextToUpdate=%u)",
+                dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+
+    /* Assert that we have correctly flushed the ctx params into the ms's copy */
+    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+
     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
         ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
-        return 0;   /* don't even attempt compression below a certain srcSize */
+        cSize = 0;
+        goto out;  /* don't even attempt compression below a certain srcSize */
     }
     ZSTD_resetSeqStore(&(zc->seqStore));
+    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;   /* required for optimal parser to read stats from dictionary */
+
+    /* A gap between an attached dict and the current window is not safe:
+     * they must remain adjacent, and when that stops being the case, the dict
+     * must be unset. */
+    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
 
     /* limited update after a very long match */
     {   const BYTE* const base = ms->window.base;
         const BYTE* const istart = (const BYTE*)src;
         const U32 current = (U32)(istart-base);
+        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
         if (current > ms->nextToUpdate + 384)
             ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
     }
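
The clamp bounds how much table catch-up a very long match can force:

    /* e.g. nextToUpdate = 1000, current = 2000:
     *   lag 1000 > 384, so nextToUpdate = 2000 - MIN(192, 616) = 1808,
     *   leaving at most 192 positions to re-insert into the tables. */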
 
     /* select and store sequences */
-    {   U32 const extDict = ZSTD_window_hasExtDict(ms->window);
+    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
         size_t lastLLSize;
         {   int i;
             for (i = 0; i < ZSTD_REP_NUM; ++i)
@@ -1922,8 +2464,7 @@
                 ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                        ms, &zc->seqStore,
                                        zc->blockState.nextCBlock->rep,
-                                       &zc->appliedParams.cParams,
-                                       src, srcSize, extDict);
+                                       src, srcSize);
             assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
         } else if (zc->appliedParams.ldmParams.enableLdm) {
             rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
@@ -1939,31 +2480,38 @@
                 ZSTD_ldm_blockCompress(&ldmSeqStore,
                                        ms, &zc->seqStore,
                                        zc->blockState.nextCBlock->rep,
-                                       &zc->appliedParams.cParams,
-                                       src, srcSize, extDict);
+                                       src, srcSize);
             assert(ldmSeqStore.pos == ldmSeqStore.size);
         } else {   /* not long range mode */
-            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
-            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize);
+            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
         }
         {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
             ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
     }   }
 
     /* encode sequences and literals */
-    {   size_t const cSize = ZSTD_compressSequences(&zc->seqStore,
-                                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
-                                &zc->appliedParams,
-                                dst, dstCapacity,
-                                srcSize, zc->entropyWorkspace, zc->bmi2);
-        if (ZSTD_isError(cSize) || cSize == 0) return cSize;
-        /* confirm repcodes and entropy tables */
-        {   ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
-            zc->blockState.prevCBlock = zc->blockState.nextCBlock;
-            zc->blockState.nextCBlock = tmp;
-        }
-        return cSize;
+    cSize = ZSTD_compressSequences(&zc->seqStore,
+            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+            &zc->appliedParams,
+            dst, dstCapacity,
+            srcSize, zc->entropyWorkspace, zc->bmi2);
+
+out:
+    if (!ZSTD_isError(cSize) && cSize != 0) {
+        /* confirm repcodes and entropy tables when emitting a compressed block */
+        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+        zc->blockState.nextCBlock = tmp;
     }
+    /* We check that dictionaries have offset codes available for the first
+     * block. After the first block, the offcode table might not have large
+     * enough codes to represent the offsets in the data.
+     */
+    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+    return cSize;
 }
 
 
@@ -2005,13 +2553,13 @@
             ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-
             ZSTD_reduceIndex(cctx, correction);
             if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
             else ms->nextToUpdate -= correction;
             ms->loadedDictEnd = 0;
+            ms->dictMatchState = NULL;
         }
-        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd);
+        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
         if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
 
         {   size_t cSize = ZSTD_compressBlock_internal(cctx,
@@ -2020,11 +2568,8 @@
             if (ZSTD_isError(cSize)) return cSize;
 
             if (cSize == 0) {  /* block is not compressible */
-                U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
-                if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
-                MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten */
-                memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
-                cSize = ZSTD_blockHeaderSize + blockSize;
+                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+                if (ZSTD_isError(cSize)) return cSize;
             } else {
                 U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                 MEM_writeLE24(op, cBlockHeader24);
@@ -2060,6 +2605,7 @@
     BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
     size_t pos=0;
 
+    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                 !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
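
The descriptor byte packs dictIDSizeCode into bits 0-1, checksumFlag into bit 2, singleSegment into bit 5, and fcsCode into bits 6-7. A worked value:

    /* checksum on, single-segment frame, no dictID, fcsCode = 0:
     *   0 + (1 << 2) + (1 << 5) + (0 << 6) = 0x24 */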
@@ -2122,7 +2668,7 @@
                         const void* src, size_t srcSize,
                                U32 frame, U32 lastFrameChunk)
 {
-    ZSTD_matchState_t* ms = &cctx->blockState.matchState;
+    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
     size_t fhSize = 0;
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
@@ -2143,8 +2689,25 @@
     if (!ZSTD_window_update(&ms->window, src, srcSize)) {
         ms->nextToUpdate = ms->window.dictLimit;
     }
-    if (cctx->appliedParams.ldmParams.enableLdm)
+    if (cctx->appliedParams.ldmParams.enableLdm) {
         ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+    }
+
+    if (!frame) {
+        /* overflow check and correction for block mode */
+        if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
+            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
+            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
+            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+            ZSTD_reduceIndex(cctx, correction);
+            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+            else ms->nextToUpdate -= correction;
+            ms->loadedDictEnd = 0;
+            ms->dictMatchState = NULL;
+        }
+    }
 
     DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
     {   size_t const cSize = frame ?
@@ -2153,7 +2716,9 @@
         if (ZSTD_isError(cSize)) return cSize;
         cctx->consumedSrcSize += srcSize;
         cctx->producedCSize += (cSize + fhSize);
-        if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
+        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
+            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
             if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
                 DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                     (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
@@ -2184,44 +2749,50 @@
 {
     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+
     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
 }
 
 /*! ZSTD_loadDictionaryContent() :
  *  @return : 0, or an error code
  */
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize)
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+                                         ZSTD_CCtx_params const* params,
+                                         const void* src, size_t srcSize,
+                                         ZSTD_dictTableLoadMethod_e dtlm)
 {
     const BYTE* const ip = (const BYTE*) src;
     const BYTE* const iend = ip + srcSize;
-    ZSTD_compressionParameters const* cParams = &params->cParams;
 
     ZSTD_window_update(&ms->window, src, srcSize);
     ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
 
+    /* Assert that the ms params match the params we're being given */
+    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
     if (srcSize <= HASH_READ_SIZE) return 0;
 
     switch(params->cParams.strategy)
     {
     case ZSTD_fast:
-        ZSTD_fillHashTable(ms, cParams, iend);
+        ZSTD_fillHashTable(ms, iend, dtlm);
         break;
     case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, cParams, iend);
+        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
         break;
 
     case ZSTD_greedy:
     case ZSTD_lazy:
     case ZSTD_lazy2:
         if (srcSize >= HASH_READ_SIZE)
-            ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE);
+            ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
         break;
 
     case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
     case ZSTD_btopt:
     case ZSTD_btultra:
         if (srcSize >= HASH_READ_SIZE)
-            ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend);
+            ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
         break;
 
     default:
@@ -2256,7 +2827,12 @@
  *  assumptions : magic number supposed already checked
  *                dictSize supposed > 8
  */
-static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, void* workspace)
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+                                      ZSTD_matchState_t* ms,
+                                      ZSTD_CCtx_params const* params,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictTableLoadMethod_e dtlm,
+                                      void* workspace)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
@@ -2265,13 +2841,15 @@
     size_t dictID;
 
     ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+    assert(dictSize > 8);
+    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
 
     dictPtr += 4;   /* skip magic number */
     dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
     dictPtr += 4;
 
     {   unsigned maxSymbolValue = 255;
-        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
         if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
         if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
         dictPtr += hufHeaderSize;
@@ -2282,7 +2860,8 @@
         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
+        /* fill all offset symbols to avoid garbage at end of table */
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
                  dictionary_corrupted);
         dictPtr += offcodeHeaderSize;
     }
@@ -2294,7 +2873,7 @@
         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
         /* Every match length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
                  dictionary_corrupted);
         dictPtr += matchlengthHeaderSize;
     }
@@ -2306,7 +2885,7 @@
         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
         /* Every literal length code must have non-zero probability */
         CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
-        CHECK_E( FSE_buildCTable_wksp(bs->entropy.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
                  dictionary_corrupted);
         dictPtr += litlengthHeaderSize;
     }
@@ -2332,22 +2911,25 @@
                 if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
         }   }
 
-        bs->entropy.hufCTable_repeatMode = HUF_repeat_valid;
-        bs->entropy.offcode_repeatMode = FSE_repeat_valid;
-        bs->entropy.matchlength_repeatMode = FSE_repeat_valid;
-        bs->entropy.litlength_repeatMode = FSE_repeat_valid;
-        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize));
+        bs->entropy.huf.repeatMode = HUF_repeat_valid;
+        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
+        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
+        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
+        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
         return dictID;
     }
 }
 
 /** ZSTD_compress_insertDictionary() :
 *   @return : dictID, or an error code */
-static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms,
-                                             ZSTD_CCtx_params const* params,
-                                       const void* dict, size_t dictSize,
-                                             ZSTD_dictContentType_e dictContentType,
-                                             void* workspace)
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+                               ZSTD_matchState_t* ms,
+                         const ZSTD_CCtx_params* params,
+                         const void* dict, size_t dictSize,
+                               ZSTD_dictContentType_e dictContentType,
+                               ZSTD_dictTableLoadMethod_e dtlm,
+                               void* workspace)
 {
     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
     if ((dict==NULL) || (dictSize<=8)) return 0;
@@ -2356,12 +2938,12 @@
 
     /* dict restricted modes */
     if (dictContentType == ZSTD_dct_rawContent)
-        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
 
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
         if (dictContentType == ZSTD_dct_auto) {
             DEBUGLOG(4, "raw content dictionary detected");
-            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
         }
         if (dictContentType == ZSTD_dct_fullDict)
             return ERROR(dictionary_wrong);
@@ -2369,17 +2951,18 @@
     }
 
     /* dict as full zstd dictionary */
-    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, workspace);
+    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
 }
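
A buffer is treated as a full zstd dictionary only when it opens with the dictionary magic; under ZSTD_dct_auto anything else silently degrades to raw content, while ZSTD_dct_fullDict turns the mismatch into an error:

    /* ZSTD_MAGIC_DICTIONARY == 0xEC30A437, stored little-endian and checked
     * with MEM_readLE32(dict) as above; a raw-content dictionary is simply
     * treated as prefix history. */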
 
 /*! ZSTD_compressBegin_internal() :
  * @return : 0, or an error code */
-size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
-                             const void* dict, size_t dictSize,
-                             ZSTD_dictContentType_e dictContentType,
-                             const ZSTD_CDict* cdict,
-                             ZSTD_CCtx_params params, U64 pledgedSrcSize,
-                             ZSTD_buffered_policy_e zbuff)
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+                                    const void* dict, size_t dictSize,
+                                    ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
+                                    const ZSTD_CDict* cdict,
+                                    ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                    ZSTD_buffered_policy_e zbuff)
 {
     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
     /* params are supposed to be fully validated at this point */
@@ -2387,9 +2970,7 @@
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
     if (cdict && cdict->dictContentSize>0) {
-        cctx->requestedParams = params;
-        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params.cParams.windowLog,
-                                         params.fParams, pledgedSrcSize, zbuff);
+        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
     }
 
     CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
@@ -2397,7 +2978,7 @@
     {
         size_t const dictID = ZSTD_compress_insertDictionary(
                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
-                &params, dict, dictSize, dictContentType, cctx->entropyWorkspace);
+                &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
         if (ZSTD_isError(dictID)) return dictID;
         assert(dictID <= (size_t)(U32)-1);
         cctx->dictID = (U32)dictID;
@@ -2408,6 +2989,7 @@
 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
                                     ZSTD_CCtx_params params,
                                     unsigned long long pledgedSrcSize)
@@ -2416,7 +2998,7 @@
     /* compression parameters verification and optimization */
     CHECK_F( ZSTD_checkCParams(params.cParams) );
     return ZSTD_compressBegin_internal(cctx,
-                                       dict, dictSize, dictContentType,
+                                       dict, dictSize, dictContentType, dtlm,
                                        cdict,
                                        params, pledgedSrcSize,
                                        ZSTDb_not_buffered);
@@ -2431,7 +3013,7 @@
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     return ZSTD_compressBegin_advanced_internal(cctx,
-                                            dict, dictSize, ZSTD_dct_auto,
+                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                             NULL /*cdict*/,
                                             cctxParams, pledgedSrcSize);
 }
@@ -2442,7 +3024,7 @@
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
-    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
+    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
 }
 
@@ -2505,7 +3087,9 @@
     if (ZSTD_isError(cSize)) return cSize;
     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
     if (ZSTD_isError(endResult)) return endResult;
-    if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
+    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
+        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
         DEBUGLOG(4, "end of frame : controlling src size");
         if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
             DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
@@ -2517,22 +3101,22 @@
 
 
 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
-                               void* dst, size_t dstCapacity,
-                         const void* src, size_t srcSize,
-                         const void* dict,size_t dictSize,
-                               ZSTD_parameters params)
+                                      void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const void* dict,size_t dictSize,
+                                      ZSTD_parameters params)
 {
     ZSTD_CCtx_params const cctxParams =
             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     DEBUGLOG(4, "ZSTD_compress_internal");
     return ZSTD_compress_advanced_internal(cctx,
-                                          dst, dstCapacity,
-                                          src, srcSize,
-                                          dict, dictSize,
-                                          cctxParams);
+                                           dst, dstCapacity,
+                                           src, srcSize,
+                                           dict, dictSize,
+                                           cctxParams);
 }
 
-size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const void* dict,size_t dictSize,
@@ -2540,7 +3124,11 @@
 {
     DEBUGLOG(4, "ZSTD_compress_advanced");
     CHECK_F(ZSTD_checkCParams(params.cParams));
-    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+    return ZSTD_compress_internal(cctx,
+                                  dst, dstCapacity,
+                                  src, srcSize,
+                                  dict, dictSize,
+                                  params);
 }
 
 /* Internal */
@@ -2551,37 +3139,44 @@
         const void* dict,size_t dictSize,
         ZSTD_CCtx_params params)
 {
-    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)",
-                (U32)srcSize);
-    CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
-                                         params, srcSize, ZSTDb_not_buffered) );
+    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize);
+    CHECK_F( ZSTD_compressBegin_internal(cctx,
+                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+                         params, srcSize, ZSTDb_not_buffered) );
     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
 }
 
-size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
-                               const void* dict, size_t dictSize, int compressionLevel)
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict, size_t dictSize,
+                               int compressionLevel)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
     ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
     assert(params.fParams.contentSizeFlag == 1);
-    ZSTD_CCtxParam_setParameter(&cctxParams, ZSTD_p_compressLiterals, compressionLevel>=0);
     return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
 }
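/* editor's note: `srcSize + (!srcSize)` above is a branchless spelling of the
 * removed `srcSize ? srcSize : 1` -- ZSTD_getParams() wants a non-zero size
 * hint, so an empty input is deliberately treated as size 1. */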
 
-size_t ZSTD_compressCCtx (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+                         void* dst, size_t dstCapacity,
+                   const void* src, size_t srcSize,
+                         int compressionLevel)
 {
     DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+    assert(cctx != NULL);
     return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
 }
 
-size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+               const void* src, size_t srcSize,
+                     int compressionLevel)
 {
     size_t result;
     ZSTD_CCtx ctxBody;
-    memset(&ctxBody, 0, sizeof(ctxBody));
-    ctxBody.customMem = ZSTD_defaultCMem;
+    ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
-    ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
+    ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
     return result;
 }
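The refactor above routes the stack-allocated context through the same ZSTD_initCCtx()/ZSTD_freeCCtxContent() pair as heap contexts. For reference, a minimal sketch of the public one-shot entry point this code path backs, assuming only documented zstd.h symbols (buffer names are placeholders):

    #include <zstd.h>

    /* Compress src into dst in one call; ZSTD_compress() internally builds a
     * temporary CCtx on the stack, which is why only heap members get freed. */
    static size_t one_shot(void* dst, size_t dstCap,
                     const void* src, size_t srcSize)
    {
        size_t const written = ZSTD_compress(dst, dstCap, src, srcSize, 3);
        return ZSTD_isError(written) ? 0 : written;
    }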
 
@@ -2619,9 +3214,9 @@
                     ZSTD_dictContentType_e dictContentType,
                     ZSTD_compressionParameters cParams)
 {
-    DEBUGLOG(3, "ZSTD_initCDict_internal, dictContentType %u", (U32)dictContentType);
+    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType);
     assert(!ZSTD_checkCParams(cParams));
-    cdict->cParams = cParams;
+    cdict->matchState.cParams = cParams;
     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
         cdict->dictBuffer = NULL;
         cdict->dictContent = dictBuffer;
@@ -2654,7 +3249,7 @@
         {   size_t const dictID = ZSTD_compress_insertDictionary(
                     &cdict->cBlockState, &cdict->matchState, &params,
                     cdict->dictContent, cdict->dictContentSize,
-                    dictContentType, cdict->workspace);
+                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
             if (ZSTD_isError(dictID)) return dictID;
             assert(dictID <= (size_t)(U32)-1);
             cdict->dictID = (U32)dictID;
@@ -2775,7 +3370,7 @@
 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
 {
     assert(cdict != NULL);
-    return cdict->cParams;
+    return cdict->matchState.cParams;
 }
 
 /* ZSTD_compressBegin_usingCDict_advanced() :
@@ -2799,7 +3394,7 @@
         }
         params.fParams = fParams;
         return ZSTD_compressBegin_internal(cctx,
-                                           NULL, 0, ZSTD_dct_auto,
+                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                            cdict,
                                            params, pledgedSrcSize,
                                            ZSTDb_not_buffered);
@@ -2813,7 +3408,7 @@
 {
     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
-    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
+    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
 }
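Passing ZSTD_CONTENTSIZE_UNKNOWN here instead of the old literal 0 makes the pledged-size intent explicit. As a reminder of the call path this serves, a hedged sketch of the public digested-dictionary round trip, using only documented zstd.h entry points (dictBuf/dictLen are placeholders; error handling elided):

    #include <zstd.h>

    static size_t with_cdict(void* dst, size_t dstCap,
                       const void* src, size_t srcSize,
                       const void* dictBuf, size_t dictLen)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        /* one-shot compression against a pre-digested dictionary */
        size_t const r = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                  src, srcSize, cdict);
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return r;   /* a production caller would check ZSTD_isError(r) */
    }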
 
 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
@@ -2880,16 +3475,17 @@
 static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                     const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                     const ZSTD_CDict* const cdict,
-                    ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
+                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTD_resetCStream_internal (disableLiteralCompression=%i)",
-                params.disableLiteralCompression);
+    DEBUGLOG(4, "ZSTD_resetCStream_internal");
+    /* Finalize the compression parameters */
+    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
     /* params are supposed to be fully validated at this point */
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
     CHECK_F( ZSTD_compressBegin_internal(cctx,
-                                         dict, dictSize, dictContentType,
+                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                          cdict,
                                          params, pledgedSrcSize,
                                          ZSTDb_buffered) );
@@ -2912,7 +3508,6 @@
     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
     params.fParams.contentSizeFlag = 1;
-    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
 }
 
@@ -2925,6 +3520,7 @@
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
 {
     DEBUGLOG(4, "ZSTD_initCStream_internal");
+    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
 
@@ -2991,25 +3587,21 @@
                 (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
     CHECK_F( ZSTD_checkCParams(params.cParams) );
     if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
-    {   ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-        return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
-    }
+    zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
 }
 
 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
-    ZSTD_CCtx_params const cctxParams =
-            ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+    ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
+    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
 }
 
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
 {
     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
-    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
-    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
+    ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
+    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
 }
 
 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
@@ -3073,7 +3665,7 @@
                 ip = iend;
                 op += cSize;
                 zcs->frameEnded = 1;
-                ZSTD_startNewCompression(zcs);
+                ZSTD_CCtx_reset(zcs);
                 someMoreWork = 0; break;
             }
             /* complete loading into inBuffer */
@@ -3126,7 +3718,7 @@
                     if (zcs->frameEnded) {
                         DEBUGLOG(5, "Frame completed directly in outBuffer");
                         someMoreWork = 0;
-                        ZSTD_startNewCompression(zcs);
+                        ZSTD_CCtx_reset(zcs);
                     }
                     break;
                 }
@@ -3154,7 +3746,7 @@
                 if (zcs->frameEnded) {
                     DEBUGLOG(5, "Frame completed on flush");
                     someMoreWork = 0;
-                    ZSTD_startNewCompression(zcs);
+                    ZSTD_CCtx_reset(zcs);
                     break;
                 }
                 zcs->streamStage = zcss_load;
@@ -3207,19 +3799,16 @@
         params.cParams = ZSTD_getCParamsFromCCtxParams(
                 &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
 
+
 #ifdef ZSTD_MULTITHREAD
         if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
             params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
         }
         if (params.nbWorkers > 0) {
             /* mt context creation */
-            if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
+            if (cctx->mtctx == NULL) {
                 DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
                             params.nbWorkers);
-                if (cctx->mtctx != NULL)
-                    DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
-                                ZSTDMT_getNbWorkers(cctx->mtctx));
-                ZSTDMT_freeCCtx(cctx->mtctx);
                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                 if (cctx->mtctx == NULL) return ERROR(memory_allocation);
             }
@@ -3251,8 +3840,9 @@
         {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
             if ( ZSTD_isError(flushMin)
               || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
-                ZSTD_startNewCompression(cctx);
+                ZSTD_CCtx_reset(cctx);
             }
+            DEBUGLOG(5, "completed ZSTD_compress_generic delegating to ZSTDMT_compressStream_generic");
             return flushMin;
     }   }
 #endif
@@ -3308,82 +3898,83 @@
 
 #define ZSTD_MAX_CLEVEL     22
 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
 
 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
 {   /* "default" - guarantees a monotonically increasing memory budget */
     /* W,  C,  H,  S,  L, TL, strat */
     { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
-    { 19, 13, 14,  1,  7,  1, ZSTD_fast    },  /* level  1 */
-    { 19, 15, 16,  1,  6,  1, ZSTD_fast    },  /* level  2 */
-    { 20, 16, 17,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 20, 17, 18,  1,  5,  8, ZSTD_dfast   },  /* level  4 */
-    { 20, 17, 18,  2,  5, 16, ZSTD_greedy  },  /* level  5 */
-    { 21, 17, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
-    { 21, 18, 19,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
-    { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
-    { 21, 19, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
-    { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
-    { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
+    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
+    { 19, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
+    { 20, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
+    { 20, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
+    { 20, 18, 18,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
+    { 21, 18, 19,  2,  5,  4, ZSTD_lazy    },  /* level  6 */
+    { 21, 18, 19,  3,  5,  8, ZSTD_lazy2   },  /* level  7 */
+    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
+    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
+    { 21, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
+    { 21, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
     { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
     { 22, 21, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
     { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
     { 22, 22, 22,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
     { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
-    { 23, 22, 22,  4,  4, 48, ZSTD_btopt   },  /* level 17 */
-    { 23, 22, 22,  5,  3, 64, ZSTD_btopt   },  /* level 18 */
-    { 23, 23, 22,  7,  3,128, ZSTD_btopt   },  /* level 19 */
-    { 25, 25, 23,  7,  3,128, ZSTD_btultra },  /* level 20 */
-    { 26, 26, 24,  7,  3,256, ZSTD_btultra },  /* level 21 */
-    { 27, 27, 25,  9,  3,512, ZSTD_btultra },  /* level 22 */
+    { 23, 22, 22,  4,  4, 64, ZSTD_btopt   },  /* level 17 */
+    { 23, 23, 22,  6,  3,256, ZSTD_btopt   },  /* level 18 */
+    { 23, 24, 22,  7,  3,256, ZSTD_btultra },  /* level 19 */
+    { 25, 25, 23,  7,  3,256, ZSTD_btultra },  /* level 20 */
+    { 26, 26, 24,  7,  3,512, ZSTD_btultra },  /* level 21 */
+    { 27, 27, 25,  9,  3,999, ZSTD_btultra },  /* level 22 */
 },
 {   /* for srcSize <= 256 KB */
     /* W,  C,  H,  S,  L,  T, strat */
     { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 18, 13, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
-    { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
-    { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
-    { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
-    { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
-    { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
-    { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
-    { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
-    { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
-    { 18, 18, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 12.*/
-    { 18, 19, 17,  7,  4,  8, ZSTD_btlazy2 },  /* level 13 */
-    { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
-    { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
-    { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
-    { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
-    { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
-    { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
-    { 18, 19, 18, 11,  3,512, ZSTD_btultra },  /* level 20.*/
-    { 18, 19, 18, 12,  3,512, ZSTD_btultra },  /* level 21.*/
-    { 18, 19, 18, 13,  3,512, ZSTD_btultra },  /* level 22.*/
+    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
+    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
+    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
+    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
+    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
+    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
+    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
+    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 18, 18, 19,  5,  4, 16, ZSTD_btlazy2 },  /* level 11.*/
+    { 18, 19, 19,  6,  4, 16, ZSTD_btlazy2 },  /* level 12.*/
+    { 18, 19, 19,  8,  4, 16, ZSTD_btlazy2 },  /* level 13 */
+    { 18, 18, 19,  4,  4, 24, ZSTD_btopt   },  /* level 14.*/
+    { 18, 18, 19,  4,  3, 24, ZSTD_btopt   },  /* level 15.*/
+    { 18, 19, 19,  6,  3, 64, ZSTD_btopt   },  /* level 16.*/
+    { 18, 19, 19,  8,  3,128, ZSTD_btopt   },  /* level 17.*/
+    { 18, 19, 19, 10,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 18, 19, 19, 10,  3,256, ZSTD_btultra },  /* level 19.*/
+    { 18, 19, 19, 11,  3,512, ZSTD_btultra },  /* level 20.*/
+    { 18, 19, 19, 12,  3,512, ZSTD_btultra },  /* level 21.*/
+    { 18, 19, 19, 13,  3,999, ZSTD_btultra },  /* level 22.*/
 },
 {   /* for srcSize <= 128 KB */
     /* W,  C,  H,  S,  L,  T, strat */
-    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* level  0 - not used */
-    { 17, 12, 13,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 17, 13, 16,  1,  5,  1, ZSTD_fast    },  /* level  2 */
-    { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
-    { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
-    { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
-    { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
-    { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
+    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
+    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
+    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
+    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
+    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
+    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
+    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
     { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
-    { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
-    { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
-    { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
-    { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
-    { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
-    { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
-    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
-    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 17, 18, 17,  6,  4, 16, ZSTD_btlazy2 },  /* level 12 */
+    { 17, 18, 17,  8,  4, 16, ZSTD_btlazy2 },  /* level 13.*/
+    { 17, 18, 17,  4,  4, 32, ZSTD_btopt   },  /* level 14.*/
+    { 17, 18, 17,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
+    { 17, 18, 17,  7,  3,128, ZSTD_btopt   },  /* level 16.*/
+    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 17.*/
+    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 19.*/
     { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
     { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
     { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
@@ -3391,28 +3982,28 @@
 {   /* for srcSize <= 16 KB */
     /* W,  C,  H,  S,  L,  T, strat */
     { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
-    { 14, 14, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
-    { 14, 14, 14,  1,  4,  1, ZSTD_fast    },  /* level  2 */
-    { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
-    { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
-    { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
-    { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
-    { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
-    { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
-    { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
-    { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
-    { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
+    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
+    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
+    { 14, 14, 14,  2,  4,  1, ZSTD_dfast   },  /* level  3.*/
+    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4.*/
+    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
+    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
+    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
+    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
+    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
+    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
     { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
     { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
     { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
     { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
     { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
     { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
-    { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
-    { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 14, 15, 15,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 14, 15, 15,  6,  3,256, ZSTD_btultra },  /* level 19.*/
     { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
     { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
-    { 14, 15, 15, 10,  3,256, ZSTD_btultra },  /* level 22.*/
+    { 14, 15, 15, 10,  3,512, ZSTD_btultra },  /* level 22.*/
 },
 };
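Each row in the retuned tables above reads {windowLog, chainLog, hashLog, searchLog, searchLength, targetLength, strategy}, bucketed by source size (default, <=256 KB, <=128 KB, <=16 KB). A small sketch of how these rows surface through the advanced API -- ZSTD_getCParams() sits behind ZSTD_STATIC_LINKING_ONLY, and the printout is illustrative only:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    static void show_cparams(int level, unsigned long long srcSizeHint)
    {
        /* picks a row from tables equivalent to the ones above, based on
         * compression level, estimated source size, and dict size (0 here) */
        ZSTD_compressionParameters const cp =
                ZSTD_getCParams(level, srcSizeHint, 0);
        printf("level %d: wlog=%u clog=%u hlog=%u strat=%u\n",
               level, cp.windowLog, cp.chainLog, cp.hashLog,
               (unsigned)cp.strategy);
    }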
 
--- a/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress_internal.h	Mon Oct 22 14:46:06 2018 -0400
@@ -27,6 +27,7 @@
 extern "C" {
 #endif
 
+
 /*-*************************************
 *  Constants
 ***************************************/
@@ -37,7 +38,8 @@
                                        It's not a big deal though : candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
-                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy */
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+                                       Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
 
 
 /*-*************************************
@@ -46,6 +48,12 @@
 typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
 typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
 
+typedef enum {
+    ZSTD_dictDefaultAttach = 0,
+    ZSTD_dictForceAttach = 1,
+    ZSTD_dictForceCopy = -1,
+} ZSTD_dictAttachPref_e;
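/* editor's note (assumption, not shown in this hunk): these values steer
 * whether a CCtx may reference a CDict's tables in place via the new
 * dictMatchState plumbing ("attach": cheap setup, the default for small
 * inputs) or must copy them into its own workspace (faster search, costlier
 * setup); the Force* variants pin one behavior. The selection heuristic
 * itself lives in zstd_compress.c. */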
+
 typedef struct ZSTD_prefixDict_s {
     const void* dict;
     size_t dictSize;
@@ -53,14 +61,22 @@
 } ZSTD_prefixDict;
 
 typedef struct {
-    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
+    U32 CTable[HUF_CTABLE_SIZE_U32(255)];
+    HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
     FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
     FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
     FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
-    HUF_repeat hufCTable_repeatMode;
     FSE_repeat offcode_repeatMode;
     FSE_repeat matchlength_repeatMode;
     FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+    ZSTD_hufCTables_t huf;
+    ZSTD_fseCTables_t fse;
 } ZSTD_entropyCTables_t;
 
 typedef struct {
@@ -76,26 +92,27 @@
     U32 rep[ZSTD_REP_NUM];
 } ZSTD_optimal_t;
 
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
 typedef struct {
     /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
-    U32* litFreq;               /* table of literals statistics, of size 256 */
-    U32* litLengthFreq;         /* table of litLength statistics, of size (MaxLL+1) */
-    U32* matchLengthFreq;       /* table of matchLength statistics, of size (MaxML+1) */
-    U32* offCodeFreq;           /* table of offCode statistics, of size (MaxOff+1) */
-    ZSTD_match_t* matchTable;   /* list of found matches, of size ZSTD_OPT_NUM+1 */
-    ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+    U32* litFreq;                /* table of literals statistics, of size 256 */
+    U32* litLengthFreq;          /* table of litLength statistics, of size (MaxLL+1) */
+    U32* matchLengthFreq;        /* table of matchLength statistics, of size (MaxML+1) */
+    U32* offCodeFreq;            /* table of offCode statistics, of size (MaxOff+1) */
+    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
+    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
 
     U32  litSum;                 /* nb of literals */
     U32  litLengthSum;           /* nb of litLength codes */
     U32  matchLengthSum;         /* nb of matchLength codes */
     U32  offCodeSum;             /* nb of offset codes */
-    /* begin updated by ZSTD_setLog2Prices */
-    U32  log2litSum;             /* pow2 to compare log2(litfreq) to */
-    U32  log2litLengthSum;       /* pow2 to compare log2(llfreq) to */
-    U32  log2matchLengthSum;     /* pow2 to compare log2(mlfreq) to */
-    U32  log2offCodeSum;         /* pow2 to compare log2(offreq) to */
-    /* end : updated by ZSTD_setLog2Prices */
-    U32  staticPrices;           /* prices follow a pre-defined cost structure, statistics are irrelevant */
+    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
+    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
+    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
+    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
+    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
+    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
 } optState_t;
 
 typedef struct {
@@ -111,17 +128,20 @@
     U32 lowLimit;           /* below that point, no more data */
 } ZSTD_window_t;
 
-typedef struct {
-    ZSTD_window_t window;      /* State for window round buffer management */
-    U32 loadedDictEnd;         /* index of end of dictionary */
-    U32 nextToUpdate;          /* index from which to continue table update */
-    U32 nextToUpdate3;         /* index from which to continue table update */
-    U32 hashLog3;              /* dispatch table : larger == faster, more memory */
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+struct ZSTD_matchState_t {
+    ZSTD_window_t window;   /* State for window round buffer management */
+    U32 loadedDictEnd;      /* index of end of dictionary */
+    U32 nextToUpdate;       /* index from which to continue table update */
+    U32 nextToUpdate3;      /* index from which to continue table update */
+    U32 hashLog3;           /* dispatch table : larger == faster, more memory */
     U32* hashTable;
     U32* hashTable3;
     U32* chainTable;
     optState_t opt;         /* optimal parser state */
-} ZSTD_matchState_t;
+    const ZSTD_matchState_t *dictMatchState;
+    ZSTD_compressionParameters cParams;
+};
 
 typedef struct {
     ZSTD_compressedBlockState_t* prevCBlock;
@@ -161,7 +181,7 @@
   rawSeq* seq;     /* The start of the sequences */
   size_t pos;      /* The position where reading stopped. <= size. */
   size_t size;     /* The number of sequences. <= capacity. */
-  size_t capacity; /* The capacity of the `seq` pointer */
+  size_t capacity; /* The capacity starting from the `seq` pointer */
 } rawSeqStore_t;
 
 struct ZSTD_CCtx_params_s {
@@ -170,10 +190,11 @@
     ZSTD_frameParameters fParams;
 
     int compressionLevel;
-    int disableLiteralCompression;
     int forceWindow;           /* force back-references to respect limit of
                                 * 1<<wLog, even for dictionary */
 
+    ZSTD_dictAttachPref_e attachDictPref;
+
     /* Multithreading: used to pass parameters to mtctx */
     unsigned nbWorkers;
     unsigned jobSize;
@@ -193,6 +214,8 @@
     ZSTD_CCtx_params requestedParams;
     ZSTD_CCtx_params appliedParams;
     U32   dictID;
+
+    int workSpaceOversizedDuration;
     void* workSpace;
     size_t workSpaceSize;
     size_t blockSize;
@@ -235,11 +258,15 @@
 #endif
 };
 
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
+
 
 typedef size_t (*ZSTD_blockCompressor) (
         ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
+        void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
 
 
 MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -280,16 +307,18 @@
 */
 MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
 {
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
     static const BYTE* g_start = NULL;
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
-        DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
+        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
                pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
     }
 #endif
+    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
     /* copy Literals */
-    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
+    assert(seqStorePtr->maxNbLit <= 128 KB);
+    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
     seqStorePtr->lit += litLength;
 
@@ -420,6 +449,11 @@
     const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
     size_t const matchLength = ZSTD_count(ip, match, vEnd);
     if (match + matchLength != mEnd) return matchLength;
+    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
     return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
 }
 
@@ -497,6 +531,20 @@
 }
 
 /**
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+    return ZSTD_window_hasExtDict(ms->window) ?
+        ZSTD_extDict :
+        ms->dictMatchState != NULL ?
+            ZSTD_dictMatchState :
+            ZSTD_noDict;
+}
+
+/**
  * ZSTD_window_needOverflowCorrection():
  * Returns non-zero if the indices are getting too large and need overflow
  * protection.
@@ -563,31 +611,41 @@
  * ZSTD_window_enforceMaxDist():
  * Updates lowLimit so that:
  *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
  * This allows a simple check that index >= lowLimit to see if index is valid.
  * This must be called before a block compression call, with srcEnd as the block
  * source end.
+ *
  * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
  * This is because dictionaries are allowed to be referenced as long as the last
  * byte of the dictionary is in the window, but once they are out of range,
  * they cannot be referenced. If loadedDictEndPtr is NULL, we use
  * loadedDictEnd == 0.
+ *
+ * In normal dict mode, the dict is between lowLimit and dictLimit. In
+ * dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary
+ * is below them. forceWindow and dictMatchState are therefore incompatible.
  */
 MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
                                            void const* srcEnd, U32 maxDist,
-                                           U32* loadedDictEndPtr)
+                                           U32* loadedDictEndPtr,
+                                           const ZSTD_matchState_t** dictMatchStatePtr)
 {
     U32 const current = (U32)((BYTE const*)srcEnd - window->base);
     U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
+    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: current=%u, maxDist=%u", current, maxDist);
     if (current > maxDist + loadedDictEnd) {
         U32 const newLowLimit = current - maxDist;
         if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
         if (window->dictLimit < window->lowLimit) {
-            DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
-                     window->lowLimit);
+            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+                        window->dictLimit, window->lowLimit);
             window->dictLimit = window->lowLimit;
         }
         if (loadedDictEndPtr)
             *loadedDictEndPtr = 0;
+        if (dictMatchStatePtr)
+            *dictMatchStatePtr = NULL;
     }
 }
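/* editor's note: a worked instance of the invariant above -- with
 * current == (srcEnd - base) == 1000, maxDist == 512, loadedDictEnd == 0,
 * we have current > maxDist, so lowLimit rises to 1000 - 512 == 488 and
 * (srcEnd - base) - lowLimit == 512 == maxDist + loadedDictEnd; any match
 * index >= 488 remains valid, anything older falls out of the window. */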
 
@@ -603,12 +661,12 @@
 {
     BYTE const* const ip = (BYTE const*)src;
     U32 contiguous = 1;
+    DEBUGLOG(5, "ZSTD_window_update");
     /* Check if blocks follow each other */
     if (src != window->nextSrc) {
         /* not contiguous */
         size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
-        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
-                 window->dictLimit);
+        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
         window->lowLimit = window->dictLimit;
         assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
         window->dictLimit = (U32)distanceFromBase;
@@ -625,10 +683,38 @@
         ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
         window->lowLimit = lowLimitMax;
+        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
     }
     return contiguous;
 }
 
+
+/* debug functions */
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+    U32 const fp_accuracy = 8;
+    U32 const fp_multiplier = (1 << fp_accuracy);
+    U32 const stat = rawStat + 1;
+    U32 const hb = ZSTD_highbit32(stat);
+    U32 const BWeight = hb * fp_multiplier;
+    U32 const FWeight = (stat << fp_accuracy) >> hb;
+    U32 const weight = BWeight + FWeight;
+    assert(hb + fp_accuracy < 31);
+    return (double)weight / fp_multiplier;
+}
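/* editor's note: ZSTD_fWeight() approximates log2(rawStat+1) + 1 in fixed
 * point with 8 fractional bits. Worked example: rawStat==7 -> stat==8,
 * hb==3, BWeight==3*256==768, FWeight==(8<<8)>>3==256, weight==1024,
 * result 1024/256 == 4.0 == log2(8) + 1. ZSTD_debugTable() below thus
 * prints fWeight(sum) - fWeight(count), roughly each symbol's cost in bits. */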
+
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+    unsigned u, sum;
+    for (u=0, sum=0; u<=max; u++) sum += table[u];
+    DEBUGLOG(2, "total nb elts: %u", sum);
+    for (u=0; u<=max; u++) {
+        DEBUGLOG(2, "%2u: %5u  (%.2f)",
+                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+    }
+}
+
 #if defined (__cplusplus)
 }
 #endif
@@ -640,7 +726,7 @@
  * ============================================================== */
 
 /* ZSTD_getCParamsFromCCtxParams() :
- * cParams are built depending on compressionLevel, src size hints, 
+ * cParams are built depending on compressionLevel, src size hints,
  * LDM and manually set compression parameters.
  */
 ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
@@ -656,6 +742,8 @@
                      const ZSTD_CDict* cdict,
                      ZSTD_CCtx_params  params, unsigned long long pledgedSrcSize);
 
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
 /*! ZSTD_compressStream_generic() :
  *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
 size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
@@ -672,6 +760,7 @@
 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     ZSTD_dictContentType_e dictContentType,
+                                    ZSTD_dictTableLoadMethod_e dtlm,
                                     const ZSTD_CDict* cdict,
                                     ZSTD_CCtx_params params,
                                     unsigned long long pledgedSrcSize);
--- a/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_double_fast.c	Mon Oct 22 14:46:06 2018 -0400
@@ -13,9 +13,9 @@
 
 
 void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              ZSTD_compressionParameters const* cParams,
-                              void const* end)
+                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashLarge = ms->hashTable;
     U32  const hBitsL = cParams->hashLog;
     U32  const mls = cParams->searchLength;
@@ -40,6 +40,9 @@
                 hashSmall[smHash] = current + i;
             if (i == 0 || hashLarge[lgHash] == 0)
                 hashLarge[lgHash] = current + i;
+            /* Only load extra positions for ZSTD_dtlm_full */
+            if (dtlm == ZSTD_dtlm_fast)
+                break;
         }
     }
 }
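/* editor's note: the dtlm split in action -- ZSTD_dtlm_fast stops after
 * seeding one position per fill step, while ZSTD_dtlm_full (used when
 * building a CDict, see ZSTD_initCDict_internal above) indexes the extra
 * positions too: better match coverage from the dictionary at a higher
 * one-time load cost. */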
@@ -48,9 +51,10 @@
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_doubleFast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
-        U32 const mls /* template */)
+        void const* src, size_t srcSize,
+        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
 {
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32* const hashLong = ms->hashTable;
     const U32 hBitsL = cParams->hashLog;
     U32* const hashSmall = ms->chainTable;
@@ -59,70 +63,188 @@
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32 lowestIndex = ms->window.dictLimit;
-    const BYTE* const lowest = base + lowestIndex;
+    const U32 prefixLowestIndex = ms->window.dictLimit;
+    const BYTE* const prefixLowest = base + prefixLowestIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
     U32 offsetSaved = 0;
 
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dictCParams =
+                                     dictMode == ZSTD_dictMatchState ?
+                                     &dms->cParams : NULL;
+    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
+                                     dms->hashTable : NULL;
+    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
+                                     dms->chainTable : NULL;
+    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.dictLimit : 0;
+    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.base : NULL;
+    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
+                                     dictBase + dictStartIndex : NULL;
+    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.nextSrc : NULL;
+    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
+                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
+                                     0;
+    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
+                                     dictCParams->hashLog : hBitsL;
+    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
+                                     dictCParams->chainLog : hBitsS;
+    const U32 dictAndPrefixLength  = (U32)(ip - prefixLowest + dictEnd - dictStart);
+
+    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
     /* init */
-    ip += (ip==lowest);
-    {   U32 const maxRep = (U32)(ip-lowest);
+    ip += (dictAndPrefixLength == 0);
+    if (dictMode == ZSTD_noDict) {
+        U32 const maxRep = (U32)(ip - prefixLowest);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
+    if (dictMode == ZSTD_dictMatchState) {
+        /* dictMatchState repCode checks don't currently handle repCode == 0
+         * disabling. */
+        assert(offset_1 <= dictAndPrefixLength);
+        assert(offset_2 <= dictAndPrefixLength);
+    }
 
     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
         size_t mLength;
+        U32 offset;
         size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
         size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
         U32 const current = (U32)(ip-base);
         U32 const matchIndexL = hashLong[h2];
-        U32 const matchIndexS = hashSmall[h];
+        U32 matchIndexS = hashSmall[h];
         const BYTE* matchLong = base + matchIndexL;
         const BYTE* match = base + matchIndexS;
+        const U32 repIndex = current + 1 - offset_1;
+        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+                            && repIndex < prefixLowestIndex) ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
         hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
 
-        assert(offset_1 <= current);   /* supposed guaranteed by construction */
-        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
-            /* favor repcode */
+        /* check dictMatchState repcode */
+        if (dictMode == ZSTD_dictMatchState
+            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+            goto _match_stored;
+        }
+
+        /* check noDict repcode */
+        if ( dictMode == ZSTD_noDict
+          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             ip++;
             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else {
-            U32 offset;
-            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+            goto _match_stored;
+        }
+
+        if (matchIndexL > prefixLowestIndex) {
+            /* check prefix long match */
+            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                 mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                 offset = (U32)(ip-matchLong);
-                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
-            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
-                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
-                U32 const matchIndexL3 = hashLong[hl3];
-                const BYTE* matchL3 = base + matchIndexL3;
-                hashLong[hl3] = current + 1;
-                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
+                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+                goto _match_found;
+            }
+        } else if (dictMode == ZSTD_dictMatchState) {
+            /* check dictMatchState long match */
+            U32 const dictMatchIndexL = dictHashLong[dictHL];
+            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+            assert(dictMatchL < dictEnd);
+
+            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+                offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+                goto _match_found;
+            }
+        }
+
+        if (matchIndexS > prefixLowestIndex) {
+            /* check prefix short match */
+            if (MEM_read32(match) == MEM_read32(ip)) {
+                goto _search_next_long;
+            }
+        } else if (dictMode == ZSTD_dictMatchState) {
+            /* check dictMatchState short match */
+            U32 const dictMatchIndexS = dictHashSmall[dictHS];
+            match = dictBase + dictMatchIndexS;
+            matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+                goto _search_next_long;
+            }
+        }
+
+        ip += ((ip-anchor) >> kSearchStrength) + 1;
+        continue;
+
+_search_next_long:
+
+        {
+            size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+            U32 const matchIndexL3 = hashLong[hl3];
+            const BYTE* matchL3 = base + matchIndexL3;
+            hashLong[hl3] = current + 1;
+
+            /* check prefix long +1 match */
+            if (matchIndexL3 > prefixLowestIndex) {
+                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                     mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                     ip++;
                     offset = (U32)(ip-matchL3);
-                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
-                } else {
-                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-                    offset = (U32)(ip-match);
-                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+                    goto _match_found;
                 }
-            } else {
-                ip += ((ip-anchor) >> kSearchStrength) + 1;
-                continue;
+            } else if (dictMode == ZSTD_dictMatchState) {
+                /* check dict long +1 match */
+                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+                assert(dictMatchL3 < dictEnd);
+                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+                    ip++;
+                    offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+                    goto _match_found;
+                }
             }
-
-            offset_2 = offset_1;
-            offset_1 = offset;
-
-            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
         }
 
+        /* if no long +1 match, explore the short match we found */
+        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+            offset = (U32)(current - matchIndexS);
+            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+        } else {
+            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+            offset = (U32)(ip - match);
+            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+        }
+
+        /* fall-through */
+
+_match_found:
+        offset_2 = offset_1;
+        offset_1 = offset;
+
+        ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
         /* match found */
         ip += mLength;
         anchor = ip;
@@ -135,19 +257,44 @@
                 hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
 
             /* check immediate repcode */
-            while ( (ip <= ilimit)
-                 && ( (offset_2>0)
-                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
-                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
-                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
+            if (dictMode == ZSTD_dictMatchState) {
+                while (ip <= ilimit) {
+                    U32 const current2 = (U32)(ip-base);
+                    U32 const repIndex2 = current2 - offset_2;
+                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
+                        && repIndex2 < prefixLowestIndex ?
+                            dictBase - dictIndexDelta + repIndex2 :
+                            base + repIndex2;
+                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+                        ip += repLength2;
+                        anchor = ip;
+                        continue;
+                    }
+                    break;
+                }
+            }
+
+            if (dictMode == ZSTD_noDict) {
+                while ( (ip <= ilimit)
+                     && ( (offset_2>0)
+                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                    /* store sequence */
+                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                    ip += rLength;
+                    anchor = ip;
+                    continue;   /* faster when present ... (?) */
+    }   }   }   }
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
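The block epilogue above restores any rep offset that was parked at block start: an offset larger than the available history is disabled (set to 0) for the duration of the block and written back unchanged if it was never replaced. A self-contained illustration of that round trip, using hypothetical values rather than changeset code:

#include <assert.h>
#include <stdint.h>

typedef uint32_t U32;

int main(void)
{
    U32 offset_1 = 50, offset_2 = 8, offsetSaved = 0;
    U32 const maxRep = 32;   /* history available at the start of this block */

    /* init: park any rep offset that would reach beyond the window */
    if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
    if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;

    /* ... the block is compressed; assume offset_1 was never replaced ... */

    /* save reps for next block: an unused (0) slot gets the parked value back */
    {   U32 const rep0 = offset_1 ? offset_1 : offsetSaved;
        U32 const rep1 = offset_2 ? offset_2 : offsetSaved;
        assert(rep0 == 50 && rep1 == 8);
    }
    return 0;
}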
@@ -160,102 +307,126 @@
 
 size_t ZSTD_compressBlock_doubleFast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    const U32 mls = cParams->searchLength;
+    const U32 mls = ms->cParams.searchLength;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+    }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    const U32 mls = ms->cParams.searchLength;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+    case 5 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+    case 6 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+    case 7 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
     }
 }
 
 
 static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+        void const* src, size_t srcSize,
         U32 const mls /* template */)
 {
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32* const hashLong = ms->hashTable;
     U32  const hBitsL = cParams->hashLog;
     U32* const hashSmall = ms->chainTable;
     U32  const hBitsS = cParams->chainLog;
-    const BYTE* const base = ms->window.base;
-    const BYTE* const dictBase = ms->window.dictBase;
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.lowLimit;
-    const BYTE* const dictStart = dictBase + lowestIndex;
-    const U32   dictLimit = ms->window.dictLimit;
-    const BYTE* const lowPrefixPtr = base + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
+    const U32   prefixStartIndex = ms->window.dictLimit;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const U32   dictStartIndex = ms->window.lowLimit;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const BYTE* const dictStart = dictBase + dictStartIndex;
+    const BYTE* const dictEnd = dictBase + prefixStartIndex;
     U32 offset_1=rep[0], offset_2=rep[1];
 
+    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
         const U32 matchIndex = hashSmall[hSmall];
-        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
         const BYTE* match = matchBase + matchIndex;
 
         const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
         const U32 matchLongIndex = hashLong[hLong];
-        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
         const BYTE* matchLong = matchLongBase + matchLongIndex;
 
         const U32 current = (U32)(ip-base);
         const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
-        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
-        const BYTE* repMatch = repBase + repIndex;
+        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+        const BYTE* const repMatch = repBase + repIndex;
         size_t mLength;
         hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
 
-        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
-           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+            & (repIndex > dictStartIndex))
+          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
         } else {
-            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
-                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                 U32 offset;
-                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                 offset = current - matchLongIndex;
                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
                 ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
 
-            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                 U32 const matchIndex3 = hashLong[h3];
-                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                 const BYTE* match3 = match3Base + matchIndex3;
                 U32 offset;
                 hashLong[h3] = current + 1;
-                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
-                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
-                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
-                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                     ip++;
                     offset = current+1 - matchIndex3;
                     while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                 } else {
-                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
-                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
-                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                     offset = current - matchIndex;
                     while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 }
@@ -282,12 +453,13 @@
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
                 U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
-                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
-                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+                    & (repIndex2 > dictStartIndex))
+                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                     hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                     hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
@@ -309,19 +481,19 @@
 
 size_t ZSTD_compressBlock_doubleFast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const mls = cParams->searchLength;
+    U32 const mls = ms->cParams.searchLength;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
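The dictMatchState variant above addresses the attached dictionary and the current window through one shared index space. dictIndexDelta (prefixStartIndex minus the dictionary's size) converts an index below prefixStartIndex into an offset inside the dictionary buffer. A minimal standalone sketch of that translation, with hypothetical buffer sizes, not changeset code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t U32;
typedef uint8_t  BYTE;

int main(void)
{
    /* hypothetical layout: a 16-byte dictionary, then a window whose
     * prefix starts at index 20, so indices 4..19 refer to the dictionary */
    BYTE dict[16];
    BYTE window[64] = {0};
    int i;
    for (i = 0; i < 16; i++) dict[i] = (BYTE)('a' + i);

    {   const BYTE* const dictBase = dict;
        const BYTE* const dictEnd  = dict + sizeof(dict);
        const BYTE* const base     = window;
        U32 const prefixStartIndex = 20;
        U32 const dictIndexDelta   = prefixStartIndex - (U32)(dictEnd - dictBase);  /* == 4 */

        U32 const repIndex = 5;   /* < prefixStartIndex : lives in the dictionary */
        /* same arithmetic as "dictBase - dictIndexDelta + repIndex" above,
         * reassociated to keep the intermediate pointer in range */
        const BYTE* const repMatch = repIndex < prefixStartIndex
                                   ? dictBase + (repIndex - dictIndexDelta)
                                   : base + repIndex;

        assert(dictIndexDelta == 4);
        assert(repMatch == dict + 1);   /* index 5 minus delta 4 */
        printf("repIndex %u resolves to dict byte '%c'\n", (unsigned)repIndex, *repMatch);
    }
    return 0;
}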
--- a/contrib/python-zstandard/zstd/compress/zstd_double_fast.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_double_fast.h	Mon Oct 22 14:46:06 2018 -0400
@@ -19,14 +19,16 @@
 #include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
 
 void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
-                              ZSTD_compressionParameters const* cParams,
-                              void const* end);
+                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
 size_t ZSTD_compressBlock_doubleFast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_doubleFast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 
 #if defined (__cplusplus)
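These prototypes record the wider refactoring in this changeset: the explicit ZSTD_compressionParameters argument is gone, and each block compressor now reads its parameters from the match state it already receives. A tiny sketch of that pattern with stand-in types (hypothetical, not the real ZSTD_matchState_t):

#include <stdio.h>

typedef struct { unsigned hashLog, searchLength; } params_t;   /* stand-in */
typedef struct { params_t cParams; /* ... tables, window ... */ } state_t;

/* before: size_t compress(state_t* s, const params_t* p, ...);
 * after:  parameters travel inside the state itself            */
static unsigned compressBlock(state_t* const s)
{
    const params_t* const cParams = &s->cParams;   /* as in the new code */
    return cParams->searchLength;                  /* stand-in for real work */
}

int main(void)
{
    state_t s = { { 17, 5 } };
    printf("mls=%u\n", compressBlock(&s));         /* prints mls=5 */
    return 0;
}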
--- a/contrib/python-zstandard/zstd/compress/zstd_fast.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_fast.c	Mon Oct 22 14:46:06 2018 -0400
@@ -13,9 +13,9 @@
 
 
 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        ZSTD_compressionParameters const* cParams,
-                        void const* end)
+                        void const* end, ZSTD_dictTableLoadMethod_e dtlm)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32  const hBits = cParams->hashLog;
     U32  const mls = cParams->searchLength;
@@ -34,6 +34,9 @@
             size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
             if (i == 0 || hashTable[hash] == 0)
                 hashTable[hash] = current + i;
+            /* Only load extra positions for ZSTD_dtlm_full */
+            if (dtlm == ZSTD_dtlm_fast)
+                break;
         }
     }
 }
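ZSTD_fillHashTable now takes a ZSTD_dictTableLoadMethod_e: ZSTD_dtlm_fast indexes only the first position of each fill step, while ZSTD_dtlm_full keeps scanning the whole group. A hypothetical miniature, counting touched positions instead of hashing them, to show the difference:

#include <stdio.h>

typedef enum { DTLM_FAST, DTLM_FULL } dtlm_e;   /* stand-ins for ZSTD_dtlm_* */

/* count how many input positions a table load would touch */
static unsigned positionsIndexed(unsigned srcSize, unsigned step, dtlm_e dtlm)
{
    unsigned n = 0;
    unsigned pos;
    for (pos = 0; pos + step <= srcSize; pos += step) {
        unsigned i;
        for (i = 0; i < step; i++) {
            n++;                           /* would hash position pos+i here */
            if (dtlm == DTLM_FAST) break;  /* fast mode: first position only */
        }
    }
    return n;
}

int main(void)
{
    printf("fast: %u positions\n", positionsIndexed(32, 3, DTLM_FAST));  /* 10 */
    printf("full: %u positions\n", positionsIndexed(32, 3, DTLM_FULL));  /* 30 */
    return 0;
}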
@@ -42,26 +45,65 @@
 size_t ZSTD_compressBlock_fast_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
-        U32 const hlog, U32 const stepSize, U32 const mls)
+        U32 const mls, ZSTD_dictMode_e const dictMode)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
     const BYTE* const base = ms->window.base;
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.dictLimit;
-    const BYTE* const lowest = base + lowestIndex;
+    const U32   prefixStartIndex = ms->window.dictLimit;
+    const BYTE* const prefixStart = base + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
     U32 offsetSaved = 0;
 
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dictCParams =
+                                     dictMode == ZSTD_dictMatchState ?
+                                     &dms->cParams : NULL;
+    const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
+                                     dms->hashTable : NULL;
+    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.dictLimit : 0;
+    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.base : NULL;
+    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
+                                     dictBase + dictStartIndex : NULL;
+    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.nextSrc : NULL;
+    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
+                                     prefixStartIndex - (U32)(dictEnd - dictBase) :
+                                     0;
+    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
+    const U32 dictHLog             = dictMode == ZSTD_dictMatchState ?
+                                     dictCParams->hashLog : hlog;
+
+    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
+    /* otherwise, we would get index underflow when translating a dict index
+     * into a local index */
+    assert(dictMode != ZSTD_dictMatchState
+        || prefixStartIndex >= (U32)(dictEnd - dictBase));
+
     /* init */
-    ip += (ip==lowest);
-    {   U32 const maxRep = (U32)(ip-lowest);
+    ip += (dictAndPrefixLength == 0);
+    if (dictMode == ZSTD_noDict) {
+        U32 const maxRep = (U32)(ip - prefixStart);
         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     }
+    if (dictMode == ZSTD_dictMatchState) {
+        /* dictMatchState repCode checks don't currently handle repCode == 0
+         * disabling. */
+        assert(offset_1 <= dictAndPrefixLength);
+        assert(offset_2 <= dictAndPrefixLength);
+    }
 
     /* Main Search Loop */
     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
@@ -70,26 +112,67 @@
         U32 const current = (U32)(ip-base);
         U32 const matchIndex = hashTable[h];
         const BYTE* match = base + matchIndex;
+        const U32 repIndex = current + 1 - offset_1;
+        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+                            && repIndex < prefixStartIndex) ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
         hashTable[h] = current;   /* update hash table */
 
-        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+        if ( (dictMode == ZSTD_dictMatchState)
+          && ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else if ( dictMode == ZSTD_noDict
+                 && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             ip++;
             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
-        } else {
-            if ( (matchIndex <= lowestIndex)
-              || (MEM_read32(match) != MEM_read32(ip)) ) {
+        } else if ( (matchIndex <= prefixStartIndex) ) {
+            if (dictMode == ZSTD_dictMatchState) {
+                size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+                U32 const dictMatchIndex = dictHashTable[dictHash];
+                const BYTE* dictMatch = dictBase + dictMatchIndex;
+                if (dictMatchIndex <= dictStartIndex ||
+                    MEM_read32(dictMatch) != MEM_read32(ip)) {
+                    assert(stepSize >= 1);
+                    ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+                    continue;
+                } else {
+                    /* found a dict match */
+                    U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+                    mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+                    while (((ip>anchor) & (dictMatch>dictStart))
+                         && (ip[-1] == dictMatch[-1])) {
+                        ip--; dictMatch--; mLength++;
+                    } /* catch up */
+                    offset_2 = offset_1;
+                    offset_1 = offset;
+                    ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                }
+            } else {
                 assert(stepSize >= 1);
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
             }
+        } else if (MEM_read32(match) != MEM_read32(ip)) {
+            /* it's not a match, and we're not going to check the dictionary */
+            assert(stepSize >= 1);
+            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+            continue;
+        } else {
+            /* found a regular match */
+            U32 const offset = (U32)(ip-match);
             mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-            {   U32 const offset = (U32)(ip-match);
-                while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-                offset_2 = offset_1;
-                offset_1 = offset;
-                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-        }   }
+            while (((ip>anchor) & (match>prefixStart))
+                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+            offset_2 = offset_1;
+            offset_1 = offset;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }
 
         /* match found */
         ip += mLength;
@@ -97,21 +180,46 @@
 
         if (ip <= ilimit) {
             /* Fill Table */
+            assert(base+current+2 > istart);  /* check base overflow */
             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
             /* check immediate repcode */
-            while ( (ip <= ilimit)
-                 && ( (offset_2>0)
-                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
-                /* store sequence */
-                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
-                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
-                ip += rLength;
-                anchor = ip;
-                continue;   /* faster when present ... (?) */
-    }   }   }
+            if (dictMode == ZSTD_dictMatchState) {
+                while (ip <= ilimit) {
+                    U32 const current2 = (U32)(ip-base);
+                    U32 const repIndex2 = current2 - offset_2;
+                    const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+                            dictBase - dictIndexDelta + repIndex2 :
+                            base + repIndex2;
+                    if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                        const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                        ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                        hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+                        ip += repLength2;
+                        anchor = ip;
+                        continue;
+                    }
+                    break;
+                }
+            }
+
+            if (dictMode == ZSTD_noDict) {
+                while ( (ip <= ilimit)
+                     && ( (offset_2>0)
+                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                    /* store sequence */
+                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
+                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                    ip += rLength;
+                    anchor = ip;
+                    continue;   /* faster when present ... (?) */
+    }   }   }   }
 
     /* save reps for next block */
     rep[0] = offset_1 ? offset_1 : offsetSaved;
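The repcode checks above lean on a single unsigned comparison, (U32)((prefixStartIndex-1) - repIndex) >= 3, to validate repIndex: an index at least four bytes below prefixStartIndex reads entirely from the dictionary segment, an index at or above prefixStartIndex makes the subtraction wrap to a huge value and passes (its read lies entirely in the current prefix), and only the three indices whose 4-byte read would straddle the dict/prefix boundary are rejected. A standalone check of that behaviour (an illustration, not changeset code):

#include <assert.h>
#include <stdint.h>

typedef uint32_t U32;

static int repIndexIsSafe(U32 prefixStartIndex, U32 repIndex)
{
    return (U32)((prefixStartIndex - 1) - repIndex) >= 3;   /* intentional underflow */
}

int main(void)
{
    U32 const prefixStartIndex = 100;

    assert( repIndexIsSafe(prefixStartIndex, 96));   /* 96+4 == 100: fits in dict  */
    assert(!repIndexIsSafe(prefixStartIndex, 97));   /* straddles the boundary     */
    assert(!repIndexIsSafe(prefixStartIndex, 99));   /* straddles the boundary     */
    assert( repIndexIsSafe(prefixStartIndex, 100));  /* wraps: read is in prefix   */
    assert( repIndexIsSafe(prefixStartIndex, 150));  /* plainly in prefix          */
    return 0;
}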
@@ -124,42 +232,66 @@
 
 size_t ZSTD_compressBlock_fast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const hlog = cParams->hashLog;
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32 const mls = cParams->searchLength;
-    U32 const stepSize = cParams->targetLength;
+    assert(ms->dictMatchState == NULL);
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
     case 5 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
     case 6 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
     case 7 :
-        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+    }
+}
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
+    U32 const mls = cParams->searchLength;
+    assert(ms->dictMatchState != NULL);
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+    case 5 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+    case 6 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+    case 7 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
     }
 }
 
 
 static size_t ZSTD_compressBlock_fast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        void const* src, size_t srcSize,
-        U32 const hlog, U32 const stepSize, U32 const mls)
+        void const* src, size_t srcSize, U32 const mls)
 {
-    U32* hashTable = ms->hashTable;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    U32* const hashTable = ms->hashTable;
+    U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 */
+    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
     const BYTE* const base = ms->window.base;
     const BYTE* const dictBase = ms->window.dictBase;
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
-    const U32   lowestIndex = ms->window.lowLimit;
-    const BYTE* const dictStart = dictBase + lowestIndex;
-    const U32   dictLimit = ms->window.dictLimit;
-    const BYTE* const lowPrefixPtr = base + dictLimit;
-    const BYTE* const dictEnd = dictBase + dictLimit;
+    const U32   dictStartIndex = ms->window.lowLimit;
+    const BYTE* const dictStart = dictBase + dictStartIndex;
+    const U32   prefixStartIndex = ms->window.dictLimit;
+    const BYTE* const prefixStart = base + prefixStartIndex;
+    const BYTE* const dictEnd = dictBase + prefixStartIndex;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
     U32 offset_1=rep[0], offset_2=rep[1];
@@ -167,33 +299,34 @@
     /* Search Loop */
     while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
         const size_t h = ZSTD_hashPtr(ip, hlog, mls);
-        const U32 matchIndex = hashTable[h];
-        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
-        const BYTE* match = matchBase + matchIndex;
-        const U32 current = (U32)(ip-base);
-        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
-        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
-        const BYTE* repMatch = repBase + repIndex;
+        const U32    matchIndex = hashTable[h];
+        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+        const BYTE*  match = matchBase + matchIndex;
+        const U32    current = (U32)(ip-base);
+        const U32    repIndex = current + 1 - offset_1;
+        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+        const BYTE* const repMatch = repBase + repIndex;
         size_t mLength;
         hashTable[h] = current;   /* update hash table */
+        assert(offset_1 <= current +1);   /* check repIndex */
 
-        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
-            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
-            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
         } else {
-            if ( (matchIndex < lowestIndex) ||
+            if ( (matchIndex < dictStartIndex) ||
                  (MEM_read32(match) != MEM_read32(ip)) ) {
                 assert(stepSize >= 1);
                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                 continue;
             }
-            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
-                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+            {   const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                 U32 offset;
-                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                 while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                 offset = current - matchIndex;
                 offset_2 = offset_1;
@@ -213,11 +346,11 @@
             while (ip <= ilimit) {
                 U32 const current2 = (U32)(ip-base);
                 U32 const repIndex2 = current2 - offset_2;
-                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
-                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
-                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
-                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
@@ -239,21 +372,20 @@
 
 size_t ZSTD_compressBlock_fast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    U32 const hlog = cParams->hashLog;
+    ZSTD_compressionParameters const* cParams = &ms->cParams;
     U32 const mls = cParams->searchLength;
-    U32 const stepSize = cParams->targetLength;
     switch(mls)
     {
     default: /* includes case 3 */
     case 4 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
     case 5 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
     case 6 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
     case 7 :
-        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
     }
 }
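Both fast-strategy loops above now derive stepSize from cParams->targetLength with the branch-free clamp targetLength + !targetLength, which maps 0 to 1 and leaves positive values unchanged, so ip += ((ip-anchor) >> kSearchStrength) + stepSize always advances by at least one byte. A small worked check (not changeset code):

#include <assert.h>
#include <stdint.h>

typedef uint32_t U32;

int main(void)
{
    U32 const targetLengths[] = { 0, 1, 7, 512 };
    unsigned i;
    for (i = 0; i < sizeof(targetLengths)/sizeof(targetLengths[0]); i++) {
        U32 const t = targetLengths[i];
        U32 const stepSize = t + !t;           /* 0 -> 1, n -> n */
        assert(stepSize >= 1);
        assert(t == 0 ? stepSize == 1 : stepSize == t);
    }
    return 0;
}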
--- a/contrib/python-zstandard/zstd/compress/zstd_fast.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_fast.h	Mon Oct 22 14:46:06 2018 -0400
@@ -19,14 +19,16 @@
 #include "zstd_compress_internal.h"
 
 void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
-                        ZSTD_compressionParameters const* cParams,
-                        void const* end);
+                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
 size_t ZSTD_compressBlock_fast(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_fast_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
--- a/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_lazy.c	Mon Oct 22 14:46:06 2018 -0400
@@ -16,11 +16,12 @@
 *  Binary Tree search
 ***************************************/
 
-void ZSTD_updateDUBT(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                 const BYTE* ip, const BYTE* iend,
                 U32 mls)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
     U32  const hashLog = cParams->hashLog;
 
@@ -59,11 +60,12 @@
  *  sort one already inserted but unsorted position
  *  assumption : current >= btlow == (current - btmask)
  *  doesn't fail */
-static void ZSTD_insertDUBT1(
-                 ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
                  U32 current, const BYTE* inputEnd,
-                 U32 nbCompares, U32 btLow, int extDict)
+                 U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32*   const bt = ms->chainTable;
     U32    const btLog  = cParams->chainLog - 1;
     U32    const btMask = (1 << btLog) - 1;
@@ -92,10 +94,12 @@
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         assert(matchIndex < current);
 
-        if ( (!extDict)
+        if ( (dictMode != ZSTD_extDict)
           || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
           || (current < dictLimit) /* both in extDict */) {
-            const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
+            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+                                     || (matchIndex+matchLength >= dictLimit)) ?
+                                        base : dictBase;
             assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                  || (current < dictLimit) );
             match = mBase + matchIndex;
@@ -138,13 +142,95 @@
 }
 
 
-static size_t ZSTD_DUBT_findBestMatch (
-                            ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                            const BYTE* const ip, const BYTE* const iend,
-                            size_t* offsetPtr,
-                            U32 const mls,
-                            U32 const extDict)
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+        ZSTD_matchState_t* ms,
+        const BYTE* const ip, const BYTE* const iend,
+        size_t* offsetPtr,
+        U32 nbCompares,
+        U32 const mls,
+        const ZSTD_dictMode_e dictMode)
 {
+    const ZSTD_matchState_t * const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+    const U32 * const dictHashTable = dms->hashTable;
+    U32         const hashLog = dmsCParams->hashLog;
+    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32               dictMatchIndex = dictHashTable[h];
+
+    const BYTE* const base = ms->window.base;
+    const BYTE* const prefixStart = base + ms->window.dictLimit;
+    U32         const current = (U32)(ip-base);
+    const BYTE* const dictBase = dms->window.base;
+    const BYTE* const dictEnd = dms->window.nextSrc;
+    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+    U32         const dictLowLimit = dms->window.lowLimit;
+    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+    U32*        const dictBt = dms->chainTable;
+    U32         const btLog  = dmsCParams->chainLog - 1;
+    U32         const btMask = (1 << btLog) - 1;
+    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+    size_t commonLengthSmaller=0, commonLengthLarger=0, bestLength=0;
+    U32 matchEndIdx = current+8+1;
+
+    (void)dictMode;
+    assert(dictMode == ZSTD_dictMatchState);
+
+    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        const BYTE* match = dictBase + dictMatchIndex;
+        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+        if (dictMatchIndex+matchLength >= dictHighLimit)
+            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
+
+        if (matchLength > bestLength) {
+            U32 matchIndex = dictMatchIndex + dictIndexDelta;
+            if (matchLength > matchEndIdx - matchIndex)
+                matchEndIdx = matchIndex + (U32)matchLength;
+            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+                DEBUGLOG(2, "ZSTD_DUBT_findBestDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
+                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+            }
+            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+            }
+        }
+
+        DEBUGLOG(2, "matchLength:%6zu, match:%p, prefixStart:%p, ip:%p", matchLength, match, prefixStart, ip);
+        if (match[matchLength] < ip[matchLength]) {
+            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+        } else {
+            /* match is larger than current */
+            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
+            commonLengthLarger = matchLength;
+            dictMatchIndex = nextPtr[0];
+        }
+    }
+
+    if (bestLength >= MINMATCH) {
+        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+        DEBUGLOG(2, "ZSTD_DUBT_findBestDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+    }
+    return bestLength;
+
+}
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+                        const BYTE* const ip, const BYTE* const iend,
+                        size_t* offsetPtr,
+                        U32 const mls,
+                        const ZSTD_dictMode_e dictMode)
+{
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32*   const hashTable = ms->hashTable;
     U32    const hashLog = cParams->hashLog;
     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
@@ -195,8 +281,8 @@
     while (matchIndex) {  /* will end on matchIndex == 0 */
         U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
         U32 const nextCandidateIdx = *nextCandidateIdxPtr;
-        ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
-                         nbCandidates, unsortLimit, extDict);
+        ZSTD_insertDUBT1(ms, matchIndex, iend,
+                         nbCandidates, unsortLimit, dictMode);
         matchIndex = nextCandidateIdx;
         nbCandidates++;
     }
@@ -221,7 +307,7 @@
             size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
             const BYTE* match;
 
-            if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                 match = base + matchIndex;
                 matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
             } else {
@@ -259,6 +345,10 @@
 
         *smallerPtr = *largerPtr = 0;
 
+        if (dictMode == ZSTD_dictMatchState && nbCompares) {
+            bestLength = ZSTD_DUBT_findBetterDictMatch(ms, ip, iend, offsetPtr, nbCompares, mls, dictMode);
+        }
+
         assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
         ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
         if (bestLength >= MINMATCH) {
@@ -272,61 +362,64 @@
 
 
 /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* const ip, const BYTE* const iLimit,
-                        size_t* offsetPtr,
-                        const U32 mls /* template */)
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+                const BYTE* const ip, const BYTE* const iLimit,
+                      size_t* offsetPtr,
+                const U32 mls /* template */,
+                const ZSTD_dictMode_e dictMode)
 {
     DEBUGLOG(7, "ZSTD_BtFindBestMatch");
     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
-    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
+    ZSTD_updateDUBT(ms, ip, iLimit, mls);
+    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
 }
 
 
-static size_t ZSTD_BtFindBestMatch_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip, const BYTE* const iLimit,
-                        size_t* offsetPtr)
+static size_t
+ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
+                            const BYTE* ip, const BYTE* const iLimit,
+                                  size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.searchLength)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
-    case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
     case 7 :
-    case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
     }
 }
 
 
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* const ip, const BYTE* const iLimit,
-                        size_t* offsetPtr,
-                        const U32 mls)
+static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
+                        ZSTD_matchState_t* ms,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
 {
-    DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
-    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
-    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
+    switch(ms->cParams.searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+    case 7 :
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+    }
 }
 
 
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
                         size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.searchLength)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
-    case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
     case 7 :
-    case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
+    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
     }
 }
 
@@ -340,7 +433,8 @@
 /* Update chains up to ip (excluded)
    Assumption : always within prefix (i.e. not within extDict) */
 static U32 ZSTD_insertAndFindFirstIndex_internal(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
+                        const ZSTD_compressionParameters* const cParams,
                         const BYTE* ip, U32 const mls)
 {
     U32* const hashTable  = ms->hashTable;
@@ -362,22 +456,21 @@
     return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
 }
 
-U32 ZSTD_insertAndFindFirstIndex(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip)
-{
-    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
+    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.searchLength);
 }
 
 
 /* inlining is important to hardwire a hot branch (template emulation) */
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_HcFindBestMatch_generic (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* const ip, const BYTE* const iLimit,
                         size_t* offsetPtr,
-                        const U32 mls, const U32 extDict)
+                        const U32 mls, const ZSTD_dictMode_e dictMode)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const chainTable = ms->chainTable;
     const U32 chainSize = (1 << cParams->chainLog);
     const U32 chainMask = chainSize-1;
@@ -397,7 +490,7 @@
 
     for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
         size_t currentMl=0;
-        if ((!extDict) || matchIndex >= dictLimit) {
+        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
             const BYTE* const match = base + matchIndex;
             if (match[ml] == ip[ml])   /* potentially better */
                 currentMl = ZSTD_count(ip, match, iLimit);
@@ -419,38 +512,87 @@
         matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
     }
 
+    if (dictMode == ZSTD_dictMatchState) {
+        const ZSTD_matchState_t* const dms = ms->dictMatchState;
+        const U32* const dmsChainTable = dms->chainTable;
+        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
+        const U32 dmsChainMask         = dmsChainSize - 1;
+        const U32 dmsLowestIndex       = dms->window.dictLimit;
+        const BYTE* const dmsBase      = dms->window.base;
+        const BYTE* const dmsEnd       = dms->window.nextSrc;
+        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
+        const U32 dmsIndexDelta        = dictLimit - dmsSize;
+        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+            size_t currentMl=0;
+            const BYTE* const match = dmsBase + matchIndex;
+            assert(match+4 <= dmsEnd);
+            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+            /* save best solution */
+            if (currentMl > ml) {
+                ml = currentMl;
+                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+            }
+
+            if (matchIndex <= dmsMinChain) break;
+            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+        }
+    }
+
     return ml;
 }
 
 
 FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
                         size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.searchLength)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
     case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+    }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
+                        ZSTD_matchState_t* ms,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(ms->cParams.searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+    case 7 :
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
     }
 }
 
 
 FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* const iLimit,
-                        size_t* const offsetPtr)
+                        size_t* offsetPtr)
 {
-    switch(cParams->searchLength)
+    switch(ms->cParams.searchLength)
     {
     default : /* includes case 3 */
-    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
-    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
     case 7 :
-    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
     }
 }
 
@@ -462,30 +604,55 @@
 size_t ZSTD_compressBlock_lazy_generic(
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
-                        ZSTD_compressionParameters const* cParams,
                         const void* src, size_t srcSize,
-                        const U32 searchMethod, const U32 depth)
+                        const U32 searchMethod, const U32 depth,
+                        ZSTD_dictMode_e const dictMode)
 {
     const BYTE* const istart = (const BYTE*)src;
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
     const BYTE* const iend = istart + srcSize;
     const BYTE* const ilimit = iend - 8;
-    const BYTE* const base = ms->window.base + ms->window.dictLimit;
+    const BYTE* const base = ms->window.base;
+    const U32 prefixLowestIndex = ms->window.dictLimit;
+    const BYTE* const prefixLowest = base + prefixLowestIndex;
 
     typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
+        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
     U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
 
+    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.dictLimit : 0;
+    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.base : NULL;
+    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?
+                                     dictBase + dictLowestIndex : NULL;
+    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
+                                     dms->window.nextSrc : NULL;
+    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
+                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
+                                     0;
+    const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
+
     /* init */
-    ip += (ip==base);
+    ip += (dictAndPrefixLength == 0);
     ms->nextToUpdate3 = ms->nextToUpdate;
-    {   U32 const maxRep = (U32)(ip-base);
+    if (dictMode == ZSTD_noDict) {
+        U32 const maxRep = (U32)(ip - prefixLowest);
         if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
         if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
     }
+    if (dictMode == ZSTD_dictMatchState) {
+        /* dictMatchState repCode checks don't currently handle disabling of
+         * repCode == 0. */
+        assert(offset_1 <= dictAndPrefixLength);
+        assert(offset_2 <= dictAndPrefixLength);
+    }
 
     /* Match Loop */
     while (ip < ilimit) {
@@ -494,15 +661,28 @@
         const BYTE* start=ip+1;
 
         /* check repCode */
-        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
-            /* repcode : we take it */
+        if (dictMode == ZSTD_dictMatchState) {
+            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+                                && repIndex < prefixLowestIndex) ?
+                                   dictBase + (repIndex - dictIndexDelta) :
+                                   base + repIndex;
+            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                if (depth==0) goto _storeSequence;
+            }
+        }
+        if ( dictMode == ZSTD_noDict
+          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
             matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
             if (depth==0) goto _storeSequence;
         }
 
         /* first search (depth 0) */
-        {   size_t offsetFound = 99999999;
-            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+        {   size_t offsetFound = 999999999;
+            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
             if (ml2 > matchLength)
                 matchLength = ml2, start = ip, offset=offsetFound;
         }
@@ -516,15 +696,31 @@
         if (depth>=1)
         while (ip<ilimit) {
             ip ++;
-            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+            if ( (dictMode == ZSTD_noDict)
+              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                 int const gain2 = (int)(mlRep * 3);
                 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                 if ((mlRep >= 4) && (gain2 > gain1))
                     matchLength = mlRep, offset = 0, start = ip;
             }
-            {   size_t offset2=99999999;
-                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+            if (dictMode == ZSTD_dictMatchState) {
+                const U32 repIndex = (U32)(ip - base) - offset_1;
+                const BYTE* repMatch = repIndex < prefixLowestIndex ?
+                               dictBase + (repIndex - dictIndexDelta) :
+                               base + repIndex;
+                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                    int const gain2 = (int)(mlRep * 3);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((mlRep >= 4) && (gain2 > gain1))
+                        matchLength = mlRep, offset = 0, start = ip;
+                }
+            }
+            {   size_t offset2=999999999;
+                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                 if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -535,15 +731,31 @@
             /* let's find an even better one */
             if ((depth==2) && (ip<ilimit)) {
                 ip ++;
-                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
-                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
-                    int const gain2 = (int)(ml2 * 4);
+                if ( (dictMode == ZSTD_noDict)
+                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                    int const gain2 = (int)(mlRep * 4);
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
-                    if ((ml2 >= 4) && (gain2 > gain1))
-                        matchLength = ml2, offset = 0, start = ip;
+                    if ((mlRep >= 4) && (gain2 > gain1))
+                        matchLength = mlRep, offset = 0, start = ip;
                 }
-                {   size_t offset2=99999999;
-                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                if (dictMode == ZSTD_dictMatchState) {
+                    const U32 repIndex = (U32)(ip - base) - offset_1;
+                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
+                                   dictBase + (repIndex - dictIndexDelta) :
+                                   base + repIndex;
+                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+                        int const gain2 = (int)(mlRep * 4);
+                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                        if ((mlRep >= 4) && (gain2 > gain1))
+                            matchLength = mlRep, offset = 0, start = ip;
+                    }
+                }
+                {   size_t offset2=999999999;
+                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                     if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -560,9 +772,17 @@
          */
         /* catch up */
         if (offset) {
-            while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
-                 && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
-                { start--; matchLength++; }
+            if (dictMode == ZSTD_noDict) {
+                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
+                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
+                    { start--; matchLength++; }
+            }
+            if (dictMode == ZSTD_dictMatchState) {
+                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
+            }
             offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
         }
         /* store sequence */
@@ -573,16 +793,39 @@
         }
 
         /* check immediate repcode */
-        while ( ((ip <= ilimit) & (offset_2>0))
-             && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
-            /* store sequence */
-            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
-            ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
-            ip += matchLength;
-            anchor = ip;
-            continue;   /* faster when present ... (?) */
-    }   }
+        if (dictMode == ZSTD_dictMatchState) {
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex = current2 - offset_2;
+                const BYTE* repMatch = dictMode == ZSTD_dictMatchState
+                    && repIndex < prefixLowestIndex ?
+                        dictBase - dictIndexDelta + repIndex :
+                        base + repIndex;
+                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional underflow */)
+                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                    ip += matchLength;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+            }
+        }
+
+        if (dictMode == ZSTD_noDict) {
+            while ( ((ip <= ilimit) & (offset_2>0))
+                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+                /* store sequence */
+                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                ip += matchLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+    }   }   }
 
     /* Save reps for next block */
     rep[0] = offset_1 ? offset_1 : savedOffset;
@@ -595,30 +838,58 @@
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_lazy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
 }
 
 size_t ZSTD_compressBlock_greedy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
 }
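/* These wrappers exist so that a run-time selector can pick an entry by
 * (strategy, dictMode); this patch later calls ZSTD_selectBlockCompressor
 * that way from zstd_ldm.c.  A minimal, self-contained sketch of that
 * dispatch shape, using hypothetical stub functions rather than the real
 * table: */
typedef size_t (*blockCompressor_f)(const void* src, size_t srcSize);

static size_t stub_greedy(const void* src, size_t srcSize)
{   (void)src; return srcSize;   }   /* placeholder body */
static size_t stub_greedy_dms(const void* src, size_t srcSize)
{   (void)src; return srcSize;   }   /* placeholder body */

static blockCompressor_f selectCompressor(int dictMode)
{   /* 0=noDict, 1=extDict, 2=dictMatchState; one strategy row shown */
    static const blockCompressor_f greedyRow[3] =
        { stub_greedy, stub_greedy /* extDict stand-in */, stub_greedy_dms };
    return greedyRow[dictMode];
}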
 
 
@@ -626,7 +897,6 @@
 size_t ZSTD_compressBlock_lazy_extDict_generic(
                         ZSTD_matchState_t* ms, seqStore_t* seqStore,
                         U32 rep[ZSTD_REP_NUM],
-                        ZSTD_compressionParameters const* cParams,
                         const void* src, size_t srcSize,
                         const U32 searchMethod, const U32 depth)
 {
@@ -644,9 +914,9 @@
     const BYTE* const dictStart  = dictBase + lowestIndex;
 
     typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        ZSTD_matchState_t* ms,
                         const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
 
     U32 offset_1 = rep[0], offset_2 = rep[1];
 
@@ -674,8 +944,8 @@
         }   }
 
         /* first search (depth 0) */
-        {   size_t offsetFound = 99999999;
-            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+        {   size_t offsetFound = 999999999;
+            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
             if (ml2 > matchLength)
                 matchLength = ml2, start = ip, offset=offsetFound;
         }
@@ -707,8 +977,8 @@
             }   }
 
             /* search match, depth 1 */
-            {   size_t offset2=99999999;
-                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+            {   size_t offset2=999999999;
+                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                 if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -737,8 +1007,8 @@
                 }   }
 
                 /* search match, depth 2 */
-                {   size_t offset2=99999999;
-                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                {   size_t offset2=999999999;
+                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                     if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -794,31 +1064,31 @@
 
 size_t ZSTD_compressBlock_greedy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
 }
 
 size_t ZSTD_compressBlock_lazy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
 }
 
 size_t ZSTD_compressBlock_lazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
 }
 
 size_t ZSTD_compressBlock_btlazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        void const* src, size_t srcSize)
 
 {
-    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
 }
--- a/contrib/python-zstandard/zstd/compress/zstd_lazy.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_lazy.h	Mon Oct 22 14:46:06 2018 -0400
@@ -17,37 +17,48 @@
 
 #include "zstd_compress_internal.h"
 
-U32 ZSTD_insertAndFindFirstIndex(
-        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-        const BYTE* ip);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
 
 void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_greedy(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 
 size_t ZSTD_compressBlock_greedy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_lazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btlazy2_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
--- a/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_ldm.c	Mon Oct 22 14:46:06 2018 -0400
@@ -9,6 +9,7 @@
 
 #include "zstd_ldm.h"
 
+#include "debug.h"
 #include "zstd_fast.h"          /* ZSTD_fillHashTable() */
 #include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
 
@@ -20,7 +21,7 @@
 void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                                ZSTD_compressionParameters const* cParams)
 {
-    U32 const windowLog = cParams->windowLog;
+    params->windowLog = cParams->windowLog;
     ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
     DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
     if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
@@ -33,12 +34,13 @@
       params->minMatchLength = minMatch;
     }
     if (params->hashLog == 0) {
-        params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
+        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
         assert(params->hashLog <= ZSTD_HASHLOG_MAX);
     }
     if (params->hashEveryLog == 0) {
-        params->hashEveryLog =
-                windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
+        params->hashEveryLog = params->windowLog < params->hashLog
+                                   ? 0
+                                   : params->windowLog - params->hashLog;
     }
     params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
 }
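/* Worked example of the defaulting above, assuming the upstream values
 * LDM_HASH_RLOG == 7 and ZSTD_HASHLOG_MIN == 6: with windowLog = 27 and no
 * user overrides,
 *   hashLog      = MAX(6, 27 - 7) = 20   (a 2^20-entry hash table)
 *   hashEveryLog = 27 - 20        = 7    (hash ~1 of every 128 positions)
 * so larger windows automatically hash more sparsely. */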
@@ -216,21 +218,18 @@
  *  The tables for the other strategies are filled within their
  *  block compressors. */
 static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
-                                      ZSTD_compressionParameters const* cParams,
                                       void const* end)
 {
     const BYTE* const iend = (const BYTE*)end;
 
-    switch(cParams->strategy)
+    switch(ms->cParams.strategy)
     {
     case ZSTD_fast:
-        ZSTD_fillHashTable(ms, cParams, iend);
-        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
         break;
 
     case ZSTD_dfast:
-        ZSTD_fillDoubleHashTable(ms, cParams, iend);
-        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
         break;
 
     case ZSTD_greedy:
@@ -508,7 +507,7 @@
         *       * Try invalidation after the sequence generation and test
         *         the offset against maxDist directly.
          */
-        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
+        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
         /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
         newLeftoverSize = ZSTD_ldm_generateSequences_internal(
             ldmState, sequences, params, chunkStart, chunkSize);
@@ -591,19 +590,19 @@
 
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-    ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
-    int const extDict)
+    void const* src, size_t srcSize)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     unsigned const minMatch = cParams->searchLength;
     ZSTD_blockCompressor const blockCompressor =
-        ZSTD_selectBlockCompressor(cParams->strategy, extDict);
-    BYTE const* const base = ms->window.base;
+        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
     /* Input bounds */
     BYTE const* const istart = (BYTE const*)src;
     BYTE const* const iend = istart + srcSize;
     /* Input positions */
     BYTE const* ip = istart;
 
+    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
     assert(rawSeqStore->pos <= rawSeqStore->size);
     assert(rawSeqStore->size <= rawSeqStore->capacity);
     /* Loop through each sequence and apply the block compressor to the lits */
@@ -621,14 +620,13 @@
 
         /* Fill tables for block compressor */
         ZSTD_ldm_limitTableUpdate(ms, ip);
-        ZSTD_ldm_fillFastTables(ms, cParams, ip);
+        ZSTD_ldm_fillFastTables(ms, ip);
         /* Run the block compressor */
+        DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
         {
             size_t const newLitLength =
-                blockCompressor(ms, seqStore, rep, cParams, ip,
-                                sequence.litLength);
+                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
             ip += sequence.litLength;
-            ms->nextToUpdate = (U32)(ip - base);
             /* Update the repcodes */
             for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                 rep[i] = rep[i-1];
@@ -642,12 +640,7 @@
     }
     /* Fill the tables for the block compressor */
     ZSTD_ldm_limitTableUpdate(ms, ip);
-    ZSTD_ldm_fillFastTables(ms, cParams, ip);
+    ZSTD_ldm_fillFastTables(ms, ip);
     /* Compress the last literals */
-    {
-        size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
-                                                    ip, iend - ip);
-        ms->nextToUpdate = (U32)(iend - base);
-        return lastLiterals;
-    }
+    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
 }
--- a/contrib/python-zstandard/zstd/compress/zstd_ldm.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_ldm.h	Mon Oct 22 14:46:06 2018 -0400
@@ -61,9 +61,7 @@
  */
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
             ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-            ZSTD_compressionParameters const* cParams,
-            void const* src, size_t srcSize,
-            int const extDict);
+            void const* src, size_t srcSize);
 
 /**
  * ZSTD_ldm_skipSequences():
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.c	Mon Oct 22 14:46:06 2018 -0400
@@ -9,10 +9,11 @@
  */
 
 #include "zstd_compress_internal.h"
+#include "hist.h"
 #include "zstd_opt.h"
 
 
-#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */
+#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
 #define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
 #define ZSTD_MAX_PRICE     (1<<30)
 
@@ -20,128 +21,210 @@
 /*-*************************************
 *  Price functions for optimal parser
 ***************************************/
-static void ZSTD_setLog2Prices(optState_t* optPtr)
+
+#if 0    /* approximation at bit level */
+#  define BITCOST_ACCURACY 0
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_bitWeight(stat))
+#elif 0  /* fractional bit accuracy */
+#  define BITCOST_ACCURACY 8
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else    /* opt==approx, ultra==accurate */
+#  define BITCOST_ACCURACY 8
+#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
+{
+    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
 {
-    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
-    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
-    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
-    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+    U32 const stat = rawStat + 1;
+    U32 const hb = ZSTD_highbit32(stat);
+    U32 const BWeight = hb * BITCOST_MULTIPLIER;
+    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+    U32 const weight = BWeight + FWeight;
+    assert(hb + BITCOST_ACCURACY < 31);
+    return weight;
+}
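/* Worked example of the two weights above: for rawStat = 99, stat = 100 and
 * hb = ZSTD_highbit32(100) = 6, so BWeight = 6*256 = 1536 and
 * FWeight = (100 << 8) >> 6 = 400, giving ZSTD_fracWeight(99) == 1936,
 * i.e. ~7.56 in 1/256-bit units, versus ZSTD_bitWeight(99) == 1536 (a flat 6).
 * Prices are differences of weights, so only the relative resolution matters;
 * the fractional variant is what the "accurate" (ultra) configuration selects. */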
+
+/* debugging function, @return price in bytes */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+    return (double)price / (BITCOST_MULTIPLIER*8);
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+    optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
 }
 
 
+static U32 ZSTD_downscaleStat(U32* table, U32 lastEltIndex, int malus)
+{
+    U32 s, sum=0;
+    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+    for (s=0; s<=lastEltIndex; s++) {
+        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+        sum += table[s];
+    }
+    return sum;
+}
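/* Example of the decay above with ZSTD_FREQ_DIV == 4: a symbol seen 320 times
 * in the previous block re-enters as 1 + (320 >> 5) = 11 when malus == 1, or
 * 1 + (320 >> 4) = 21 when malus == 0; rare symbols collapse to the floor of
 * 1, so no frequency is ever zero and no price ever becomes infinite. */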
+
 static void ZSTD_rescaleFreqs(optState_t* const optPtr,
-                              const BYTE* const src, size_t const srcSize)
+                              const BYTE* const src, size_t const srcSize,
+                              int optLevel)
 {
-    optPtr->staticPrices = 0;
+    optPtr->priceType = zop_dynamic;
+
+    if (optPtr->litLengthSum == 0) {  /* first block : init */
+        if (srcSize <= 1024)   /* heuristic */
+            optPtr->priceType = zop_predef;
+
+        assert(optPtr->symbolCosts != NULL);
+        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { /* huffman table presumed generated by dictionary */
+            optPtr->priceType = zop_dynamic;
 
-    if (optPtr->litLengthSum == 0) {  /* first init */
-        unsigned u;
-        if (srcSize <= 1024) optPtr->staticPrices = 1;
+            assert(optPtr->litFreq != NULL);
+            optPtr->litSum = 0;
+            {   unsigned lit;
+                for (lit=0; lit<=MaxLit; lit++) {
+                    U32 const scaleLog = 11;   /* scale to 2K */
+                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+                    assert(bitCost <= scaleLog);
+                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->litSum += optPtr->litFreq[lit];
+            }   }
+
+            {   unsigned ll;
+                FSE_CState_t llstate;
+                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+                optPtr->litLengthSum = 0;
+                for (ll=0; ll<=MaxLL; ll++) {
+                    U32 const scaleLog = 10;   /* scale to 1K */
+                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+                    assert(bitCost < scaleLog);
+                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+            }   }
 
-        assert(optPtr->litFreq!=NULL);
-        for (u=0; u<=MaxLit; u++)
-            optPtr->litFreq[u] = 0;
-        for (u=0; u<srcSize; u++)
-            optPtr->litFreq[src[u]]++;
-        optPtr->litSum = 0;
-        for (u=0; u<=MaxLit; u++) {
-            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV);
-            optPtr->litSum += optPtr->litFreq[u];
+            {   unsigned ml;
+                FSE_CState_t mlstate;
+                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+                optPtr->matchLengthSum = 0;
+                for (ml=0; ml<=MaxML; ml++) {
+                    U32 const scaleLog = 10;
+                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+                    assert(bitCost < scaleLog);
+                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+            }   }
+
+            {   unsigned of;
+                FSE_CState_t ofstate;
+                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+                optPtr->offCodeSum = 0;
+                for (of=0; of<=MaxOff; of++) {
+                    U32 const scaleLog = 10;
+                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+                    assert(bitCost < scaleLog);
+                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
+            }   }
+
+        } else {  /* not a dictionary */
+
+            assert(optPtr->litFreq != NULL);
+            {   unsigned lit = MaxLit;
+                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
+            }
+            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+
+            {   unsigned ll;
+                for (ll=0; ll<=MaxLL; ll++)
+                    optPtr->litLengthFreq[ll] = 1;
+            }
+            optPtr->litLengthSum = MaxLL+1;
+
+            {   unsigned ml;
+                for (ml=0; ml<=MaxML; ml++)
+                    optPtr->matchLengthFreq[ml] = 1;
+            }
+            optPtr->matchLengthSum = MaxML+1;
+
+            {   unsigned of;
+                for (of=0; of<=MaxOff; of++)
+                    optPtr->offCodeFreq[of] = 1;
+            }
+            optPtr->offCodeSum = MaxOff+1;
+
         }
 
-        for (u=0; u<=MaxLL; u++)
-            optPtr->litLengthFreq[u] = 1;
-        optPtr->litLengthSum = MaxLL+1;
-        for (u=0; u<=MaxML; u++)
-            optPtr->matchLengthFreq[u] = 1;
-        optPtr->matchLengthSum = MaxML+1;
-        for (u=0; u<=MaxOff; u++)
-            optPtr->offCodeFreq[u] = 1;
-        optPtr->offCodeSum = (MaxOff+1);
-
-    } else {
-        unsigned u;
+    } else {   /* new block : re-use previous statistics, scaled down */
 
-        optPtr->litSum = 0;
-        for (u=0; u<=MaxLit; u++) {
-            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1));
-            optPtr->litSum += optPtr->litFreq[u];
-        }
-        optPtr->litLengthSum = 0;
-        for (u=0; u<=MaxLL; u++) {
-            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
-            optPtr->litLengthSum += optPtr->litLengthFreq[u];
-        }
-        optPtr->matchLengthSum = 0;
-        for (u=0; u<=MaxML; u++) {
-            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
-            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
-        }
-        optPtr->offCodeSum = 0;
-        for (u=0; u<=MaxOff; u++) {
-            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
-            optPtr->offCodeSum += optPtr->offCodeFreq[u];
-        }
+        optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
     }
 
-    ZSTD_setLog2Prices(optPtr);
+    ZSTD_setBasePrices(optPtr, optLevel);
 }
 
-
 /* ZSTD_rawLiteralsCost() :
- * cost of literals (only) in given segment (which length can be null)
- * does not include cost of literalLength symbol */
+ * price of literals (only) in specified segment (which length can be 0).
+ * does not include price of literalLength symbol */
 static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
-                                const optState_t* const optPtr)
+                                const optState_t* const optPtr,
+                                int optLevel)
 {
-    if (optPtr->staticPrices) return (litLength*6);  /* 6 bit per literal - no statistic used */
     if (litLength == 0) return 0;
+    if (optPtr->priceType == zop_predef)
+        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
 
-    /* literals */
-    {   U32 u;
-        U32 cost = litLength * optPtr->log2litSum;
-        for (u=0; u < litLength; u++)
-            cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
-        return cost;
+    /* dynamic statistics */
+    {   U32 price = litLength * optPtr->litSumBasePrice;
+        U32 u;
+        for (u=0; u < litLength; u++) {
+            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
+            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+        }
+        return price;
     }
 }
 
 /* ZSTD_litLengthPrice() :
  * cost of literalLength symbol */
-static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr)
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1);
+    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
 
-    /* literal Length */
+    /* dynamic statistics */
     {   U32 const llCode = ZSTD_LLcode(litLength);
-        U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
-        return price;
+        return (LL_bits[llCode] * BITCOST_MULTIPLIER) + (optPtr->litLengthSumBasePrice - WEIGHT(optPtr->litLengthFreq[llCode], optLevel));
     }
 }
 
-/* ZSTD_litLengthPrice() :
- * cost of the literal part of a sequence,
- * including literals themselves, and literalLength symbol */
-static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength,
-                                 const optState_t* const optPtr)
-{
-    return ZSTD_rawLiteralsCost(literals, litLength, optPtr)
-         + ZSTD_litLengthPrice(litLength, optPtr);
-}
-
 /* ZSTD_litLengthContribution() :
  * @return ( cost(litlength) - cost(0) )
  * this value can then be added to rawLiteralsCost()
  * to provide a cost which is directly comparable to a match ending at same position */
-static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr)
+static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
 {
-    if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1);
+    if (optPtr->priceType >= zop_predef) return WEIGHT(litLength, optLevel);
 
-    /* literal Length */
+    /* dynamic statistics */
     {   U32 const llCode = ZSTD_LLcode(litLength);
-        int const contribution = LL_bits[llCode]
-                        + ZSTD_highbit32(optPtr->litLengthFreq[0]+1)
-                        - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+        int const contribution = (LL_bits[llCode] * BITCOST_MULTIPLIER)
+                               + WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancels out */
+                               - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
 #if 1
         return contribution;
 #else
@@ -155,10 +238,11 @@
  * which can be compared to the ending cost of a match
  * should a new match start at this position */
 static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
-                                     const optState_t* const optPtr)
+                                     const optState_t* const optPtr,
+                                     int optLevel)
 {
-    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr)
-                           + ZSTD_litLengthContribution(litLength, optPtr);
+    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
     return contribution;
 }
 
@@ -166,31 +250,38 @@
  * Provides the cost of the match part (offset + matchLength) of a sequence
  * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
  * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
-FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(
-                                    U32 const offset, U32 const matchLength,
-                                    const optState_t* const optPtr,
-                                    int const optLevel)
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offset,
+                   U32 const matchLength,
+                   const optState_t* const optPtr,
+                   int const optLevel)
 {
     U32 price;
     U32 const offCode = ZSTD_highbit32(offset+1);
     U32 const mlBase = matchLength - MINMATCH;
     assert(matchLength >= MINMATCH);
 
-    if (optPtr->staticPrices)  /* fixed scheme, do not use statistics */
-        return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode;
+    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
+        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
 
-    price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
-    if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2; /* handicap for long distance offsets, favor decompression speed */
+    /* dynamic statistics */
+    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+    if ((optLevel<2) /*static*/ && offCode >= 20)
+        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
 
     /* match Length */
     {   U32 const mlCode = ZSTD_MLcode(mlBase);
-        price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
+        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
     }
 
+    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
+
     DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
     return price;
 }
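/* The recurring "basePrice - WEIGHT(freq)" pattern above approximates the
 * Shannon cost of a symbol: WEIGHT(sum) - WEIGHT(freq) ~= log2(sum/freq)
 * = -log2(P(symbol)), expressed in BITCOST_MULTIPLIER units.  Illustrative
 * numbers: offCodeSum = 4096 with offCodeFreq[code] = 256 prices that offset
 * code at about log2(16) = 4 bits, i.e. ~4 * BITCOST_MULTIPLIER. */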
 
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
 static void ZSTD_updateStats(optState_t* const optPtr,
                              U32 litLength, const BYTE* literals,
                              U32 offsetCode, U32 matchLength)
@@ -269,10 +360,11 @@
  *  ip : assumed <= iend-8 .
  * @return : nb of positions added */
 static U32 ZSTD_insertBt1(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
-                U32 const mls, U32 const extDict)
+                U32 const mls, const int extDict)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32*   const hashTable = ms->hashTable;
     U32    const hashLog = cParams->hashLog;
     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
@@ -293,6 +385,7 @@
     U32* largerPtr  = smallerPtr + 1;
     U32 dummy32;   /* to be nullified at the end */
     U32 const windowLow = ms->window.lowLimit;
+    U32 const matchLow = windowLow ? windowLow : 1;
     U32 matchEndIdx = current+8+1;
     size_t bestLength = 8;
     U32 nbCompares = 1U << cParams->searchLog;
@@ -308,7 +401,7 @@
     assert(ip <= iend-8);   /* required for h calculation */
     hashTable[h] = current;   /* Update Hash Table */
 
-    while (nbCompares-- && (matchIndex > windowLow)) {
+    while (nbCompares-- && (matchIndex >= matchLow)) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         assert(matchIndex < current);
@@ -334,8 +427,8 @@
         }
 #endif
 
-        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
-            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
+        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
             match = base + matchIndex;
             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
         } else {
@@ -379,35 +472,33 @@
 
 FORCE_INLINE_TEMPLATE
 void ZSTD_updateTree_internal(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                ZSTD_matchState_t* ms,
                 const BYTE* const ip, const BYTE* const iend,
-                const U32 mls, const U32 extDict)
+                const U32 mls, const ZSTD_dictMode_e dictMode)
 {
     const BYTE* const base = ms->window.base;
     U32 const target = (U32)(ip - base);
     U32 idx = ms->nextToUpdate;
-    DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u  (extDict:%u)",
-                idx, target, extDict);
+    DEBUGLOG(5, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
+                idx, target, dictMode);
 
     while(idx < target)
-        idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
+        idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
     ms->nextToUpdate = target;
 }
 
-void ZSTD_updateTree(
-                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                const BYTE* ip, const BYTE* iend)
-{
-    ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.searchLength, ZSTD_noDict);
 }
 
 FORCE_INLINE_TEMPLATE
 U32 ZSTD_insertBtAndGetAllMatches (
-                    ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                    const BYTE* const ip, const BYTE* const iLimit, int const extDict,
+                    ZSTD_matchState_t* ms,
+                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
                     U32 rep[ZSTD_REP_NUM], U32 const ll0,
                     ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     const BYTE* const base = ms->window.base;
     U32 const current = (U32)(ip-base);
@@ -426,6 +517,7 @@
     const BYTE* const prefixStart = base + dictLimit;
     U32 const btLow = btMask >= current ? 0 : current - btMask;
     U32 const windowLow = ms->window.lowLimit;
+    U32 const matchLow = windowLow ? windowLow : 1;
     U32* smallerPtr = bt + 2*(current&btMask);
     U32* largerPtr  = bt + 2*(current&btMask) + 1;
     U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
@@ -433,8 +525,21 @@
     U32 mnum = 0;
     U32 nbCompares = 1U << cParams->searchLog;
 
+    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+    const ZSTD_compressionParameters* const dmsCParams =
+                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
     size_t bestLength = lengthToBeat-1;
-    DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");
+    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
 
     /* check repCode */
     {   U32 const lastR = ZSTD_REP_NUM + ll0;
@@ -449,18 +554,26 @@
                     repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                 }
             } else {  /* repIndex < dictLimit || repIndex >= current */
-                const BYTE* const repMatch = dictBase + repIndex;
+                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+                                             dmsBase + repIndex - dmsIndexDelta :
+                                             dictBase + repIndex;
                 assert(current >= windowLow);
-                if ( extDict /* this case only valid in extDict mode */
+                if ( dictMode == ZSTD_extDict
                   && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
                      & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                     repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+                }
+                if (dictMode == ZSTD_dictMatchState
+                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `current > repIndex >= dmsLowLimit` */
+                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
             }   }
             /* save longer solution */
             if (repLen > bestLength) {
-                DEBUGLOG(8, "found rep-match %u of length %u",
-                            repCode - ll0, (U32)repLen);
+                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+                            repCode, ll0, repOffset, repLen);
                 bestLength = repLen;
                 matches[mnum].off = repCode - ll0;
                 matches[mnum].len = (U32)repLen;
@@ -473,10 +586,10 @@
     /* HC3 match finder */
     if ((mls == 3) /*static*/ && (bestLength < mls)) {
         U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
-        if ((matchIndex3 > windowLow)
+        if ((matchIndex3 >= matchLow)
           & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
             size_t mlen;
-            if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) {
+            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                 const BYTE* const match = base + matchIndex3;
                 mlen = ZSTD_count(ip, match, iLimit);
             } else {
@@ -498,17 +611,21 @@
                      (ip+mlen == iLimit) ) {  /* best possible length */
                     ms->nextToUpdate = current+1;  /* skip insertion */
                     return 1;
-    }   }   }   }
+                }
+            }
+        }
+        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+    }
 
     hashTable[h] = current;   /* Update Hash Table */
 
-    while (nbCompares-- && (matchIndex > windowLow)) {
+    while (nbCompares-- && (matchIndex >= matchLow)) {
         U32* const nextPtr = bt + 2*(matchIndex & btMask);
         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
         const BYTE* match;
         assert(current > matchIndex);
 
-        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
             assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
             match = base + matchIndex;
             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
@@ -520,8 +637,8 @@
         }
 
         if (matchLength > bestLength) {
-            DEBUGLOG(8, "found match of length %u at distance %u",
-                    (U32)matchLength, current - matchIndex);
+            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
             assert(matchEndIdx > matchIndex);
             if (matchLength > matchEndIdx - matchIndex)
                 matchEndIdx = matchIndex + (U32)matchLength;
@@ -529,9 +646,10 @@
             matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
             matches[mnum].len = (U32)matchLength;
             mnum++;
-            if (matchLength > ZSTD_OPT_NUM) break;
-            if (ip+matchLength == iLimit) {  /* equal : no way to know if inf or sup */
-                break;   /* drop, to preserve bt consistency (miss a little bit of compression) */
+            if ( (matchLength > ZSTD_OPT_NUM)
+               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
             }
         }
 
@@ -552,6 +670,47 @@
 
     *smallerPtr = *largerPtr = 0;
 
+    if (dictMode == ZSTD_dictMatchState && nbCompares) {
+        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+        U32 dictMatchIndex = dms->hashTable[dmsH];
+        const U32* const dmsBt = dms->chainTable;
+        commonLengthSmaller = commonLengthLarger = 0;
+        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+            const BYTE* match = dmsBase + dictMatchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+            if (dictMatchIndex+matchLength >= dmsHighLimit)
+                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
+
+            if (matchLength > bestLength) {
+                matchIndex = dictMatchIndex + dmsIndexDelta;
+                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+                if (matchLength > matchEndIdx - matchIndex)
+                    matchEndIdx = matchIndex + (U32)matchLength;
+                bestLength = matchLength;
+                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+                matches[mnum].len = (U32)matchLength;
+                mnum++;
+                if ( (matchLength > ZSTD_OPT_NUM)
+                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+                }
+            }
+
+            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
+            if (match[matchLength] < ip[matchLength]) {
+                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+            } else {
+                /* match is larger than current */
+                commonLengthLarger = matchLength;
+                dictMatchIndex = nextPtr[0];
+            }
+        }
+    }
+
     assert(matchEndIdx > current+8);
     ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
     return mnum;
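
The dictMatchState lookup added above walks the dictionary's binary tree with the same descent step the main search loop uses: each node stores two links, nextPtr[0] toward smaller suffixes and nextPtr[1] toward larger ones (closer to current), while commonLengthSmaller/commonLengthLarger carry the prefix length already known to match so later comparisons can skip those bytes. A compilable sketch of one descent step (stand-in types; the real tables live in ZSTD_matchState_t):

    #include <stddef.h>

    typedef unsigned U32;
    typedef unsigned char BYTE;

    /* One descent step of the sorted binary tree, mirroring the loop above. */
    static U32 btDescend(const U32* bt, U32 btMask, U32 matchIndex,
                         const BYTE* match, const BYTE* ip, size_t matchLength,
                         size_t* commonLengthSmaller, size_t* commonLengthLarger)
    {
        const U32* const nextPtr = bt + 2*(matchIndex & btMask);
        if (match[matchLength] < ip[matchLength]) {
            *commonLengthSmaller = matchLength;   /* all smaller candidates share at least this prefix */
            return nextPtr[1];                    /* move toward larger suffixes (closer to current) */
        } else {
            *commonLengthLarger = matchLength;
            return nextPtr[0];                    /* move toward smaller suffixes */
        }
    }
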
@@ -559,23 +718,24 @@
 
 
 FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
-                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-                        const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
+                        ZSTD_matchState_t* ms,
+                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
                         U32 rep[ZSTD_REP_NUM], U32 const ll0,
                         ZSTD_match_t* matches, U32 const lengthToBeat)
 {
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32 const matchLengthSearch = cParams->searchLength;
-    DEBUGLOG(7, "ZSTD_BtGetAllMatches");
+    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
-    ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
+    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
     switch(matchLengthSearch)
     {
-    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
+    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 3);
     default :
-    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
-    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
+    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 4);
+    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 5);
     case 7 :
-    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
+    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 6);
     }
 }
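
The switch above looks redundant, but it routes the run-time matchLengthSearch value through compile-time constant arguments: combined with FORCE_INLINE_TEMPLATE, each (mls, dictMode) pair instantiates a specialized copy of ZSTD_insertBtAndGetAllMatches with dead branches (such as the mls==3 HC3 path) folded away. The pattern in isolation (illustrative names only):

    /* Dispatch pattern: pass a run-time value as a compile-time constant so
     * each instantiation of the inlined worker folds its branches. */
    static inline unsigned worker(unsigned x, const unsigned mls)
    {
        if (mls == 3) return x * 3;   /* branch eliminated when mls != 3 */
        return x * mls;
    }

    unsigned dispatch(unsigned x, unsigned mls)
    {
        switch (mls) {
        case 3:  return worker(x, 3);
        case 5:  return worker(x, 5);
        default: return worker(x, 4);
        }
    }
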
 
@@ -587,7 +747,7 @@
     U32 rep[3];
 } repcodes_t;
 
-repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
 {
     repcodes_t newReps;
     if (offset >= ZSTD_REP_NUM) {  /* full offset */
@@ -609,65 +769,17 @@
 }
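
ZSTD_updateRep (now static, matching its file-local use) maintains the 3-entry repeat-offset history: an offset >= ZSTD_REP_NUM is a fresh offset that shifts the history down, smaller values select and reorder an existing entry, and ll0 shifts the repcode index when the literal length is zero (with repcode 3 meaning rep[0]-1). A self-contained replica with a worked example (assumes ZSTD_REP_MOVE == ZSTD_REP_NUM-1, as in zstd's internals):

    #include <assert.h>

    #define REP_NUM 3   /* stands in for ZSTD_REP_NUM */

    /* Mirrors the in-place repcode update performed when saving sequences. */
    static void updateRep(unsigned rep[REP_NUM], unsigned offCode, unsigned llen)
    {
        if (offCode >= REP_NUM) {               /* full offset: shift history down */
            rep[2] = rep[1]; rep[1] = rep[0];
            rep[0] = offCode - (REP_NUM - 1);   /* ZSTD_REP_MOVE == ZSTD_REP_NUM-1 */
        } else {                                /* repcode: reorder history */
            unsigned const repCode = offCode + (llen == 0);
            if (repCode) {                      /* repCode==0 => rep[0] again, no change */
                unsigned const cur = (repCode == REP_NUM) ? rep[0] - 1 : rep[repCode];
                if (repCode >= 2) rep[2] = rep[1];
                rep[1] = rep[0];
                rep[0] = cur;
            }
        }
    }

    int main(void)
    {
        unsigned rep[REP_NUM] = { 8, 4, 2 };
        updateRep(rep, 1, 5);                   /* selects rep[1] */
        assert(rep[0] == 4 && rep[1] == 8 && rep[2] == 2);
        updateRep(rep, 2, 0);                   /* llen==0 shifts: repCode 3 => rep[0]-1 */
        assert(rep[0] == 3 && rep[1] == 4 && rep[2] == 8);
        return 0;
    }
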
 
 
-typedef struct {
-    const BYTE* anchor;
-    U32 litlen;
-    U32 rawLitCost;
-} cachedLiteralPrice_t;
-
-static U32 ZSTD_rawLiteralsCost_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
 {
-    U32 startCost;
-    U32 remainingLength;
-    const BYTE* startPosition;
-
-    if (anchor == cachedLitPrice->anchor) {
-        startCost = cachedLitPrice->rawLitCost;
-        startPosition = anchor + cachedLitPrice->litlen;
-        assert(litlen >= cachedLitPrice->litlen);
-        remainingLength = litlen - cachedLitPrice->litlen;
-    } else {
-        startCost = 0;
-        startPosition = anchor;
-        remainingLength = litlen;
-    }
-
-    {   U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr);
-        cachedLitPrice->anchor = anchor;
-        cachedLitPrice->litlen = litlen;
-        cachedLitPrice->rawLitCost = rawLitCost;
-        return rawLitCost;
-    }
+    return sol.litlen + sol.mlen;
 }
 
-static U32 ZSTD_fullLiteralsCost_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
-{
-    return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
-         + ZSTD_litLengthPrice(litlen, optStatePtr);
-}
-
-static int ZSTD_literalsContribution_cached(
-                            cachedLiteralPrice_t* const cachedLitPrice,
-                            const BYTE* const anchor, U32 const litlen,
-                            const optState_t* const optStatePtr)
-{
-    int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
-                           + ZSTD_litLengthContribution(litlen, optStatePtr);
-    return contribution;
-}
-
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore,
-                                      U32 rep[ZSTD_REP_NUM],
-                                      ZSTD_compressionParameters const* cParams,
-                                      const void* src, size_t srcSize,
-                                      const int optLevel, const int extDict)
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+                               seqStore_t* seqStore,
+                               U32 rep[ZSTD_REP_NUM],
+                               const void* src, size_t srcSize,
+                               const int optLevel, const ZSTD_dictMode_e dictMode)
 {
     optState_t* const optStatePtr = &ms->opt;
     const BYTE* const istart = (const BYTE*)src;
@@ -677,72 +789,76 @@
     const BYTE* const ilimit = iend - 8;
     const BYTE* const base = ms->window.base;
     const BYTE* const prefixStart = base + ms->window.dictLimit;
+    const ZSTD_compressionParameters* const cParams = &ms->cParams;
 
     U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
     U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;
 
     ZSTD_optimal_t* const opt = optStatePtr->priceTable;
     ZSTD_match_t* const matches = optStatePtr->matchTable;
-    cachedLiteralPrice_t cachedLitPrice;
+    ZSTD_optimal_t lastSequence;
 
     /* init */
     DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
+    assert(optLevel <= 2);
     ms->nextToUpdate3 = ms->nextToUpdate;
-    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
     ip += (ip==prefixStart);
-    memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));
 
     /* Match Loop */
     while (ip < ilimit) {
         U32 cur, last_pos = 0;
-        U32 best_mlen, best_off;
 
         /* find first match */
         {   U32 const litlen = (U32)(ip - anchor);
             U32 const ll0 = !litlen;
-            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
+            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, ip, iend, dictMode, rep, ll0, matches, minMatch);
             if (!nbMatches) { ip++; continue; }
 
             /* initialize opt[0] */
             { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
-            opt[0].mlen = 1;
+            opt[0].mlen = 0;  /* means is_a_literal */
             opt[0].litlen = litlen;
+            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
 
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
-                DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new serie",
-                            nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart));
+                U32 const maxOffset = matches[nbMatches-1].off;
+                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
+                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
-                    best_mlen = maxML;
-                    best_off = matches[nbMatches-1].off;
-                    DEBUGLOG(7, "large match (%u>%u), immediate encoding",
-                                best_mlen, sufficient_len);
+                    lastSequence.litlen = litlen;
+                    lastSequence.mlen = maxML;
+                    lastSequence.off = maxOffset;
+                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+                                maxML, sufficient_len);
                     cur = 0;
-                    last_pos = 1;
+                    last_pos = ZSTD_totalLen(lastSequence);
                     goto _shortestPath;
             }   }
 
             /* set prices for first matches starting position == 0 */
-            {   U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
+            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                 U32 pos;
                 U32 matchNb;
-                for (pos = 0; pos < minMatch; pos++) {
-                    opt[pos].mlen = 1;
-                    opt[pos].price = ZSTD_MAX_PRICE;
+                for (pos = 1; pos < minMatch; pos++) {
+                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                 }
                 for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                     U32 const offset = matches[matchNb].off;
                     U32 const end = matches[matchNb].len;
                     repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
                     for ( ; pos <= end ; pos++ ) {
-                        U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
-                        DEBUGLOG(7, "rPos:%u => set initial price : %u",
-                                    pos, matchPrice);
+                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+                        U32 const sequencePrice = literalsPrice + matchPrice;
+                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+                                    pos, ZSTD_fCost(sequencePrice));
                         opt[pos].mlen = pos;
                         opt[pos].off = offset;
                         opt[pos].litlen = litlen;
-                        opt[pos].price = matchPrice;
+                        opt[pos].price = sequencePrice;
+                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                         memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                 }   }
                 last_pos = pos-1;
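
After this seeding step, opt[] behaves as a dynamic-programming table over positions: opt[pos] records the cheapest known way to reach ip+pos, with mlen==0 now meaning "reached by a literal step" (the old convention was mlen==1), and slots below minMatch left at ZSTD_MAX_PRICE until the forward scan fixes them. A toy version of the seeding (illustrative values; the real code prices each length via ZSTD_getMatchPrice):

    #include <assert.h>

    #define MAX_PRICE (1u<<30)
    typedef struct { unsigned price, mlen; } Opt;   /* reduced ZSTD_optimal_t */

    int main(void)
    {
        Opt opt[8];
        unsigned pos;
        unsigned const minMatch = 3, matchEnd = 6, seqPrice = 40;
        opt[0].price = 0; opt[0].mlen = 0;            /* mlen==0 marks a literal step */
        for (pos = 1; pos < minMatch; pos++) opt[pos].price = MAX_PRICE;
        for (pos = minMatch; pos <= matchEnd; pos++) {
            opt[pos].price = seqPrice;                /* real code: literalsPrice + match price per length */
            opt[pos].mlen  = pos;
        }
        assert(opt[2].price == MAX_PRICE);            /* fixed later by literal steps */
        assert(opt[5].price == 40 && opt[5].mlen == 5);
        return 0;
    }
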
@@ -753,55 +869,67 @@
         for (cur = 1; cur <= last_pos; cur++) {
             const BYTE* const inr = ip + cur;
             assert(cur < ZSTD_OPT_NUM);
+            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
 
             /* Fix current position with one literal if cheaper */
-            {   U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1;
-                int price;  /* note : contribution can be negative */
-                if (cur > litlen) {
-                    price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr);
-                } else {
-                    price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
-                }
+            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+                int const price = opt[cur-1].price
+                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                 assert(price < 1000000000); /* overflow check */
                 if (price <= opt[cur].price) {
-                    DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal",
-                                cur, price, opt[cur].price);
-                    opt[cur].mlen = 1;
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+                    opt[cur].mlen = 0;
                     opt[cur].off = 0;
                     opt[cur].litlen = litlen;
                     opt[cur].price = price;
                     memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
-            }   }
+                } else {
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+                }
+            }
 
             /* last match must start at a minimum distance of 8 from oend */
             if (inr > ilimit) continue;
 
             if (cur == last_pos) break;
 
-             if ( (optLevel==0) /*static*/
-               && (opt[cur+1].price <= opt[cur].price) )
+            if ( (optLevel==0) /*static_test*/
+              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                 continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+            }
 
-            {   U32 const ll0 = (opt[cur].mlen != 1);
-                U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
-                U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
-                U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
-                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
+            {   U32 const ll0 = (opt[cur].mlen != 0);
+                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+                U32 const previousPrice = opt[cur].price;
+                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, inr, iend, dictMode, opt[cur].rep, ll0, matches, minMatch);
                 U32 matchNb;
-                if (!nbMatches) continue;
+                if (!nbMatches) {
+                    DEBUGLOG(7, "rPos:%u : no match found", cur);
+                    continue;
+                }
 
                 {   U32 const maxML = matches[nbMatches-1].len;
-                    DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u",
-                                cur, nbMatches, maxML);
+                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+                                inr-istart, cur, nbMatches, maxML);
 
                     if ( (maxML > sufficient_len)
-                       | (cur + maxML >= ZSTD_OPT_NUM) ) {
-                        best_mlen = maxML;
-                        best_off = matches[nbMatches-1].off;
-                        last_pos = cur + 1;
+                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
+                        lastSequence.mlen = maxML;
+                        lastSequence.off = matches[nbMatches-1].off;
+                        lastSequence.litlen = litlen;
+                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* if the last "sequence" is only literals, move cur back to the last real match - note: may underflow, in which case it's the first sequence, which is fine */
+                        last_pos = cur + ZSTD_totalLen(lastSequence);
+                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                         goto _shortestPath;
-                    }
-                }
+                }   }
 
                 /* set prices using matches found at position == cur */
                 for (matchNb = 0; matchNb < nbMatches; matchNb++) {
@@ -811,81 +939,97 @@
                     U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                     U32 mlen;
 
-                    DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u",
+                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
                                 matchNb, matches[matchNb].off, lastML, litlen);
 
-                    for (mlen = lastML; mlen >= startML; mlen--) {
+                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                         U32 const pos = cur + mlen;
                         int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
 
                         if ((pos > last_pos) || (price < opt[pos].price)) {
-                            DEBUGLOG(7, "rPos:%u => new better price (%u<%u)",
-                                        pos, price, opt[pos].price);
-                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }
+                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                             opt[pos].mlen = mlen;
                             opt[pos].off = offset;
                             opt[pos].litlen = litlen;
                             opt[pos].price = price;
+                            ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                             memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                         } else {
-                            if (optLevel==0) break;  /* gets ~+10% speed for about -0.01 ratio loss */
+                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                         }
             }   }   }
         }  /* for (cur = 1; cur <= last_pos; cur++) */
 
-        best_mlen = opt[last_pos].mlen;
-        best_off = opt[last_pos].off;
-        cur = last_pos - best_mlen;
+        lastSequence = opt[last_pos];
+        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
+        assert(cur < ZSTD_OPT_NUM);  /* control overflow */
 
 _shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
-        assert(opt[0].mlen == 1);
+        assert(opt[0].mlen == 0);
+
+        {   U32 const storeEnd = cur + 1;
+            U32 storeStart = storeEnd;
+            U32 seqPos = cur;
 
-        /* reverse traversal */
-        DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)",
-                    last_pos, cur);
-        {   U32 selectedMatchLength = best_mlen;
-            U32 selectedOffset = best_off;
-            U32 pos = cur;
-            while (1) {
-                U32 const mlen = opt[pos].mlen;
-                U32 const off = opt[pos].off;
-                opt[pos].mlen = selectedMatchLength;
-                opt[pos].off = selectedOffset;
-                selectedMatchLength = mlen;
-                selectedOffset = off;
-                if (mlen > pos) break;
-                pos -= mlen;
-        }   }
+            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+                        last_pos, cur); (void)last_pos;
+            assert(storeEnd < ZSTD_OPT_NUM);
+            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+            opt[storeEnd] = lastSequence;
+            while (seqPos > 0) {
+                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+                storeStart--;
+                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+                opt[storeStart] = opt[seqPos];
+                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+            }
 
-        /* save sequences */
-        {   U32 pos;
-            for (pos=0; pos < last_pos; ) {
-                U32 const llen = (U32)(ip - anchor);
-                U32 const mlen = opt[pos].mlen;
-                U32 const offset = opt[pos].off;
-                if (mlen == 1) { ip++; pos++; continue; }  /* literal position => move on */
-                pos += mlen; ip += mlen;
+            /* save sequences */
+            DEBUGLOG(6, "sending selected sequences into seqStore")
+            {   U32 storePos;
+                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+                    U32 const llen = opt[storePos].litlen;
+                    U32 const mlen = opt[storePos].mlen;
+                    U32 const offCode = opt[storePos].off;
+                    U32 const advance = llen + mlen;
+                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+                                anchor - istart, llen, mlen);
+
+                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
+                        assert(storePos == storeEnd);   /* must be last sequence */
+                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
+                        continue;   /* will finish */
+                    }
 
-                /* repcodes update : like ZSTD_updateRep(), but update in place */
-                if (offset >= ZSTD_REP_NUM) {  /* full offset */
-                    rep[2] = rep[1];
-                    rep[1] = rep[0];
-                    rep[0] = offset - ZSTD_REP_MOVE;
-                } else {   /* repcode */
-                    U32 const repCode = offset + (llen==0);
-                    if (repCode) {  /* note : if repCode==0, no change */
-                        U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-                        if (repCode >= 2) rep[2] = rep[1];
+                    /* repcodes update : like ZSTD_updateRep(), but update in place */
+                    if (offCode >= ZSTD_REP_NUM) {  /* full offset */
+                        rep[2] = rep[1];
                         rep[1] = rep[0];
-                        rep[0] = currentOffset;
-                    }
-                }
+                        rep[0] = offCode - ZSTD_REP_MOVE;
+                    } else {   /* repcode */
+                        U32 const repCode = offCode + (llen==0);
+                        if (repCode) {  /* note : if repCode==0, no change */
+                            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+                            if (repCode >= 2) rep[2] = rep[1];
+                            rep[1] = rep[0];
+                            rep[0] = currentOffset;
+                    }   }
 
-                ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
-                ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
-                anchor = ip;
-        }   }
-        ZSTD_setLog2Prices(optStatePtr);
+                    assert(anchor + llen <= iend);
+                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+                    ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
+                    anchor += advance;
+                    ip = anchor;
+            }   }
+            ZSTD_setBasePrices(optStatePtr, optLevel);
+        }
+
     }   /* while (ip < ilimit) */
 
     /* Return the last literals size */
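
The rewritten traversal above replaces the old in-place swap walk: because every opt[seqPos] records its own litlen+mlen, ZSTD_totalLen gives exactly how far back the previous sequence boundary lies, so the chain can be copied into the contiguous slots opt[storeStart..storeEnd] and then emitted forward. The backward walk in miniature:

    #include <assert.h>

    typedef struct { unsigned litlen, mlen, off; } Seq;
    static unsigned totalLen(Seq s) { return s.litlen + s.mlen; }

    int main(void)
    {
        Seq opt[16] = {0};
        Seq store[4];
        unsigned seqPos = 12, storeStart = 3;
        opt[5]  = (Seq){ 2, 3, 11 };          /* first sequence ends at rPos 5  */
        opt[12] = (Seq){ 3, 4, 20 };          /* second sequence ends at rPos 12 */
        store[3] = (Seq){ 1, 6, 7 };          /* lastSequence, copied first */
        while (seqPos > 0) {
            unsigned const backDist = totalLen(opt[seqPos]);
            store[--storeStart] = opt[seqPos];
            seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
        }
        assert(storeStart == 1);              /* sequences now contiguous, in order */
        assert(store[1].mlen == 3 && store[2].mlen == 4);
        return 0;
    }
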
@@ -895,29 +1039,94 @@
 
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
     DEBUGLOG(5, "ZSTD_compressBlock_btopt");
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+}
+
+
+/* used in 2-pass strategy */
+static U32 ZSTD_upscaleStat(U32* table, U32 lastEltIndex, int bonus)
+{
+    U32 s, sum=0;
+    assert(ZSTD_FREQ_DIV+bonus > 0);
+    for (s=0; s<=lastEltIndex; s++) {
+        table[s] <<= ZSTD_FREQ_DIV+bonus;
+        table[s]--;
+        sum += table[s];
+    }
+    return sum;
+}
+
+/* used in 2-pass strategy */
+MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
+{
+    optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 1);
+    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 1);
+    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 1);
 }
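
ZSTD_upscaleStat re-amplifies the statistics collected by a first pass: each count is scaled by 2^(ZSTD_FREQ_DIV+bonus) and decremented, the bonus of 1 weighting the length/offset tables slightly more heavily than literals. Worked numbers, with an illustrative shift of 5 standing in for the zstd-internal constants:

    #include <assert.h>
    typedef unsigned U32;

    /* Same shape as ZSTD_upscaleStat: scale each count by 2^shift, minus one. */
    static U32 upscale(U32* table, U32 lastEltIndex, unsigned shift)
    {
        U32 s, sum = 0;
        for (s = 0; s <= lastEltIndex; s++) {
            table[s] <<= shift;
            table[s]--;                 /* entries are seeded >= 1, so no wrap */
            sum += table[s];
        }
        return sum;
    }

    int main(void)
    {
        U32 freq[3] = { 1, 3, 8 };
        U32 const sum = upscale(freq, 2, 5);        /* x32 - 1 each */
        assert(freq[0] == 31 && freq[1] == 95 && freq[2] == 255);
        assert(sum == 31 + 95 + 255);
        return 0;
    }
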
 
 size_t ZSTD_compressBlock_btultra(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
+    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+#if 0
+    /* 2-pass strategy (disabled)
+     * this strategy makes a first pass over the first block to collect statistics,
+     * then seeds the next round's statistics with them.
+     * The compression ratio gain is generally small (~0.5% on the first block),
+     * while the cost is 2x cpu time on the first block. */
+    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+    if ( (ms->opt.litLengthSum==0)   /* first block */
+      && (seqStore->sequences == seqStore->sequencesStart)   /* no ldm */
+      && (ms->window.dictLimit == ms->window.lowLimit) ) {   /* no dictionary */
+        U32 tmpRep[ZSTD_REP_NUM];
+        DEBUGLOG(5, "ZSTD_compressBlock_btultra: first block: collecting statistics");
+        assert(ms->nextToUpdate >= ms->window.dictLimit
+            && ms->nextToUpdate <= ms->window.dictLimit + 1);
+        memcpy(tmpRep, rep, sizeof(tmpRep));
+        ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
+        ZSTD_resetSeqStore(seqStore);
+        /* invalidate first scan from history */
+        ms->window.base -= srcSize;
+        ms->window.dictLimit += (U32)srcSize;
+        ms->window.lowLimit = ms->window.dictLimit;
+        ms->nextToUpdate = ms->window.dictLimit;
+        ms->nextToUpdate3 = ms->window.dictLimit;
+        /* reinforce weight of collected statistics */
+        ZSTD_upscaleStats(&ms->opt);
+    }
+#endif
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        const void* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
 }
 
 size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
 }
 
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+        const void* src, size_t srcSize)
 {
-    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
 }
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.h	Mon Oct 22 14:46:06 2018 -0400
@@ -17,23 +17,29 @@
 
 #include "zstd_compress_internal.h"
 
-void ZSTD_updateTree(
-        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
-        const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
 
 size_t ZSTD_compressBlock_btopt(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
 
 size_t ZSTD_compressBlock_btopt_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 size_t ZSTD_compressBlock_btultra_extDict(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
-        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+        void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
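
With cParams now read from ms->cParams, all six btopt/btultra variants declared above share one narrowed signature, which is what lets callers dispatch on (strategy, dictMode) through a single function-pointer type. A sketch of such a selector (zstd's real dispatcher lives in zstd_compress.c; this is illustrative only):

    #include "zstd_opt.h"   /* declarations above */

    typedef size_t (*blockCompressor)(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                      U32 rep[ZSTD_REP_NUM],
                                      void const* src, size_t srcSize);

    /* Pick the bt optimal-parser variant for a given dictMode (sketch only). */
    static blockCompressor selectBtParser(ZSTD_dictMode_e dictMode, int ultra)
    {
        switch (dictMode) {
        case ZSTD_noDict:
            return ultra ? ZSTD_compressBlock_btultra : ZSTD_compressBlock_btopt;
        case ZSTD_dictMatchState:
            return ultra ? ZSTD_compressBlock_btultra_dictMatchState
                         : ZSTD_compressBlock_btopt_dictMatchState;
        default:  /* ZSTD_extDict */
            return ultra ? ZSTD_compressBlock_btultra_extDict
                         : ZSTD_compressBlock_btopt_extDict;
        }
    }
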
--- a/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -37,18 +37,19 @@
 #define ZSTD_RESIZE_SEQPOOL 0
 
 /* ======   Debug   ====== */
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
+    && !defined(_MSC_VER) \
+    && !defined(__MINGW32__)
 
 #  include <stdio.h>
 #  include <unistd.h>
 #  include <sys/times.h>
-#  define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }
 
 #  define DEBUG_PRINTHEX(l,p,n) {            \
     unsigned debug_u;                        \
     for (debug_u=0; debug_u<(n); debug_u++)  \
-        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
-    DEBUGLOGRAW(l, " \n");                   \
+        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+    RAWLOG(l, " \n");                        \
 }
 
 static unsigned long long GetCurrentClockTimeMicroseconds(void)
@@ -62,7 +63,7 @@
 
 #define MUTEX_WAIT_TIME_DLEVEL 6
 #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
-    if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) {   \
+    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \
         unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
         ZSTD_pthread_mutex_lock(mutex);           \
         {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
@@ -160,6 +161,25 @@
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
 }
 
+
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+{
+    unsigned const maxNbBuffers = 2*nbWorkers + 3;
+    if (srcBufPool==NULL) return NULL;
+    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
+        return srcBufPool;
+    /* need a larger buffer pool */
+    {   ZSTD_customMem const cMem = srcBufPool->cMem;
+        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
+        ZSTDMT_bufferPool* newBufPool;
+        ZSTDMT_freeBufferPool(srcBufPool);
+        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+        if (newBufPool==NULL) return newBufPool;
+        ZSTDMT_setBufferSize(newBufPool, bSize);
+        return newBufPool;
+    }
+}
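
ZSTDMT_expandBufferPool introduces the expand-by-recreate pattern reused below for the seq and cctx pools: keep the pool if it is already large enough, otherwise save its settings, free it, and rebuild at the new size (callers treat a NULL result as memory_allocation). The pattern in isolation (stand-in types):

    #include <stdlib.h>

    typedef struct { unsigned capacity; } Pool;

    static Pool* poolCreate(unsigned capacity)
    {
        Pool* const p = (Pool*)malloc(sizeof(*p));
        if (p != NULL) p->capacity = capacity;
        return p;
    }

    static Pool* poolExpand(Pool* srcPool, unsigned neededCapacity)
    {
        if (srcPool == NULL) return NULL;
        if (srcPool->capacity >= neededCapacity) return srcPool;   /* good enough */
        /* real code forwards cMem/bufferSize here before freeing */
        free(srcPool);
        return poolCreate(neededCapacity);   /* NULL on failure: caller reports memory_allocation */
    }
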
+
 /** ZSTDMT_getBuffer() :
  *  assumption : bufPool must be valid
  * @return : a buffer, with start pointer and size
@@ -229,8 +249,8 @@
 /* store buffer for later re-use, up to pool capacity */
 static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
 {
+    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
     if (buf.start == NULL) return;   /* compatible with release on NULL */
-    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     if (bufPool->nbBuffers < bufPool->totalBuffers) {
         bufPool->bTable[bufPool->nbBuffers++] = buf;  /* stored for later use */
@@ -300,7 +320,8 @@
 
 static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
-    ZSTDMT_seqPool* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    if (seqPool == NULL) return NULL;
     ZSTDMT_setNbSeq(seqPool, 0);
     return seqPool;
 }
@@ -310,6 +331,10 @@
     ZSTDMT_freeBufferPool(seqPool);
 }
 
+static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
+{
+    return ZSTDMT_expandBufferPool(pool, nbWorkers);
+}
 
 
 /* =====   CCtx Pool   ===== */
@@ -355,6 +380,18 @@
     return cctxPool;
 }
 
+static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
+                                              unsigned nbWorkers)
+{
+    if (srcPool==NULL) return NULL;
+    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
+    /* need a larger cctx pool */
+    {   ZSTD_customMem const cMem = srcPool->cMem;
+        ZSTDMT_freeCCtxPool(srcPool);
+        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
+    }
+}
+
 /* only works during initialization phase, not during compression */
 static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
 {
@@ -425,12 +462,11 @@
     ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
-static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params)
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
 {
     /* Adjust parameters */
     if (params.ldmParams.enableLdm) {
         DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
-        params.ldmParams.windowLog = params.cParams.windowLog;
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
         assert(params.ldmParams.hashEveryLog < 32);
@@ -453,7 +489,7 @@
             serialState->params.ldmParams.hashLog -
             serialState->params.ldmParams.bucketSizeLog;
         /* Size the seq pool tables */
-        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, params.jobSize));
+        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
         /* Reset the window */
         ZSTD_window_clear(&serialState->ldmState.window);
         serialState->ldmWindow = serialState->ldmState.window;
@@ -473,6 +509,7 @@
         memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
     }
     serialState->params = params;
+    serialState->params.jobSize = (U32)jobSize;
     return 0;
 }
 
@@ -505,6 +542,7 @@
     /* Wait for our turn */
     ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
     while (serialState->nextJobID < jobID) {
+        DEBUGLOG(5, "wait for serialState->cond");
         ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
     }
     /* A future job may error and skip our job */
@@ -514,6 +552,7 @@
             size_t error;
             assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                    seqStore.size == 0 && seqStore.capacity > 0);
+            assert(src.size <= serialState->params.jobSize);
             ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
             error = ZSTD_ldm_generateSequences(
                 &serialState->ldmState, &seqStore,
@@ -593,14 +632,32 @@
     unsigned frameChecksumNeeded;        /* used only by mtctx */
 } ZSTDMT_jobDescription;
 
+#define JOB_ERROR(e) {                          \
+    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
+    job->cSize = e;                             \
+    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
+    goto _endJob;                               \
+}
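
JOB_ERROR collapses the repeated error blocks deleted below into one macro: publish the error code in job->cSize under the job mutex, then jump to the shared _endJob cleanup. A reduced, mutex-free illustration of the same single-exit pattern (-64 mimics a zstd error code such as ERROR(memory_allocation)):

    #include <stdlib.h>

    /* Publish the error, then jump to the single cleanup label. */
    #define FAIL(e) { cSize = (e); goto _endJob; }

    static size_t runJob(void* workBuf)
    {
        size_t cSize = 0;
        if (workBuf == NULL) FAIL((size_t)-64);   /* e.g. a memory_allocation error */
        /* ... compression work would go here ... */
    _endJob:
        free(workBuf);                            /* cleanup runs on every path */
        return cSize;
    }

    int main(void) { return runJob(malloc(16)) ? 1 : 0; }
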
+
 /* ZSTDMT_compressionJob() is a POOL_function type */
-void ZSTDMT_compressionJob(void* jobDescription)
+static void ZSTDMT_compressionJob(void* jobDescription)
 {
     ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
     ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
     ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
     rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
     buffer_t dstBuff = job->dstBuff;
+    size_t lastCBlockSize = 0;
+
+    /* resources */
+    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
+    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
+        dstBuff = ZSTDMT_getBuffer(job->bufPool);
+        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
+        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
+    }
+    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+        JOB_ERROR(ERROR(memory_allocation));
 
     /* Don't compute the checksum for chunks, since we compute it externally,
      * but write it in the header.
@@ -609,47 +666,31 @@
     /* Don't run LDM for the chunks, since we handle it externally */
     jobParams.ldmParams.enableLdm = 0;
 
-    /* ressources */
-    if (cctx==NULL) {
-        job->cSize = ERROR(memory_allocation);
-        goto _endJob;
-    }
-    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
-        dstBuff = ZSTDMT_getBuffer(job->bufPool);
-        if (dstBuff.start==NULL) {
-            job->cSize = ERROR(memory_allocation);
-            goto _endJob;
-        }
-        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
-    }
 
     /* init */
     if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, job->cdict, jobParams, job->fullFrameSize);
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
         assert(job->firstJob);  /* only allowed for first job */
-        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+        if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else {  /* srcStart points at reloaded section */
         U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
         {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstJob);
-            if (ZSTD_isError(forceWindowError)) {
-                job->cSize = forceWindowError;
-                goto _endJob;
-        }   }
+            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
+        }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                                         job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+                                        ZSTD_dtlm_fast,
                                         NULL, /*cdict*/
                                         jobParams, pledgedSrcSize);
-            if (ZSTD_isError(initError)) {
-                job->cSize = initError;
-                goto _endJob;
-    }   }   }
+            if (ZSTD_isError(initError)) JOB_ERROR(initError);
+    }   }
 
     /* Perform serial step as early as possible, but after CCtx initialization */
     ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
 
     if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
         size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
-        if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
         DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
         ZSTD_invalidateRepCodes(cctx);
     }
@@ -667,7 +708,7 @@
         assert(job->cSize == 0);
         for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
             size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
-            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
             ip += chunkSize;
             op += cSize; assert(op < oend);
             /* stats */
@@ -680,18 +721,16 @@
             ZSTD_pthread_mutex_unlock(&job->job_mutex);
         }
         /* last block */
-        assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+        assert(chunkSize > 0);
+        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
         if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
             size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
             size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
             size_t const cSize = (job->lastJob) ?
                  ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                  ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
-            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
-            /* stats */
-            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-            job->cSize += cSize;
-            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
+            lastCBlockSize = cSize;
     }   }
 
 _endJob:
@@ -704,7 +743,9 @@
     ZSTDMT_releaseCCtx(job->cctxPool, cctx);
     /* report */
     ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
-    job->consumed = job->src.size;
+    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
+    job->cSize += lastCBlockSize;
+    job->consumed = job->src.size;  /* when job->consumed == job->src.size, compression job is presumed completed */
     ZSTD_pthread_cond_signal(&job->job_cond);
     ZSTD_pthread_mutex_unlock(&job->job_mutex);
 }
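
The last-block computation above relies on chunkSize being a power of two (now asserted on its own line): size & (chunkSize-1) is then the remainder modulo chunkSize, and the ((lastBlockSize1==0) & (size>=chunkSize)) fixup makes an exact multiple emit one full final chunk rather than an empty one. A standalone check:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t const chunkSize = 1 << 20;                 /* power of two, as asserted */
        assert((chunkSize & (chunkSize - 1)) == 0);
        {   size_t const srcSize = (5u << 20) + 123;      /* 5 MB + 123 B */
            size_t const last1 = srcSize & (chunkSize - 1);
            assert(last1 == 123);                         /* mask == mod for pow2 */
        }
        {   size_t const srcSize = 4u << 20;              /* exact multiple */
            size_t const last1 = srcSize & (chunkSize - 1);
            size_t const last  = ((last1 == 0) & (srcSize >= chunkSize)) ? chunkSize : last1;
            assert(last == chunkSize);                    /* emit one full final chunk */
        }
        return 0;
    }
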
@@ -745,9 +786,9 @@
     ZSTD_CCtx_params params;
     size_t targetSectionSize;
     size_t targetPrefixSize;
-    roundBuff_t roundBuff;
+    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
     inBuff_t inBuff;
-    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
+    roundBuff_t roundBuff;
     serialState_t serial;
     unsigned singleBlockingThread;
     unsigned jobIDMask;
@@ -798,6 +839,20 @@
     return jobTable;
 }
 
+static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
+    U32 nbJobs = nbWorkers + 2;
+    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */
+        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+        mtctx->jobIDMask = 0;
+        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
+        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
+        mtctx->jobIDMask = nbJobs - 1;
+    }
+    return 0;
+}
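
ZSTDMT_expandJobsTable keeps the job table sized to a power of two (asserted above) so that jobIDMask = nbJobs-1 turns the ever-increasing doneJobID/nextJobID counters into ring-buffer slots with a single AND, as in the jobNb & mtctx->jobIDMask lookups elsewhere in this file. For example:

    #include <assert.h>

    int main(void)
    {
        unsigned const nbJobs = 8;                /* power of two */
        unsigned const jobIDMask = nbJobs - 1;
        /* monotonically increasing IDs wrap into table slots with one AND */
        assert((7u  & jobIDMask) == 7);
        assert((8u  & jobIDMask) == 0);           /* wraps to slot 0 */
        assert((13u & jobIDMask) == 5);
        return 0;
    }
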
+
+
 /* ZSTDMT_CCtxParam_setNbWorkers():
  * Internal use only */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
@@ -875,7 +930,7 @@
         unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
         ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
         while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-            DEBUGLOG(5, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
+            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
             ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
         }
         ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
@@ -924,6 +979,8 @@
         if ( (value > 0)  /* value==0 => automatic job size */
            & (value < ZSTDMT_JOBSIZE_MIN) )
             value = ZSTDMT_JOBSIZE_MIN;
+        if (value > ZSTDMT_JOBSIZE_MAX)
+            value = ZSTDMT_JOBSIZE_MAX;
         params->jobSize = value;
         return value;
     case ZSTDMT_p_overlapSectionLog :
@@ -950,6 +1007,21 @@
     }
 }
 
+size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned* value)
+{
+    switch (parameter) {
+    case ZSTDMT_p_jobSize:
+        *value = mtctx->params.jobSize;
+        break;
+    case ZSTDMT_p_overlapSectionLog:
+        *value = mtctx->params.overlapSizeLog;
+        break;
+    default:
+        return ERROR(parameter_unsupported);
+    }
+    return 0;
+}
+
 /* Sets parameters relevant to the compression job,
  * initializing others to default values. */
 static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
@@ -960,13 +1032,30 @@
     jobParams.cParams = params.cParams;
     jobParams.fParams = params.fParams;
     jobParams.compressionLevel = params.compressionLevel;
-    jobParams.disableLiteralCompression = params.disableLiteralCompression;
 
     return jobParams;
 }
 
+
+/* ZSTDMT_resize() :
+ * @return : error code if fails, 0 on success */
+static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
+{
+    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
+    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
+    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
+    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
+    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
+    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
+    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+    return 0;
+}
+
+
 /*! ZSTDMT_updateCParams_whileCompressing() :
- *  Updates only a selected set of compression parameters, to remain compatible with current frame.
+ *  Updates a selected set of compression parameters, remaining compatible with the currently active frame.
  *  New parameters will be applied to next compression job. */
 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
 {
@@ -981,38 +1070,36 @@
     }
 }
 
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
-{
-    assert(mtctx != NULL);
-    return mtctx->params.nbWorkers;
-}
-
 /* ZSTDMT_getFrameProgression():
  * tells how much data has been consumed (input) and produced (output) for current frame.
  * able to count progression inside worker threads.
- * Note : mutex will be acquired during statistics collection. */
+ * Note : mutex will be acquired during statistics collection inside workers. */
 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
 {
     ZSTD_frameProgression fps;
-    DEBUGLOG(6, "ZSTDMT_getFrameProgression");
+    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
+    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
     fps.consumed = mtctx->consumed;
-    fps.produced = mtctx->produced;
-    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
+    fps.produced = fps.flushed = mtctx->produced;
+    fps.currentJobID = mtctx->nextJobID;
+    fps.nbActiveWorkers = 0;
     {   unsigned jobNb;
         unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
         DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                     mtctx->doneJobID, lastJobNb, mtctx->jobReady)
         for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
             unsigned const wJobID = jobNb & mtctx->jobIDMask;
-            ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
-            {   size_t const cResult = mtctx->jobs[wJobID].cSize;
+            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
+            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+            {   size_t const cResult = jobPtr->cSize;
                 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
-                fps.consumed += mtctx->jobs[wJobID].consumed;
-                fps.ingested += mtctx->jobs[wJobID].src.size;
+                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+                assert(flushed <= produced);
+                fps.ingested += jobPtr->src.size;
+                fps.consumed += jobPtr->consumed;
                 fps.produced += produced;
+                fps.flushed  += flushed;
+                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
             }
             ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
         }
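
The enriched progression report now also exposes flushed bytes, the current job ID, and the number of active workers, which is enough for a caller to drive a live progress display. A polling sketch, assuming the experimental ZSTD_getFrameProgression() wrapper (exposed under ZSTD_STATIC_LINKING_ONLY) that forwards here for multi-threaded contexts:

    #include <stdio.h>
    #define ZSTD_STATIC_LINKING_ONLY   /* progression API is experimental */
    #include <zstd.h>

    /* Poll progression between streaming calls on a cctx that is
     * currently compressing a frame with nbWorkers > 0. */
    static void reportProgress(const ZSTD_CCtx* cctx)
    {
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        fprintf(stderr,
                "job %u: ingested %llu, consumed %llu, produced %llu, flushed %llu, %u active workers\n",
                fp.currentJobID,
                fp.ingested, fp.consumed, fp.produced, fp.flushed,
                fp.nbActiveWorkers);
    }
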
@@ -1021,6 +1108,34 @@
 }
 
 
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
+{
+    size_t toFlush;
+    unsigned const jobID = mtctx->doneJobID;
+    assert(jobID <= mtctx->nextJobID);
+    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */
+
+    /* look into oldest non-fully-flushed job */
+    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
+        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
+        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+        {   size_t const cResult = jobPtr->cSize;
+            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+            assert(flushed <= produced);
+            toFlush = produced - flushed;
+            if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) {
+                /* should not happen : if doneJobID is not fully flushed yet toFlush==0, it must still be consuming input ; flag the anomaly with a failing assert */
+                assert(jobPtr->consumed < jobPtr->src.size);
+            }
+        }
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+    }
+
+    return toFlush;
+}
+
+
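/* Illustrative sketch (not part of the patch) : how a caller above this module
 * might combine ZSTDMT_getFrameProgression() with the new ZSTDMT_toFlushNow()
 * to drive a progress display. The fprintf reporting is hypothetical; the
 * field names follow the structure filled in above. */
#if 0   /* example only */
#include <stdio.h>
static void ZSTDMT_reportProgress_example(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression const fps = ZSTDMT_getFrameProgression(mtctx);
    size_t const readyToFlush = ZSTDMT_toFlushNow(mtctx);
    fprintf(stderr,
        "ingested=%llu consumed=%llu produced=%llu flushed=%llu activeWorkers=%u toFlushNow=%llu\n",
        (unsigned long long)fps.ingested, (unsigned long long)fps.consumed,
        (unsigned long long)fps.produced, (unsigned long long)fps.flushed,
        fps.nbActiveWorkers, (unsigned long long)readyToFlush);
}
#endif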
 /* ------------------------------------------ */
 /* =====   Multi-threaded compression   ===== */
 /* ------------------------------------------ */
@@ -1087,18 +1202,10 @@
 
     assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
         return ERROR(memory_allocation);
 
-    if (nbJobs > mtctx->jobIDMask+1) {  /* enlarge job table */
-        U32 jobsTableSize = nbJobs;
-        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
-        mtctx->jobIDMask = 0;
-        mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
-        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
-        assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0));  /* ensure jobsTableSize is a power of 2 */
-        mtctx->jobIDMask = jobsTableSize - 1;
-    }
+    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
 
     {   unsigned u;
         for (u=0; u<nbJobs; u++) {
@@ -1221,17 +1328,18 @@
         const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
         unsigned long long pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u, disableLiteralCompression=%i)",
-                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx, params.disableLiteralCompression);
-    /* params are supposed to be fully validated at this point */
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
+                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
+
+    /* params are supposed to be partially validated at this point */
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
 
     /* init */
-    if (params.jobSize == 0) {
-        params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
-    }
+    if (params.nbWorkers != mtctx->params.nbWorkers)
+        CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
+
+    if (params.jobSize > 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
     if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
 
     mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
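/* Minimal sketch of the jobSize clamping rule applied just above, extracted as
 * a standalone helper (illustrative only; the constants are the ones used in
 * this file). A jobSize of 0 is preserved, meaning "derive it automatically
 * later" from ZSTDMT_computeTargetJobLog(). */
#if 0   /* example only */
static size_t ZSTDMT_clampJobSize_example(size_t jobSize)
{
    if (jobSize > 0 && jobSize < ZSTDMT_JOBSIZE_MIN) jobSize = ZSTDMT_JOBSIZE_MIN;
    if (jobSize > ZSTDMT_JOBSIZE_MAX) jobSize = ZSTDMT_JOBSIZE_MAX;
    return jobSize;
}
#endif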
@@ -1270,7 +1378,9 @@
     mtctx->targetPrefixSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
     DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
     mtctx->targetSectionSize = params.jobSize;
-    if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
+    if (mtctx->targetSectionSize == 0) {
+        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+    }
     if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
     DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
     DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
@@ -1312,7 +1422,7 @@
     mtctx->allJobsCompleted = 0;
     mtctx->consumed = 0;
     mtctx->produced = 0;
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
         return ERROR(memory_allocation);
     return 0;
 }
@@ -1420,7 +1530,7 @@
         mtctx->jobs[jobID].jobID = mtctx->nextJobID;
         mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
         mtctx->jobs[jobID].lastJob = endFrame;
-        mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
+        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
         mtctx->jobs[jobID].dstFlushed = 0;
 
         /* Update the round buffer pos and clear the input buffer to be reset */
@@ -1468,6 +1578,8 @@
 
 
 /*! ZSTDMT_flushProduced() :
+ *  flush whatever data has been produced but not yet flushed in the current job.
+ *  move to the next job once the current one is fully flushed.
  * `output` : `pos` will be updated with amount of data flushed .
  * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
  * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
@@ -1496,7 +1608,7 @@
     /* try to flush something */
     {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
         size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
-        size_t const srcSize = mtctx->jobs[wJobID].src.size;        /* read-only, could be done after mutex lock, but no-declaration-after-statement */
+        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
         ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
         if (ZSTD_isError(cSize)) {
             DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
@@ -1516,6 +1628,7 @@
             mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
             mtctx->jobs[wJobID].frameChecksumNeeded = 0;
         }
+
         if (cSize > 0) {   /* compression is ongoing or completed */
             size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
             DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
@@ -1529,11 +1642,12 @@
             output->pos += toFlush;
             mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
 
-            if ( (srcConsumed == srcSize)    /* job completed */
+            if ( (srcConsumed == srcSize)    /* job is completed */
               && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                 DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                         mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+                DEBUGLOG(5, "dstBuffer released");
                 mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                 mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                 mtctx->consumed += srcSize;
@@ -1610,6 +1724,7 @@
     range_t extDict;
     range_t prefix;
 
+    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
     extDict.start = window.dictBase + window.lowLimit;
     extDict.size = window.dictLimit - window.lowLimit;
 
@@ -1630,12 +1745,13 @@
 {
     if (mtctx->params.ldmParams.enableLdm) {
         ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
         DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
                     (size_t)buffer.start,
                     (size_t)buffer.start + buffer.capacity);
         ZSTD_PTHREAD_MUTEX_LOCK(mutex);
         while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
-            DEBUGLOG(6, "Waiting for LDM to finish...");
+            DEBUGLOG(5, "Waiting for LDM to finish...");
             ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
         }
         DEBUGLOG(6, "Done waiting for LDM to finish");
@@ -1655,6 +1771,7 @@
     size_t const target = mtctx->targetSectionSize;
     buffer_t buffer;
 
+    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
     assert(mtctx->inBuff.buffer.start == NULL);
     assert(mtctx->roundBuff.capacity >= target);
 
@@ -1668,7 +1785,7 @@
         buffer.start = start;
         buffer.capacity = prefixSize;
         if (ZSTDMT_isOverlapped(buffer, inUse)) {
-            DEBUGLOG(6, "Waiting for buffer...");
+            DEBUGLOG(5, "Waiting for buffer...");
             return 0;
         }
         ZSTDMT_waitForLdmComplete(mtctx, buffer);
@@ -1680,7 +1797,7 @@
     buffer.capacity = target;
 
     if (ZSTDMT_isOverlapped(buffer, inUse)) {
-        DEBUGLOG(6, "Waiting for buffer...");
+        DEBUGLOG(5, "Waiting for buffer...");
         return 0;
     }
     assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
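/* Sketch of the half-open range overlap test that ZSTDMT_isOverlapped() is
 * assumed to rely on (illustrative; the real helper operates on buffer_t and
 * range_t values) : two ranges [s1, e1) and [s2, e2) intersect iff
 * s1 < e2 && s2 < e1. */
#if 0   /* example only */
static int ZSTDMT_rangesOverlap_example(size_t s1, size_t e1, size_t s2, size_t e2)
{
    return (s1 < e2) && (s2 < e1);
}
#endif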
@@ -1753,8 +1870,10 @@
                 /* It is only possible for this operation to fail if there are
                  * still compression jobs ongoing.
                  */
+                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                 assert(mtctx->doneJobID != mtctx->nextJobID);
-            }
+            } else
+                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
         }
         if (mtctx->inBuff.buffer.start != NULL) {
             size_t const toLoad = MIN(input->size - input->pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
@@ -1782,6 +1901,7 @@
     /* check for potential compressed data ready to be flushed */
     {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
         if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
+        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
         return remainingToFlush;
     }
 }
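/* Illustrative drain loop for the ZSTDMT_flushProduced() contract documented
 * above : keep flushing while data remains and the output buffer has room.
 * A sketch only, assuming the internal signature used in this file and the
 * public ZSTD_e_flush directive; error handling is elided. */
#if 0   /* example only */
static void ZSTDMT_drainAll_example(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
{
    size_t remaining = 1;   /* 1 == "unknown but > 0", per the doc comment above */
    while (remaining > 0 && output->pos < output->size) {
        remaining = ZSTDMT_flushProduced(mtctx, output, 1 /*blockToFlush*/, ZSTD_e_flush);
        if (ZSTD_isError(remaining)) break;   /* error code : stop draining */
    }
}
#endif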
--- a/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Mon Oct 22 14:46:06 2018 -0400
@@ -95,6 +95,11 @@
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
 ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
 
+/* ZSTDMT_getMTCtxParameter() :
+ * Query the ZSTDMT_CCtx for a parameter value.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned* value);
+
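/* Hypothetical usage sketch for the getter declared above (illustrative only;
 * ZSTDMT_p_jobSize is assumed here to be a valid ZSTDMT_parameter) :
 *
 *     unsigned jobSize = 0;
 *     size_t const err = ZSTDMT_getMTCtxParameter(mtctx, ZSTDMT_p_jobSize, &jobSize);
 *     if (!ZSTD_isError(err)) {
 *         // jobSize now holds the currently configured value
 *     }
 */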
 
 /*! ZSTDMT_compressStream_generic() :
  *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
@@ -114,11 +119,21 @@
  * ===  Not exposed in libzstd. Never invoke directly   ===
  * ======================================================== */
 
+ /*! ZSTDMT_toFlushNow()
+  *  Tell how many bytes are ready to be flushed immediately.
+  *  Probe the oldest active job (not yet entirely flushed) and check its output buffer.
+  *  If it returns 0, either there is no active job,
+  *  or the oldest job is still active but everything it produced has already been flushed,
+  *  in which case flushing is limited by the speed of that oldest job. */
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
+
+/*! ZSTDMT_CCtxParam_setMTCtxParameter()
+ *  like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */
 size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
 
-/* ZSTDMT_CCtxParam_setNbWorkers()
- * Set nbWorkers, and clamp it.
- * Also reset jobSize and overlapLog */
+/*! ZSTDMT_CCtxParam_setNbWorkers()
+ *  Set nbWorkers, and clamp it.
+ *  Also reset jobSize and overlapLog */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
 
 /*! ZSTDMT_updateCParams_whileCompressing() :
@@ -126,14 +141,9 @@
  *  New parameters will be applied to next compression job. */
 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
 
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);
-
-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for current frame.
- * able to count progression inside worker threads.
+/*! ZSTDMT_getFrameProgression():
+ *  tells how much data has been consumed (input) and produced (output) for current frame.
+ *  able to count progression inside worker threads.
  */
 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
 
--- a/contrib/python-zstandard/zstd/decompress/huf_decompress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/decompress/huf_decompress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,7 @@
 /* ******************************************************************
-   Huffman decoder, part of New Generation Entropy library
-   Copyright (C) 2013-2016, Yann Collet.
+   huff0 huffman decoder,
+   part of Finite State Entropy library
+   Copyright (C) 2013-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -29,16 +30,15 @@
 
     You can contact the author at :
     - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
-    - Public forum : https://groups.google.com/forum/#!forum/lz4c
 ****************************************************************** */
 
 /* **************************************************************
 *  Dependencies
 ****************************************************************/
 #include <string.h>     /* memcpy, memset */
+#include "compiler.h"
 #include "bitstream.h"  /* BIT_* */
-#include "compiler.h"
-#include "fse.h"        /* header compression */
+#include "fse.h"        /* to compress headers */
 #define HUF_STATIC_LINKING_ONLY
 #include "huf.h"
 #include "error_private.h"
@@ -48,7 +48,6 @@
 *  Error Management
 ****************************************************************/
 #define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
 #define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
 
 
@@ -75,15 +74,15 @@
 /*-***************************/
 /*  single-symbol decoding   */
 /*-***************************/
-typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
 
-size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
 {
     U32 tableLog = 0;
     U32 nbSymbols = 0;
     size_t iSize;
     void* const dtPtr = DTable + 1;
-    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
 
     U32* rankVal;
     BYTE* huffWeight;
@@ -96,7 +95,7 @@
 
     if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
 
-    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
     /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */
 
     iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
@@ -124,7 +123,7 @@
             U32 const w = huffWeight[n];
             U32 const length = (1 << w) >> 1;
             U32 u;
-            HUF_DEltX2 D;
+            HUF_DEltX1 D;
             D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
             for (u = rankVal[w]; u < rankVal[w] + length; u++)
                 dt[u] = D;
@@ -134,17 +133,15 @@
     return iSize;
 }
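/* Worked sketch of the X1 fill loop above (illustrative) : a symbol of weight w
 * occupies (1<<w)>>1 consecutive table entries, each decoded in
 * (tableLog + 1 - w) bits. E.g. with tableLog==3, a weight-3 symbol fills
 * 4 entries with nbBits==1, while a weight-1 symbol fills 1 entry with
 * nbBits==3 : more probable symbols cost fewer bits. */
#if 0   /* example only */
static unsigned HUF_entriesForWeight_example(U32 w)
{
    return (1u << w) >> 1;   /* w==0 => 0 entries (symbol not present) */
}
#endif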
 
-size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_readDTableX2_wksp(DTable, src, srcSize,
+    return HUF_readDTableX1_wksp(DTable, src, srcSize,
                                  workSpace, sizeof(workSpace));
 }
 
-typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
-
 FORCE_INLINE_TEMPLATE BYTE
-HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
 {
     size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
     BYTE const c = dt[val].byte;
@@ -152,44 +149,44 @@
     return c;
 }
 
-#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
-    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
 
-#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)  \
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
     if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
 
-#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
     if (MEM_64bits()) \
-        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
 
 HINT_INLINE size_t
-HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
 {
     BYTE* const pStart = p;
 
     /* up to 4 symbols at a time */
     while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
     }
 
     /* [0-3] symbols remaining */
     if (MEM_32bits())
         while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
-            HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
 
     /* no more data to retrieve from bitstream, no need to reload */
     while (p < pEnd)
-        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
 
     return pEnd-pStart;
 }
 
 FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X2_usingDTable_internal_body(
+HUF_decompress1X1_usingDTable_internal_body(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
@@ -197,14 +194,14 @@
     BYTE* op = (BYTE*)dst;
     BYTE* const oend = op + dstSize;
     const void* dtPtr = DTable + 1;
-    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
     BIT_DStream_t bitD;
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
     U32 const dtLog = dtd.tableLog;
 
     CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
 
-    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
 
     if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
 
@@ -212,7 +209,7 @@
 }
 
 FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X2_usingDTable_internal_body(
+HUF_decompress4X1_usingDTable_internal_body(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
@@ -224,7 +221,7 @@
         BYTE* const ostart = (BYTE*) dst;
         BYTE* const oend = ostart + dstSize;
         const void* const dtPtr = DTable + 1;
-        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
 
         /* Init */
         BIT_DStream_t bitD1;
@@ -260,22 +257,22 @@
         /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
         endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
         while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
             BIT_reloadDStream(&bitD1);
             BIT_reloadDStream(&bitD2);
             BIT_reloadDStream(&bitD3);
@@ -291,191 +288,10 @@
         /* note : op4 supposed already verified within main loop */
 
         /* finish bitStreams one by one */
-        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
-
-        /* check */
-        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
-          if (!endCheck) return ERROR(corruption_detected); }
-
-        /* decoded size */
-        return dstSize;
-    }
-}
-
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 2);
-    BIT_skipBits(DStream, dt[val].nbBits);
-    return dt[val].length;
-}
-
-FORCE_INLINE_TEMPLATE U32
-HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
-{
-    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
-    memcpy(op, dt+val, 1);
-    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
-    else {
-        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
-            BIT_skipBits(DStream, dt[val].nbBits);
-            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
-                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
-                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
-    }   }
-    return 1;
-}
-
-#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
-    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
-    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
-    if (MEM_64bits()) \
-        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
-
-HINT_INLINE size_t
-HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
-                const HUF_DEltX4* const dt, const U32 dtLog)
-{
-    BYTE* const pStart = p;
-
-    /* up to 8 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-    }
-
-    /* closer to end : up to 2 symbols at a time */
-    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
-
-    while (p <= pEnd-2)
-        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
-
-    if (p < pEnd)
-        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
-
-    return p-pStart;
-}
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress1X4_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    BIT_DStream_t bitD;
-
-    /* Init */
-    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
-
-    /* decode */
-    {   BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
-        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
-    }
-
-    /* check */
-    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
-
-    /* decoded size */
-    return dstSize;
-}
-
-
-FORCE_INLINE_TEMPLATE size_t
-HUF_decompress4X4_usingDTable_internal_body(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
-
-    {   const BYTE* const istart = (const BYTE*) cSrc;
-        BYTE* const ostart = (BYTE*) dst;
-        BYTE* const oend = ostart + dstSize;
-        const void* const dtPtr = DTable+1;
-        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
-
-        /* Init */
-        BIT_DStream_t bitD1;
-        BIT_DStream_t bitD2;
-        BIT_DStream_t bitD3;
-        BIT_DStream_t bitD4;
-        size_t const length1 = MEM_readLE16(istart);
-        size_t const length2 = MEM_readLE16(istart+2);
-        size_t const length3 = MEM_readLE16(istart+4);
-        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
-        const BYTE* const istart1 = istart + 6;  /* jumpTable */
-        const BYTE* const istart2 = istart1 + length1;
-        const BYTE* const istart3 = istart2 + length2;
-        const BYTE* const istart4 = istart3 + length3;
-        size_t const segmentSize = (dstSize+3) / 4;
-        BYTE* const opStart2 = ostart + segmentSize;
-        BYTE* const opStart3 = opStart2 + segmentSize;
-        BYTE* const opStart4 = opStart3 + segmentSize;
-        BYTE* op1 = ostart;
-        BYTE* op2 = opStart2;
-        BYTE* op3 = opStart3;
-        BYTE* op4 = opStart4;
-        U32 endSignal;
-        DTableDesc const dtd = HUF_getDTableDesc(DTable);
-        U32 const dtLog = dtd.tableLog;
-
-        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
-        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
-        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
-        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
-        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
-
-        /* 16-32 symbols per loop (4-8 symbols per stream) */
-        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
-            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
-            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
-            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
-            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
-
-            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
-        }
-
-        /* check corruption */
-        if (op1 > opStart2) return ERROR(corruption_detected);
-        if (op2 > opStart3) return ERROR(corruption_detected);
-        if (op3 > opStart4) return ERROR(corruption_detected);
-        /* note : op4 already verified within main loop */
-
-        /* finish bitStreams one by one */
-        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
-        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
-        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
-        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
 
         /* check */
         { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
@@ -493,7 +309,7 @@
                                                const HUF_DTable *DTable);
 #if DYNAMIC_BMI2
 
-#define X(fn)                                                               \
+#define HUF_DGEN(fn)                                                               \
                                                                             \
     static size_t fn##_default(                                             \
                   void* dst,  size_t dstSize,                               \
@@ -522,7 +338,7 @@
 
 #else
 
-#define X(fn)                                                               \
+#define HUF_DGEN(fn)                                                               \
     static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
                      size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
     {                                                                       \
@@ -532,112 +348,114 @@
 
 #endif
 
-X(HUF_decompress1X2_usingDTable_internal)
-X(HUF_decompress4X2_usingDTable_internal)
-X(HUF_decompress1X4_usingDTable_internal)
-X(HUF_decompress4X4_usingDTable_internal)
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
 
-#undef X
 
 
-size_t HUF_decompress1X2_usingDTable(
+size_t HUF_decompress1X1_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
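/* Usage sketch for the renamed one-shot entry point above (illustrative ;
 * dstSize must be the exact regenerated size, known by the caller) :
 *
 *     size_t const r = HUF_decompress1X1(dst, dstSize, cSrc, cSrcSize);
 *     if (HUF_isError(r)) { }   // e.g. corruption_detected
 */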
+
+size_t HUF_decompress4X1_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                                   const void* cSrc, size_t cSrcSize,
-                                   void* workSpace, size_t wkspSize)
-{
-    const BYTE* ip = (const BYTE*) cSrc;
-
-    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
-    if (HUF_isError(hSize)) return hSize;
-    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
-    ip += hSize; cSrcSize -= hSize;
-
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-
-size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
-                              const void* cSrc, size_t cSrcSize)
-{
-    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
-                                       workSpace, sizeof(workSpace));
-}
-
-size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
-{
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
-}
-
-size_t HUF_decompress4X2_usingDTable(
-          void* dst,  size_t dstSize,
-    const void* cSrc, size_t cSrcSize,
-    const HUF_DTable* DTable)
-{
-    DTableDesc dtd = HUF_getDTableDesc(DTable);
-    if (dtd.tableType != 0) return ERROR(GENERIC);
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
-static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
+    size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize,
                                                 workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
 
-size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
-    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
 }
 
 
-size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
-size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
 
 /* *************************/
 /* double-symbols decoding */
 /* *************************/
-typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
 
-/* HUF_fillDTableX4Level2() :
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+
+
+/* HUF_fillDTableX2Level2() :
  * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
-static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
                            const U32* rankValOrigin, const int minWeight,
                            const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                            U32 nbBitsBaseline, U16 baseSeq)
 {
-    HUF_DEltX4 DElt;
+    HUF_DEltX2 DElt;
     U32 rankVal[HUF_TABLELOG_MAX + 1];
 
     /* get pre-calculated rankVal */
@@ -672,10 +490,8 @@
     }   }
 }
 
-typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
-typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
 
-static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
                            const sortedSymbol_t* sortedList, const U32 sortedListSize,
                            const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                            const U32 nbBitsBaseline)
@@ -700,12 +516,12 @@
             int minWeight = nbBits + scaleLog;
             if (minWeight < 1) minWeight = 1;
             sortedRank = rankStart[minWeight];
-            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
                            rankValOrigin[nbBits], minWeight,
                            sortedList+sortedRank, sortedListSize-sortedRank,
                            nbBitsBaseline, symbol);
         } else {
-            HUF_DEltX4 DElt;
+            HUF_DEltX2 DElt;
             MEM_writeLE16(&(DElt.sequence), symbol);
             DElt.nbBits = (BYTE)(nbBits);
             DElt.length = 1;
@@ -717,16 +533,16 @@
     }
 }
 
-size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
-                             size_t srcSize, void* workSpace,
-                             size_t wkspSize)
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+                       const void* src, size_t srcSize,
+                             void* workSpace, size_t wkspSize)
 {
     U32 tableLog, maxW, sizeOfSort, nbSymbols;
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     U32 const maxTableLog = dtd.maxTableLog;
     size_t iSize;
     void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
-    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
     U32 *rankStart;
 
     rankValCol_t* rankVal;
@@ -752,7 +568,7 @@
     rankStart = rankStart0 + 1;
     memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
 
-    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
+    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
     if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
     /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... */
 
@@ -806,7 +622,7 @@
                     rankValPtr[w] = rankVal0[w] >> consumed;
     }   }   }   }
 
-    HUF_fillDTableX4(dt, maxTableLog,
+    HUF_fillDTableX2(dt, maxTableLog,
                    sortedSymbol, sizeOfSort,
                    rankStart0, rankVal, maxW,
                    tableLog+1);
@@ -817,112 +633,296 @@
     return iSize;
 }
 
-size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
 {
   U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-  return HUF_readDTableX4_wksp(DTable, src, srcSize,
+  return HUF_readDTableX2_wksp(DTable, src, srcSize,
                                workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress1X4_usingDTable(
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
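/* The X2 ("double-symbol") entry decoded above packs up to two output bytes in
 * `sequence` : the 2-byte memcpy emits both at once, and `length` (1 or 2)
 * advances the output pointer. Illustrative layout check only; the same 4-byte
 * size is already asserted against HUF_DTable elsewhere in this file : */
#if 0   /* example only */
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
#endif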
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+    }   }
+    return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+                const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to end : up to 2 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BIT_DStream_t bitD;
+
+    /* Init */
+    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+    /* decode */
+    {   BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+    }
+
+    /* check */
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        size_t const segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
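/* Sketch of the 4-stream framing parsed above (illustrative) : a 6-byte jump
 * table stores the first three stream lengths as little-endian 16-bit values ;
 * the fourth length is implied by the total compressed size. */
#if 0   /* example only */
static size_t HUF_stream4Length_example(const BYTE* istart, size_t cSrcSize)
{
    size_t const length1 = MEM_readLE16(istart);
    size_t const length2 = MEM_readLE16(istart+2);
    size_t const length3 = MEM_readLE16(istart+4);
    if (length1 + length2 + length3 + 6 > cSrcSize) return (size_t)-1;   /* corrupted input */
    return cSrcSize - (length1 + length2 + length3 + 6);
}
#endif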
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
+    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
                                                workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
 }
 
 
-size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                               const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
-size_t HUF_decompress4X4_usingDTable(
+size_t HUF_decompress4X2_usingDTable(
           void* dst,  size_t dstSize,
     const void* cSrc, size_t cSrcSize,
     const HUF_DTable* DTable)
 {
     DTableDesc dtd = HUF_getDTableDesc(DTable);
     if (dtd.tableType != 1) return ERROR(GENERIC);
-    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
-static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
+    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
                                          workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
 
-size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    void* workSpace, size_t wkspSize)
 {
-    return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
 }
 
 
-size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                               const void* cSrc, size_t cSrcSize)
 {
     U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
-    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                        workSpace, sizeof(workSpace));
 }
 
-size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
-    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
 }
 
 
-/* ********************************/
-/* Generic decompression selector */
-/* ********************************/
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
 
 size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                     const void* cSrc, size_t cSrcSize,
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
 size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
@@ -930,8 +930,8 @@
                                     const HUF_DTable* DTable)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
-                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
 }
 
 
@@ -960,12 +960,12 @@
 /** HUF_selectDecoder() :
  *  Tells which decoder is likely to decode faster,
  *  based on a set of pre-computed metrics.
- * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
  *  Assumption : 0 < dstSize <= 128 KB */
 U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
 {
     assert(dstSize > 0);
-    assert(dstSize <= 128 KB);
+    assert(dstSize <= 128*1024);
     /* decoder timing evaluation */
     {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
         U32 const D256 = (U32)(dstSize >> 8);
@@ -980,7 +980,7 @@
 
 size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
 {
-    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };
+    static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
 
     /* validation checks */
     if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1002,8 +1002,8 @@
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
-                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+        return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
     }
 }
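
For context on the selector these renames flow through: HUF_selectDecoder() scores the compression ratio Q = cSrcSize * 16 / dstSize against pre-computed timing metrics. A minimal sketch of the one-shot path, assuming the public API shown above; the example_* name is illustrative, not part of zstd:

/* With dstSize == 64 KB and cSrcSize == 32 KB, Q == 32768*16/65536 == 8;
 * HUF_selectDecoder() weighs that ratio and the output size to pick a
 * decoder. Illustrative only. */
size_t example_one_shot(void* dst, size_t dstSize,
                        const void* cSrc, size_t cSrcSize)
{
    U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
    (void)algoNb;   /* 0 -> HUF_decompress4X1, 1 -> HUF_decompress4X2;
                     * HUF_decompress() repeats this selection internally */
    return HUF_decompress(dst, dstSize, cSrc, cSrcSize);
}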
 
@@ -1025,8 +1025,8 @@
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
-                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
     }
 }
 
@@ -1041,9 +1041,9 @@
     if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
+        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize):
-                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                 cSrcSize, workSpace, wkspSize);
     }
 }
@@ -1060,27 +1060,27 @@
 size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
 }
 
-size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
 {
     const BYTE* ip = (const BYTE*) cSrc;
 
-    size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+    size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
     if (HUF_isError(hSize)) return hSize;
     if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
     ip += hSize; cSrcSize -= hSize;
 
-    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
 }
 
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
 {
     DTableDesc const dtd = HUF_getDTableDesc(DTable);
-    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
-                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
 }
 
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
@@ -1090,7 +1090,7 @@
     if (cSrcSize == 0) return ERROR(corruption_detected);
 
     {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
-        return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
-                        HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
     }
 }
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Mon Oct 22 14:46:06 2018 -0400
@@ -40,12 +40,24 @@
 #  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
 #endif
 
+/*!
+ *  NO_FORWARD_PROGRESS_MAX :
+ *  maximum allowed nb of calls to ZSTD_decompressStream() and ZSTD_decompress_generic()
+ *  without any forward progress
+ *  (defined as: no byte read from input, and no byte flushed to output)
+ *  before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
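
A hedged sketch of what this guard means for callers; the example_* name is illustrative, and the loop deliberately models a buggy caller that neither refills its input nor flushes its output:

/* After ZSTD_NO_FORWARD_PROGRESS_MAX consecutive calls that read no input
 * byte and flush no output byte, ZSTD_decompressStream() returns an error
 * instead of letting this loop spin forever. */
size_t example_stalled_caller(ZSTD_DStream* zds,
                              ZSTD_inBuffer* in, ZSTD_outBuffer* out)
{
    for (;;) {
        size_t const ret = ZSTD_decompressStream(zds, out, in);
        if (ZSTD_isError(ret)) return ret;  /* dstSize_tooSmall or srcSize_wrong */
        if (ret == 0) return 0;             /* frame fully decoded */
        /* bug being modeled: neither in->pos nor out->pos ever advances */
    }
}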
 
 /*-*******************************************************
 *  Dependencies
 *********************************************************/
 #include <string.h>      /* memcpy, memmove, memset */
-#include "cpu.h"
+#include "compiler.h"    /* prefetch */
+#include "cpu.h"         /* bmi2 */
 #include "mem.h"         /* low level memory routines */
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
@@ -57,6 +69,9 @@
 #  include "zstd_legacy.h"
 #endif
 
+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
+
 
 /*-*************************************
 *  Errors
@@ -99,11 +114,10 @@
 #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
 
 typedef struct {
-    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];
-    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
-    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];
+    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
+    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
+    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
     HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
-    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
     U32 rep[ZSTD_REP_NUM];
 } ZSTD_entropyDTables_t;
 
@@ -114,9 +128,10 @@
     const ZSTD_seqSymbol* OFTptr;
     const HUF_DTable* HUFptr;
     ZSTD_entropyDTables_t entropy;
+    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
     const void* previousDstEnd;   /* detect continuity */
-    const void* base;             /* start of current segment */
-    const void* vBase;            /* virtual start of previous segment if it was just before current one */
+    const void* prefixStart;      /* start of current segment */
+    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
     const void* dictEnd;          /* end of previous segment */
     size_t expected;
     ZSTD_frameHeader fParams;
@@ -127,7 +142,6 @@
     U32 fseEntropy;
     XXH64_state_t xxhState;
     size_t headerSize;
-    U32 dictID;
     ZSTD_format_e format;
     const BYTE* litPtr;
     ZSTD_customMem customMem;
@@ -136,9 +150,13 @@
     size_t staticSize;
     int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
 
-    /* streaming */
+    /* dictionary */
     ZSTD_DDict* ddictLocal;
-    const ZSTD_DDict* ddict;
+    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+    U32 dictID;
+    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+
+    /* streaming */
     ZSTD_dStreamStage streamStage;
     char*  inBuff;
     size_t inBuffSize;
@@ -153,6 +171,7 @@
     U32 previousLegacyVersion;
     U32 legacyVersion;
     U32 hostageByte;
+    int noForwardProgress;
 
     /* workspace */
     BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
@@ -173,7 +192,7 @@
 static size_t ZSTD_startingInputLength(ZSTD_format_e format)
 {
     size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
-                    ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
+                    ZSTD_frameHeaderSize_prefix - ZSTD_FRAMEIDSIZE :
                     ZSTD_frameHeaderSize_prefix;
     ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
     /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
@@ -188,10 +207,15 @@
     dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
     dctx->ddict       = NULL;
     dctx->ddictLocal  = NULL;
+    dctx->dictEnd     = NULL;
+    dctx->ddictIsCold = 0;
     dctx->inBuff      = NULL;
     dctx->inBuffSize  = 0;
     dctx->outBuffSize = 0;
     dctx->streamStage = zdss_init;
+    dctx->legacyContext = NULL;
+    dctx->previousLegacyVersion = 0;
+    dctx->noForwardProgress = 0;
     dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
 }
 
@@ -215,8 +239,6 @@
     {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
         if (!dctx) return NULL;
         dctx->customMem = customMem;
-        dctx->legacyContext = NULL;
-        dctx->previousLegacyVersion = 0;
         ZSTD_initDCtx_internal(dctx);
         return dctx;
     }
@@ -265,7 +287,7 @@
  *  Note 3 : Skippable Frame Identifiers are considered valid. */
 unsigned ZSTD_isFrame(const void* buffer, size_t size)
 {
-    if (size < ZSTD_frameIdSize) return 0;
+    if (size < ZSTD_FRAMEIDSIZE) return 0;
     {   U32 const magic = MEM_readLE32(buffer);
         if (magic == ZSTD_MAGICNUMBER) return 1;
         if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
@@ -298,25 +320,28 @@
 
 /** ZSTD_frameHeaderSize() :
  *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
- * @return : size of the Frame Header */
+ * @return : size of the Frame Header,
+ *           or an error code (if srcSize is too small) */
 size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
 {
     return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
 }
 
 
-/** ZSTD_getFrameHeader_internal() :
+/** ZSTD_getFrameHeader_advanced() :
  *  decode Frame Header, or require larger `srcSize`.
  *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
  * @return : 0, `zfhPtr` is correctly filled,
  *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
  *           or an error code, which can be tested using ZSTD_isError() */
-static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
 {
     const BYTE* ip = (const BYTE*)src;
     size_t const minInputSize = ZSTD_startingInputLength(format);
 
+    memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since those are two different signals */
     if (srcSize < minInputSize) return minInputSize;
+    if (src==NULL) return ERROR(GENERIC);   /* invalid parameter */
 
     if ( (format != ZSTD_f_zstd1_magicless)
       && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
@@ -325,7 +350,7 @@
             if (srcSize < ZSTD_skippableHeaderSize)
                 return ZSTD_skippableHeaderSize; /* magic number + frame length */
             memset(zfhPtr, 0, sizeof(*zfhPtr));
-            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
+            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
             zfhPtr->frameType = ZSTD_skippableFrame;
             return 0;
         }
@@ -394,7 +419,7 @@
  *           or an error code, which can be tested using ZSTD_isError() */
 size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
 {
-    return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
 }
 
 
@@ -437,7 +462,7 @@
             size_t skippableSize;
             if (srcSize < ZSTD_skippableHeaderSize)
                 return ERROR(srcSize_wrong);
-            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
+            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_FRAMEIDSIZE)
                           + ZSTD_skippableHeaderSize;
             if (srcSize < skippableSize) {
                 return ZSTD_CONTENTSIZE_ERROR;
@@ -491,7 +516,7 @@
 *   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
 static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
 {
-    size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format);
+    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
     if (ZSTD_isError(result)) return result;    /* invalid header */
     if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
     if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
@@ -526,6 +551,7 @@
 static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
 {
+    if (dst==NULL) return ERROR(dstSize_tooSmall);
     if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
     memcpy(dst, src, srcSize);
     return srcSize;
@@ -542,6 +568,9 @@
     return regenSize;
 }
 
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize);
 /*! ZSTD_decodeLiteralsBlock() :
  * @return : nb of bytes read from src (< srcSize )
  *  note : symbol not declared but exposed for fullbench */
@@ -558,6 +587,7 @@
         case set_repeat:
             if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
             /* fall-through */
+
         case set_compressed:
             if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
             {   size_t lhSize, litSize, litCSize;
@@ -589,15 +619,20 @@
                 if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
                 if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
 
+                /* prefetch huffman table if cold */
+                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+                }
+
                 if (HUF_isError((litEncType==set_repeat) ?
                                     ( singleStream ?
                                         HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
                                         HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
                                     ( singleStream ?
-                                        HUF_decompress1X2_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                         dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) :
+                                        HUF_decompress1X1_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
+                                                                         dctx->workspace, sizeof(dctx->workspace), dctx->bmi2) :
                                         HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
-                                                                           dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2))))
+                                                                           dctx->workspace, sizeof(dctx->workspace), dctx->bmi2))))
                     return ERROR(corruption_detected);
 
                 dctx->litPtr = dctx->litBuffer;
@@ -869,7 +904,8 @@
                                  symbolEncodingType_e type, U32 max, U32 maxLog,
                                  const void* src, size_t srcSize,
                                  const U32* baseValue, const U32* nbAdditionalBits,
-                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable)
+                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+                                 int ddictIsCold, int nbSeq)
 {
     switch(type)
     {
@@ -888,6 +924,12 @@
         return 0;
     case set_repeat:
         if (!flagRepeatTable) return ERROR(corruption_detected);
+        /* prefetch FSE table if used */
+        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+            const void* const pStart = *DTablePtr;
+            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+            PREFETCH_AREA(pStart, pSize);
+        }
         return 0;
     case set_compressed :
         {   U32 tableLog;
@@ -933,6 +975,9 @@
                     67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
                     0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
 
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                             const void* src, size_t srcSize);
 
 size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                              const void* src, size_t srcSize)
@@ -940,25 +985,25 @@
     const BYTE* const istart = (const BYTE* const)src;
     const BYTE* const iend = istart + srcSize;
     const BYTE* ip = istart;
+    int nbSeq;
     DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
 
     /* check */
     if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
 
     /* SeqHead */
-    {   int nbSeq = *ip++;
-        if (!nbSeq) { *nbSeqPtr=0; return 1; }
-        if (nbSeq > 0x7F) {
-            if (nbSeq == 0xFF) {
-                if (ip+2 > iend) return ERROR(srcSize_wrong);
-                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
-            } else {
-                if (ip >= iend) return ERROR(srcSize_wrong);
-                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
-            }
+    nbSeq = *ip++;
+    if (!nbSeq) { *nbSeqPtr=0; return 1; }
+    if (nbSeq > 0x7F) {
+        if (nbSeq == 0xFF) {
+            if (ip+2 > iend) return ERROR(srcSize_wrong);
+            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+        } else {
+            if (ip >= iend) return ERROR(srcSize_wrong);
+            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
         }
-        *nbSeqPtr = nbSeq;
     }
+    *nbSeqPtr = nbSeq;
 
     /* FSE table descriptors */
     if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
@@ -972,7 +1017,8 @@
                                                       LLtype, MaxLL, LLFSELog,
                                                       ip, iend-ip,
                                                       LL_base, LL_bits,
-                                                      LL_defaultDTable, dctx->fseEntropy);
+                                                      LL_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
             ip += llhSize;
         }
@@ -981,7 +1027,8 @@
                                                       OFtype, MaxOff, OffFSELog,
                                                       ip, iend-ip,
                                                       OF_base, OF_bits,
-                                                      OF_defaultDTable, dctx->fseEntropy);
+                                                      OF_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
             ip += ofhSize;
         }
@@ -990,12 +1037,23 @@
                                                       MLtype, MaxML, MLFSELog,
                                                       ip, iend-ip,
                                                       ML_base, ML_bits,
-                                                      ML_defaultDTable, dctx->fseEntropy);
+                                                      ML_defaultDTable, dctx->fseEntropy,
+                                                      dctx->ddictIsCold, nbSeq);
             if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
             ip += mlhSize;
         }
     }
 
+    /* prefetch dictionary content */
+    if (dctx->ddictIsCold) {
+        size_t const dictSize = (const char*)dctx->prefixStart - (const char*)dctx->virtualStart;
+        size_t const psmin = MIN(dictSize, (size_t)(64*nbSeq) /* heuristic */ );
+        size_t const pSize = MIN(psmin, 128 KB /* protection */ );
+        const void* const pStart = (const char*)dctx->dictEnd - pSize;
+        PREFETCH_AREA(pStart, pSize);
+        dctx->ddictIsCold = 0;
+    }
+
     return ip-istart;
 }
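
A worked restatement of the sizing heuristic above, as a standalone sketch (EX_MIN and example_* are illustrative, not zstd API): with a 1 MB dictionary and nbSeq == 500, psmin = MIN(1 MB, 64*500) = 32000 bytes, and the 128 KB cap leaves that unchanged.

#include <stddef.h>
#define EX_MIN(a,b) ((a) < (b) ? (a) : (b))

/* Prefetch at most 64 bytes per sequence from the dictionary tail,
 * never more than the dictionary itself and never more than 128 KB. */
static size_t example_prefetch_size(size_t dictSize, int nbSeq)
{
    size_t const psmin = EX_MIN(dictSize, (size_t)(64 * nbSeq));
    return EX_MIN(psmin, (size_t)(128 * 1024));
}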
 
@@ -1075,7 +1133,7 @@
 size_t ZSTD_execSequence(BYTE* op,
                          BYTE* const oend, seq_t sequence,
                          const BYTE** litPtr, const BYTE* const litLimit,
-                         const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
 {
     BYTE* const oLitEnd = op + sequence.litLength;
     size_t const sequenceLength = sequence.litLength + sequence.matchLength;
@@ -1087,7 +1145,7 @@
     /* check */
     if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
     if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
-    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
 
     /* copy Literals */
     ZSTD_copy8(op, *litPtr);
@@ -1097,11 +1155,11 @@
     *litPtr = iLitEnd;   /* update for next sequence */
 
     /* copy Match */
-    if (sequence.offset > (size_t)(oLitEnd - base)) {
+    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
         /* offset beyond prefix -> go into extDict */
-        if (sequence.offset > (size_t)(oLitEnd - vBase))
+        if (sequence.offset > (size_t)(oLitEnd - virtualStart))
             return ERROR(corruption_detected);
-        match = dictEnd + (match - base);
+        match = dictEnd + (match - prefixStart);
         if (match + sequence.matchLength <= dictEnd) {
             memmove(oLitEnd, match, sequence.matchLength);
             return sequenceLength;
@@ -1111,7 +1169,7 @@
             memmove(oLitEnd, match, length1);
             op = oLitEnd + length1;
             sequence.matchLength -= length1;
-            match = base;
+            match = prefixStart;
             if (op > oend_w || sequence.matchLength < MINMATCH) {
               U32 i;
               for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
@@ -1354,10 +1412,10 @@
     BYTE* op = ostart;
     const BYTE* litPtr = dctx->litPtr;
     const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const base = (const BYTE*) (dctx->base);
-    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
     const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
-    DEBUGLOG(5, "ZSTD_decompressSequences");
+    DEBUGLOG(5, "ZSTD_decompressSequences_body");
 
     /* Regen sequences */
     if (nbSeq) {
@@ -1372,14 +1430,14 @@
         for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
             nbSeq--;
             {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
-                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
                 DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
                 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
                 op += oneSeqSize;
         }   }
 
         /* check if reached exact end */
-        DEBUGLOG(5, "ZSTD_decompressSequences: after decode loop, remaining nbSeq : %i", nbSeq);
+        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
         if (nbSeq) return ERROR(corruption_detected);
         /* save reps for next block */
         { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
@@ -1498,8 +1556,8 @@
     BYTE* op = ostart;
     const BYTE* litPtr = dctx->litPtr;
     const BYTE* const litEnd = litPtr + dctx->litSize;
-    const BYTE* const prefixStart = (const BYTE*) (dctx->base);
-    const BYTE* const dictStart = (const BYTE*) (dctx->vBase);
+    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
     const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
 
     /* Regen sequences */
@@ -1662,7 +1720,8 @@
     /* isLongOffset must be true if there are long offsets.
      * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
      * We don't expect that to be the case in 64-bit mode.
-     * In block mode, window size is not known, so we have to be conservative. (note: but it could be evaluated from current-lowLimit)
+     * In block mode, window size is not known, so we have to be conservative.
+     * (note: but it could be evaluated from current-lowLimit)
      */
     ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
     DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
@@ -1701,8 +1760,8 @@
 {
     if (dst != dctx->previousDstEnd) {   /* not contiguous */
         dctx->dictEnd = dctx->previousDstEnd;
-        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-        dctx->base = dst;
+        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+        dctx->prefixStart = dst;
         dctx->previousDstEnd = dst;
     }
 }
@@ -1729,10 +1788,10 @@
 }
 
 
-static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE value, size_t length)
 {
     if (length > dstCapacity) return ERROR(dstSize_tooSmall);
-    memset(dst, byte, length);
+    memset(dst, value, length);
     return length;
 }
 
@@ -1749,7 +1808,7 @@
 #endif
     if ( (srcSize >= ZSTD_skippableHeaderSize)
       && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
-        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
+        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE);
     } else {
         const BYTE* ip = (const BYTE*)src;
         const BYTE* const ipstart = ip;
@@ -1783,7 +1842,6 @@
         if (zfh.checksumFlag) {   /* Final frame content checksum */
             if (remainingSize < 4) return ERROR(srcSize_wrong);
             ip += 4;
-            remainingSize -= 4;
         }
 
         return ip - ipstart;
@@ -1871,9 +1929,6 @@
     return op-ostart;
 }
 
-static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
-static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
-
 static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
                                         void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
@@ -1881,6 +1936,9 @@
                                   const ZSTD_DDict* ddict)
 {
     void* const dststart = dst;
+    int moreThan1Frame = 0;
+
+    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
     assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
 
     if (ddict) {
@@ -1889,7 +1947,6 @@
     }
 
     while (srcSize >= ZSTD_frameHeaderSize_prefix) {
-        U32 magicNumber;
 
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
         if (ZSTD_isLegacy(src, srcSize)) {
@@ -1911,24 +1968,21 @@
         }
 #endif
 
-        magicNumber = MEM_readLE32(src);
-        DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
-                    (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
-        if (magicNumber != ZSTD_MAGICNUMBER) {
+        {   U32 const magicNumber = MEM_readLE32(src);
+            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+                        (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
             if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
                 size_t skippableSize;
                 if (srcSize < ZSTD_skippableHeaderSize)
                     return ERROR(srcSize_wrong);
-                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
+                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_FRAMEIDSIZE)
                               + ZSTD_skippableHeaderSize;
                 if (srcSize < skippableSize) return ERROR(srcSize_wrong);
 
                 src = (const BYTE *)src + skippableSize;
                 srcSize -= skippableSize;
                 continue;
-            }
-            return ERROR(prefix_unknown);
-        }
+        }   }
 
         if (ddict) {
             /* we were called from ZSTD_decompress_usingDDict */
@@ -1942,11 +1996,25 @@
 
         {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
                                                     &src, &srcSize);
+            if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+              && (moreThan1Frame==1) ) {
+                /* at least one frame successfully completed,
+                 * but the following bytes are garbage :
+                 * it's more likely to be a srcSize error,
+                 * specifying more bytes than the compressed size of the frame(s).
+                 * This error message replaces ERROR(prefix_unknown),
+                 * which would be confusing, as the first header is actually correct.
+                 * Note that one could be unlucky; it might be a corruption error instead,
+                 * happening right at the place where we expect zstd magic bytes.
+                 * But this is _much_ less likely than a srcSize field error. */
+                return ERROR(srcSize_wrong);
+            }
             if (ZSTD_isError(res)) return res;
             /* no need to bound check, ZSTD_decompressFrame already has */
             dst = (BYTE*)dst + res;
             dstCapacity -= res;
         }
+        moreThan1Frame = 1;
     }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
 
     if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
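
A sketch of the caller-visible effect, assuming a buffer holding one valid frame followed by garbage and an over-stated source size; the example_* wrapper is illustrative:

/* The first frame decodes normally; the garbage where a second magic
 * number is expected now reports srcSize_wrong rather than the
 * misleading prefix_unknown. */
size_t example_overstated_srcSize(ZSTD_DCtx* dctx,
                                  void* dst, size_t dstCapacity,
                                  const void* src, size_t reportedSize)
{
    size_t const r = ZSTD_decompressDCtx(dctx, dst, dstCapacity,
                                         src, reportedSize);
    /* expect ZSTD_getErrorCode(r) == ZSTD_error_srcSize_wrong here */
    return r;
}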
@@ -1980,6 +2048,7 @@
     return regenSize;
 #else   /* stack mode */
     ZSTD_DCtx dctx;
+    ZSTD_initDCtx_internal(&dctx);
     return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
 #endif
 }
@@ -2031,7 +2100,7 @@
     case ZSTDds_getFrameHeaderSize :
         assert(src != NULL);
         if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
-            assert(srcSize >= ZSTD_frameIdSize);  /* to read skippable magic number */
+            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
             if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
                 memcpy(dctx->headerBuffer, src, srcSize);
                 dctx->expected = ZSTD_skippableHeaderSize - srcSize;  /* remaining to load to get full skippable frame header */
@@ -2141,7 +2210,7 @@
         assert(src != NULL);
         assert(srcSize <= ZSTD_skippableHeaderSize);
         memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize);   /* complete skippable header */
-        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
+        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
         dctx->stage = ZSTDds_skipFrame;
         return 0;
 
@@ -2159,27 +2228,33 @@
 static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
 {
     dctx->dictEnd = dctx->previousDstEnd;
-    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
-    dctx->base = dict;
+    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+    dctx->prefixStart = dict;
     dctx->previousDstEnd = (const char*)dict + dictSize;
     return 0;
 }
 
-/* ZSTD_loadEntropy() :
- * dict : must point at beginning of a valid zstd dictionary
+/*! ZSTD_loadEntropy() :
+ *  dict : must point at beginning of a valid zstd dictionary.
  * @return : size of entropy tables read */
-static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
+static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy,
+                         const void* const dict, size_t const dictSize)
 {
     const BYTE* dictPtr = (const BYTE*)dict;
     const BYTE* const dictEnd = dictPtr + dictSize;
 
     if (dictSize <= 8) return ERROR(dictionary_corrupted);
+    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
     dictPtr += 8;   /* skip header = magic + dictID */
 
-
-    {   size_t const hSize = HUF_readDTableX4_wksp(
-            entropy->hufTable, dictPtr, dictEnd - dictPtr,
-            entropy->workspace, sizeof(entropy->workspace));
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
+        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+                                                dictPtr, dictEnd - dictPtr,
+                                                workspace, workspaceSize);
         if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
         dictPtr += hSize;
     }
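
The static asserts above are what make this borrow safe; a generic sketch of the idea, with illustrative sizes that are not zstd's:

#include <stddef.h>

struct example_tables { int ll[513]; int of[257]; int ml[513]; };

/* Treat three adjacent tables as one scratch region while another
 * structure (here, the Huffman table) is rebuilt; valid only because
 * the members sit contiguously, which the ZSTD_STATIC_ASSERTs above
 * verify for the real ZSTD_entropyDTables_t via offsetof(). */
static void* example_scratch(struct example_tables* t, size_t* size)
{
    *size = sizeof(t->ll) + sizeof(t->of) + sizeof(t->ml);
    return &t->ll;
}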
@@ -2190,7 +2265,7 @@
         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
         if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->OFTable,
+        ZSTD_buildFSETable( entropy->OFTable,
                             offcodeNCount, offcodeMaxValue,
                             OF_base, OF_bits,
                             offcodeLog);
@@ -2203,7 +2278,7 @@
         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->MLTable,
+        ZSTD_buildFSETable( entropy->MLTable,
                             matchlengthNCount, matchlengthMaxValue,
                             ML_base, ML_bits,
                             matchlengthLog);
@@ -2216,7 +2291,7 @@
         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
         if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
-        ZSTD_buildFSETable(entropy->LLTable,
+        ZSTD_buildFSETable( entropy->LLTable,
                             litlengthNCount, litlengthMaxValue,
                             LL_base, LL_bits,
                             litlengthLog);
@@ -2242,7 +2317,7 @@
         if (magic != ZSTD_MAGIC_DICTIONARY) {
             return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
     }   }
-    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
 
     /* load entropy tables */
     {   size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
@@ -2256,7 +2331,6 @@
     return ZSTD_refDictContent(dctx, dict, dictSize);
 }
 
-/* Note : this function cannot fail */
 size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
 {
     assert(dctx != NULL);
@@ -2264,8 +2338,8 @@
     dctx->stage = ZSTDds_getFrameHeaderSize;
     dctx->decodedSize = 0;
     dctx->previousDstEnd = NULL;
-    dctx->base = NULL;
-    dctx->vBase = NULL;
+    dctx->prefixStart = NULL;
+    dctx->virtualStart = NULL;
     dctx->dictEnd = NULL;
     dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
     dctx->litEntropy = dctx->fseEntropy = 0;
@@ -2302,42 +2376,53 @@
 
 static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
 {
+    assert(ddict != NULL);
     return ddict->dictContent;
 }
 
 static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
 {
+    assert(ddict != NULL);
     return ddict->dictSize;
 }
 
-size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
 {
-    CHECK_F( ZSTD_decompressBegin(dstDCtx) );
-    if (ddict) {   /* support begin on NULL */
-        dstDCtx->dictID = ddict->dictID;
-        dstDCtx->base = ddict->dictContent;
-        dstDCtx->vBase = ddict->dictContent;
-        dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
-        dstDCtx->previousDstEnd = dstDCtx->dictEnd;
+    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+    assert(dctx != NULL);
+    if (ddict) {
+        dctx->ddictIsCold = (dctx->dictEnd != (const char*)ddict->dictContent + ddict->dictSize);
+        DEBUGLOG(4, "DDict is %s",
+                    dctx->ddictIsCold ? "~cold~" : "hot!");
+    }
+    CHECK_F( ZSTD_decompressBegin(dctx) );
+    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
+        dctx->dictID = ddict->dictID;
+        dctx->prefixStart = ddict->dictContent;
+        dctx->virtualStart = ddict->dictContent;
+        dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+        dctx->previousDstEnd = dctx->dictEnd;
         if (ddict->entropyPresent) {
-            dstDCtx->litEntropy = 1;
-            dstDCtx->fseEntropy = 1;
-            dstDCtx->LLTptr = ddict->entropy.LLTable;
-            dstDCtx->MLTptr = ddict->entropy.MLTable;
-            dstDCtx->OFTptr = ddict->entropy.OFTable;
-            dstDCtx->HUFptr = ddict->entropy.hufTable;
-            dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
-            dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
-            dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
+            dctx->litEntropy = 1;
+            dctx->fseEntropy = 1;
+            dctx->LLTptr = ddict->entropy.LLTable;
+            dctx->MLTptr = ddict->entropy.MLTable;
+            dctx->OFTptr = ddict->entropy.OFTable;
+            dctx->HUFptr = ddict->entropy.hufTable;
+            dctx->entropy.rep[0] = ddict->entropy.rep[0];
+            dctx->entropy.rep[1] = ddict->entropy.rep[1];
+            dctx->entropy.rep[2] = ddict->entropy.rep[2];
         } else {
-            dstDCtx->litEntropy = 0;
-            dstDCtx->fseEntropy = 0;
+            dctx->litEntropy = 0;
+            dctx->fseEntropy = 0;
         }
     }
     return 0;
 }
 
-static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType)
+static size_t
+ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict,
+                         ZSTD_dictContentType_e dictContentType)
 {
     ddict->dictID = 0;
     ddict->entropyPresent = 0;
@@ -2355,10 +2440,12 @@
             return 0;   /* pure content mode */
         }
     }
-    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
+    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
 
     /* load entropy tables */
-    CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
+    CHECK_E( ZSTD_loadEntropy(&ddict->entropy,
+                              ddict->dictContent, ddict->dictSize),
+             dictionary_corrupted );
     ddict->entropyPresent = 1;
     return 0;
 }
@@ -2372,6 +2459,7 @@
     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
         ddict->dictBuffer = NULL;
         ddict->dictContent = dict;
+        if (!dict) dictSize = 0;
     } else {
         void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
         ddict->dictBuffer = internalBuffer;
@@ -2396,14 +2484,15 @@
     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
 
     {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        if (!ddict) return NULL;
+        if (ddict == NULL) return NULL;
         ddict->cMem = customMem;
-
-        if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) {
-            ZSTD_freeDDict(ddict);
-            return NULL;
-        }
-
+        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
+                                            dict, dictSize,
+                                            dictLoadMethod, dictContentType);
+            if (ZSTD_isError(initResult)) {
+                ZSTD_freeDDict(ddict);
+                return NULL;
+        }   }
         return ddict;
     }
 }
@@ -2430,23 +2519,25 @@
 
 
 const ZSTD_DDict* ZSTD_initStaticDDict(
-                                void* workspace, size_t workspaceSize,
+                                void* sBuffer, size_t sBufferSize,
                                 const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType)
 {
-    size_t const neededSpace =
-            sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
-    ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
-    assert(workspace != NULL);
+    size_t const neededSpace = sizeof(ZSTD_DDict)
+                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+    assert(sBuffer != NULL);
     assert(dict != NULL);
-    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
-    if (workspaceSize < neededSpace) return NULL;
+    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
+    if (sBufferSize < neededSpace) return NULL;
     if (dictLoadMethod == ZSTD_dlm_byCopy) {
         memcpy(ddict+1, dict, dictSize);  /* local copy */
         dict = ddict+1;
     }
-    if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) ))
+    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+                                              dict, dictSize,
+                                              ZSTD_dlm_byRef, dictContentType) ))
         return NULL;
     return ddict;
 }
@@ -2484,7 +2575,7 @@
 {
     if (dictSize < 8) return 0;
     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
-    return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
 }
 
 /*! ZSTD_getDictID_fromDDict() :
@@ -2560,12 +2651,15 @@
 }
 
 
-/* *** Initialization *** */
+/* ***  Initialization  *** */
 
 size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
 size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
 
-size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+                                   const void* dict, size_t dictSize,
+                                         ZSTD_dictLoadMethod_e dictLoadMethod,
+                                         ZSTD_dictContentType_e dictContentType)
 {
     if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
     ZSTD_freeDDict(dctx->ddictLocal);
@@ -2607,6 +2701,7 @@
 {
     DEBUGLOG(4, "ZSTD_initDStream_usingDict");
     zds->streamStage = zdss_init;
+    zds->noForwardProgress = 0;
     CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
     return ZSTD_frameHeaderSize_prefix;
 }
@@ -2618,13 +2713,6 @@
     return ZSTD_initDStream_usingDict(zds, NULL, 0);
 }
 
-size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
-{
-    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
-    dctx->ddict = ddict;
-    return 0;
-}
-
 /* ZSTD_initDStream_usingDDict() :
  * ddict will just be referenced, and must outlive decompression session
  * this function cannot fail */
@@ -2663,6 +2751,13 @@
     return 0;
 }
 
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    dctx->ddict = ddict;
+    return 0;
+}
+
 size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
 {
     if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
@@ -2767,7 +2862,7 @@
                     return hint;
             }   }
 #endif
-            {   size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
                 DEBUGLOG(5, "header size : %u", (U32)hSize);
                 if (ZSTD_isError(hSize)) {
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
@@ -2828,7 +2923,7 @@
             CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
 
             if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
-                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
+                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
                 zds->stage = ZSTDds_skipFrame;
             } else {
                 CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
@@ -2947,8 +3042,18 @@
     }   }
 
     /* result */
-    input->pos += (size_t)(ip-istart);
-    output->pos += (size_t)(op-ostart);
+    input->pos = (size_t)(ip - (const char*)(input->src));
+    output->pos = (size_t)(op - (char*)(output->dst));
+    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
+        zds->noForwardProgress ++;
+        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+            if (op==oend) return ERROR(dstSize_tooSmall);
+            if (ip==iend) return ERROR(srcSize_wrong);
+            assert(0);
+        }
+    } else {
+        zds->noForwardProgress = 0;
+    }
     {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
         if (!nextSrcSizeHint) {   /* frame fully decoded */
             if (zds->outEnd == zds->outStart) {  /* output fully flushed */
--- a/contrib/python-zstandard/zstd/dictBuilder/cover.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c	Mon Oct 22 14:46:06 2018 -0400
@@ -29,6 +29,7 @@
 #include "mem.h" /* read */
 #include "pool.h"
 #include "threading.h"
+#include "cover.h"
 #include "zstd_internal.h" /* includes zstd.h */
 #ifndef ZDICT_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
@@ -39,6 +40,7 @@
 *  Constants
 ***************************************/
 #define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define DEFAULT_SPLITPOINT 1.0
 
 /*-*************************************
 *  Console display
@@ -184,7 +186,7 @@
 }
 
 /**
- * Destroyes a map that is inited with COVER_map_init().
+ * Destroys a map that is inited with COVER_map_init().
  */
 static void COVER_map_destroy(COVER_map_t *map) {
   if (map->data) {
@@ -203,6 +205,8 @@
   size_t *offsets;
   const size_t *samplesSizes;
   size_t nbSamples;
+  size_t nbTrainSamples;
+  size_t nbTestSamples;
   U32 *suffix;
   size_t suffixSize;
   U32 *freqs;
@@ -220,9 +224,9 @@
 /**
  * Returns the sum of the sample sizes.
  */
-static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
   size_t sum = 0;
-  size_t i;
+  unsigned i;
   for (i = 0; i < nbSamples; ++i) {
     sum += samplesSizes[i];
   }
@@ -377,14 +381,6 @@
   ctx->suffix[dmerId] = freq;
 }
 
-/**
- * A segment is a range in the source as well as the score of the segment.
- */
-typedef struct {
-  U32 begin;
-  U32 end;
-  U32 score;
-} COVER_segment_t;
 
 /**
  * Selects the best segment in an epoch.
@@ -494,6 +490,10 @@
   if (parameters.d > parameters.k) {
     return 0;
   }
+  /* 0 < splitPoint <= 1 */
+  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
+    return 0;
+  }
   return 1;
 }
 
@@ -531,9 +531,14 @@
  */
 static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                           const size_t *samplesSizes, unsigned nbSamples,
-                          unsigned d) {
+                          unsigned d, double splitPoint) {
   const BYTE *const samples = (const BYTE *)samplesBuffer;
   const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+  /* Split samples into testing and training sets */
+  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
   /* Checks */
   if (totalSamplesSize < MAX(d, sizeof(U64)) ||
       totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
@@ -541,15 +546,29 @@
                  (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
     return 0;
   }
+  /* Check if there are at least 5 training samples */
+  if (nbTrainSamples < 5) {
+    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
+    return 0;
+  }
+  /* Check if there's testing sample */
+  if (nbTestSamples < 1) {
+    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
+    return 0;
+  }
   /* Zero the context */
   memset(ctx, 0, sizeof(*ctx));
-  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
-               (U32)totalSamplesSize);
+  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+               (U32)trainingSamplesSize);
+  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+               (U32)testSamplesSize);
   ctx->samples = samples;
   ctx->samplesSizes = samplesSizes;
   ctx->nbSamples = nbSamples;
+  ctx->nbTrainSamples = nbTrainSamples;
+  ctx->nbTestSamples = nbTestSamples;
   /* Partial suffix array */
-  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
+  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
   ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
   /* Maps index to the dmerID */
   ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
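
A standalone restatement of the train/test split introduced above (example_* is illustrative): with nbSamples == 100 and splitPoint == 0.75, 75 samples train the dictionary and the remaining 25 score it, while splitPoint == 1.0 keeps the old behavior of training and testing on everything.

static void example_split(unsigned nbSamples, double splitPoint,
                          unsigned* nbTrain, unsigned* nbTest)
{
    *nbTrain = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint)
                                : nbSamples;
    *nbTest  = splitPoint < 1.0 ? nbSamples - *nbTrain : nbSamples;
}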
@@ -563,7 +582,7 @@
   ctx->freqs = NULL;
   ctx->d = d;
 
-  /* Fill offsets from the samlesSizes */
+  /* Fill offsets from the samplesSizes */
   {
     U32 i;
     ctx->offsets[0] = 0;
@@ -581,10 +600,17 @@
     for (i = 0; i < ctx->suffixSize; ++i) {
       ctx->suffix[i] = i;
     }
-    /* qsort doesn't take an opaque pointer, so pass as a global */
+    /* qsort doesn't take an opaque pointer, so pass as a global.
+     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
+     */
     g_ctx = ctx;
+#if defined(__OpenBSD__)
+    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#else
     qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
           (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+#endif
   }
   DISPLAYLEVEL(2, "Computing frequencies\n");
   /* For each dmer group (group of positions with the same first d bytes):
@@ -613,7 +639,7 @@
   /* Divide the data up into epochs of equal size.
    * We will select at least one segment from each epoch.
    */
-  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
+  const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k / 4));
   const U32 epochSize = (U32)(ctx->suffixSize / epochs);
   size_t epoch;
   DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
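
The revised epoch count, worked through with illustrative numbers: for dictBufferCapacity == 112640 (110 KB) and k == 1024, MAX(1, 112640/1024/4) == 27 epochs, where the old formula gave 110, so each epoch now draws its best segment from roughly four times as much suffix data. A minimal sketch (EX_MAX and example_* are not zstd API):

#define EX_MAX(a,b) ((a) > (b) ? (a) : (b))

static unsigned example_epochs(size_t dictBufferCapacity, unsigned k)
{
    /* at least one epoch, each roughly 4x larger than before */
    return (unsigned)EX_MAX((size_t)1, dictBufferCapacity / (size_t)k / 4);
}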
@@ -658,7 +684,7 @@
   BYTE* const dict = (BYTE*)dictBuffer;
   COVER_ctx_t ctx;
   COVER_map_t activeDmers;
-
+  parameters.splitPoint = 1.0;
   /* Initialize global data */
   g_displayLevel = parameters.zParams.notificationLevel;
   /* Checks */
@@ -677,7 +703,7 @@
   }
   /* Initialize context and activeDmers */
   if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
-                      parameters.d)) {
+                      parameters.d, parameters.splitPoint)) {
     return ERROR(GENERIC);
   }
   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
@@ -704,28 +730,65 @@
   }
 }
 
-/**
- * COVER_best_t is used for two purposes:
- * 1. Synchronizing threads.
- * 2. Saving the best parameters and dictionary.
- *
- * All of the methods except COVER_best_init() are thread safe if zstd is
- * compiled with multithreaded support.
- */
-typedef struct COVER_best_s {
-  ZSTD_pthread_mutex_t mutex;
-  ZSTD_pthread_cond_t cond;
-  size_t liveJobs;
-  void *dict;
-  size_t dictSize;
-  ZDICT_cover_params_t parameters;
-  size_t compressedSize;
-} COVER_best_t;
+
+
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+                                    const size_t *samplesSizes, const BYTE *samples,
+                                    size_t *offsets,
+                                    size_t nbTrainSamples, size_t nbSamples,
+                                    BYTE *const dict, size_t dictBufferCapacity) {
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Pointers */
+  ZSTD_CCtx *cctx;
+  ZSTD_CDict *cdict;
+  void *dst;
+  /* Local variables */
+  size_t dstCapacity;
+  size_t i;
+  /* Allocate dst with enough space to compress the maximum sized sample */
+  {
+    size_t maxSampleSize = 0;
+    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+    for (; i < nbSamples; ++i) {
+      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
+    }
+    dstCapacity = ZSTD_compressBound(maxSampleSize);
+    dst = malloc(dstCapacity);
+  }
+  /* Create the cctx and cdict */
+  cctx = ZSTD_createCCtx();
+  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
+                           parameters.zParams.compressionLevel);
+  if (!dst || !cctx || !cdict) {
+    goto _compressCleanup;
+  }
+  /* Compress each sample and sum their sizes (or error) */
+  totalCompressedSize = dictBufferCapacity;
+  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
+  for (; i < nbSamples; ++i) {
+    const size_t size = ZSTD_compress_usingCDict(
+        cctx, dst, dstCapacity, samples + offsets[i],
+        samplesSizes[i], cdict);
+    if (ZSTD_isError(size)) {
+      totalCompressedSize = ERROR(GENERIC);
+      goto _compressCleanup;
+    }
+    totalCompressedSize += size;
+  }
+_compressCleanup:
+  ZSTD_freeCCtx(cctx);
+  ZSTD_freeCDict(cdict);
+  if (dst) {
+    free(dst);
+  }
+  return totalCompressedSize;
+}
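The factored-out helper above defines the objective that parameter search minimizes: the returned score starts at dictBufferCapacity and adds the ZSTD_compress_usingCDict() size of every evaluated sample (the held-out test samples when splitPoint < 1.0, all samples otherwise), so candidates are compared by dictionary size plus total compressed size. A hedged usage sketch, assuming the buffers were prepared as in COVER_ctx_init():

    size_t const score = COVER_checkTotalCompressedSize(
        params, samplesSizes, samples, offsets,
        nbTrainSamples, nbSamples, dict, dictBufferCapacity);
    if (!ZSTD_isError(score)) {
        /* lower is better: score = dictBufferCapacity + sum of sample
         * sizes after compression with the candidate dictionary */
    }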
+
 
 /**
  * Initialize the `COVER_best_t`.
  */
-static void COVER_best_init(COVER_best_t *best) {
+void COVER_best_init(COVER_best_t *best) {
   if (best==NULL) return; /* compatible with init on NULL */
   (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
   (void)ZSTD_pthread_cond_init(&best->cond, NULL);
@@ -739,7 +802,7 @@
 /**
  * Wait until liveJobs == 0.
  */
-static void COVER_best_wait(COVER_best_t *best) {
+void COVER_best_wait(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -753,7 +816,7 @@
 /**
  * Call COVER_best_wait() and then destroy the COVER_best_t.
  */
-static void COVER_best_destroy(COVER_best_t *best) {
+void COVER_best_destroy(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -769,7 +832,7 @@
  * Called when a thread is about to be launched.
  * Increments liveJobs.
  */
-static void COVER_best_start(COVER_best_t *best) {
+void COVER_best_start(COVER_best_t *best) {
   if (!best) {
     return;
   }
@@ -783,7 +846,7 @@
  * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
  * If this dictionary is the best so far save it and its parameters.
  */
-static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
                               ZDICT_cover_params_t parameters, void *dict,
                               size_t dictSize) {
   if (!best) {
@@ -814,10 +877,10 @@
       best->parameters = parameters;
       best->compressedSize = compressedSize;
     }
-    ZSTD_pthread_mutex_unlock(&best->mutex);
     if (liveJobs == 0) {
       ZSTD_pthread_cond_broadcast(&best->cond);
     }
+    ZSTD_pthread_mutex_unlock(&best->mutex);
   }
 }
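The reordering above is a correctness fix, not a style change: broadcasting after the unlock left a window in which a waiter could wake, observe liveJobs == 0, and run COVER_best_destroy() while the finishing thread was still about to touch the condition variable. Signaling while the mutex is held closes that window. A minimal sketch of the pattern (all names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;
    static int g_liveJobs;

    static void job_finish(void) {
        pthread_mutex_lock(&g_mutex);
        if (--g_liveJobs == 0) {
            /* signal before unlocking so a waiter cannot tear the
             * condvar down while we still reference it */
            pthread_cond_broadcast(&g_cond);
        }
        pthread_mutex_unlock(&g_mutex);
    }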
 
@@ -832,7 +895,7 @@
 } COVER_tryParameters_data_t;
 
 /**
- * Tries a set of parameters and upates the COVER_best_t with the results.
+ * Tries a set of parameters and updates the COVER_best_t with the results.
  * This function is thread safe if zstd is compiled with multithreaded support.
  * It takes its parameters as an *OWNING* opaque pointer to support threading.
  */
@@ -863,7 +926,7 @@
                                               dictBufferCapacity, parameters);
     dictBufferCapacity = ZDICT_finalizeDictionary(
         dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
-        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples,
         parameters.zParams);
     if (ZDICT_isError(dictBufferCapacity)) {
       DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
@@ -871,49 +934,10 @@
     }
   }
   /* Check total compressed size */
-  {
-    /* Pointers */
-    ZSTD_CCtx *cctx;
-    ZSTD_CDict *cdict;
-    void *dst;
-    /* Local variables */
-    size_t dstCapacity;
-    size_t i;
-    /* Allocate dst with enough space to compress the maximum sized sample */
-    {
-      size_t maxSampleSize = 0;
-      for (i = 0; i < ctx->nbSamples; ++i) {
-        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
-      }
-      dstCapacity = ZSTD_compressBound(maxSampleSize);
-      dst = malloc(dstCapacity);
-    }
-    /* Create the cctx and cdict */
-    cctx = ZSTD_createCCtx();
-    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
-                             parameters.zParams.compressionLevel);
-    if (!dst || !cctx || !cdict) {
-      goto _compressCleanup;
-    }
-    /* Compress each sample and sum their sizes (or error) */
-    totalCompressedSize = dictBufferCapacity;
-    for (i = 0; i < ctx->nbSamples; ++i) {
-      const size_t size = ZSTD_compress_usingCDict(
-          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
-          ctx->samplesSizes[i], cdict);
-      if (ZSTD_isError(size)) {
-        totalCompressedSize = ERROR(GENERIC);
-        goto _compressCleanup;
-      }
-      totalCompressedSize += size;
-    }
-  _compressCleanup:
-    ZSTD_freeCCtx(cctx);
-    ZSTD_freeCDict(cdict);
-    if (dst) {
-      free(dst);
-    }
-  }
+  totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
+                                                       ctx->samples, ctx->offsets,
+                                                       ctx->nbTrainSamples, ctx->nbSamples,
+                                                       dict, dictBufferCapacity);
 
 _cleanup:
   COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
@@ -934,6 +958,8 @@
     ZDICT_cover_params_t *parameters) {
   /* constants */
   const unsigned nbThreads = parameters->nbThreads;
+  const double splitPoint =
+      parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
   const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
   const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
   const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
@@ -951,6 +977,10 @@
   POOL_ctx *pool = NULL;
 
   /* Checks */
+  if (splitPoint <= 0 || splitPoint > 1) {
+    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+    return ERROR(GENERIC);
+  }
   if (kMinK < kMaxD || kMaxK < kMinK) {
     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
     return ERROR(GENERIC);
@@ -981,7 +1011,7 @@
     /* Initialize the context for this value of d */
     COVER_ctx_t ctx;
     LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
-    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
+    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint)) {
       LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
       COVER_best_destroy(&best);
       POOL_free(pool);
@@ -1006,6 +1036,7 @@
       data->parameters = *parameters;
       data->parameters.k = k;
       data->parameters.d = d;
+      data->parameters.splitPoint = splitPoint;
       data->parameters.steps = kSteps;
       data->parameters.zParams.notificationLevel = g_displayLevel;
       /* Check the parameters */
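With the splitPoint plumbing above in place, callers of the COVER optimizer can hold out part of the sample set for evaluation instead of scoring on the training data itself. A hedged usage sketch, assuming dictBuffer, dictBufferCapacity, samplesBuffer, samplesSizes, and nbSamples are already prepared:

    ZDICT_cover_params_t params;
    memset(&params, 0, sizeof(params));
    params.steps = 40;
    params.splitPoint = 0.80;   /* train on the first 80% of samples and
                                   score on the remaining 20%; 0 selects
                                   the default */
    {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
            dictBuffer, dictBufferCapacity,
            samplesBuffer, samplesSizes, nbSamples, &params);
        if (ZDICT_isError(dictSize)) { /* see ZDICT_getErrorName(dictSize) */ }
    }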
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.h	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,83 @@
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  ZSTD_pthread_mutex_t mutex;
+  ZSTD_pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  ZDICT_cover_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+  U32 begin;
+  U32 end;
+  U32 score;
+} COVER_segment_t;
+
+/**
+ * Checks the total compressed size of a dictionary.
+ */
+size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
+                                      const size_t *samplesSizes, const BYTE *samples,
+                                      size_t *offsets,
+                                      size_t nbTrainSamples, size_t nbSamples,
+                                      BYTE *const dict, size_t dictBufferCapacity);
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples);
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+void COVER_best_init(COVER_best_t *best);
+
+/**
+ * Wait until liveJobs == 0.
+ */
+void COVER_best_wait(COVER_best_t *best);
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+void COVER_best_destroy(COVER_best_t *best);
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+void COVER_best_start(COVER_best_t *best);
+
+/**
+ * Called when a thread finishes executing, whether on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+                       ZDICT_cover_params_t parameters, void *dict,
+                       size_t dictSize);
--- a/contrib/python-zstandard/zstd/dictBuilder/divsufsort.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/divsufsort.c	Mon Oct 22 14:46:06 2018 -0400
@@ -1637,7 +1637,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else {
           assert(((s == 0) && (T[s] == c1)) || (s < 0));
@@ -1701,7 +1701,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else if(s != 0) {
           *j = ~s;
@@ -1785,7 +1785,7 @@
             if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
             k = SA + BUCKET_B(c2 = c0, c1);
           }
-          assert(k < j);
+          assert(k < j); assert(k != NULL);
           *k-- = s;
         } else if(s != 0) {
           *j = ~s;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/fastcover.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,728 @@
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "cover.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+#define FASTCOVER_MAX_F 31
+#define FASTCOVER_MAX_ACCEL 10
+#define DEFAULT_SPLITPOINT 0.75
+#define DEFAULT_F 20
+#define DEFAULT_ACCEL 1
+
+
+/*-*************************************
+*  Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...)                                                           \
+  {                                                                            \
+    fprintf(stderr, __VA_ARGS__);                                              \
+    fflush(stderr);                                                            \
+  }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
+  if (displayLevel >= l) {                                                     \
+    DISPLAY(__VA_ARGS__);                                                      \
+  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
+  if (displayLevel >= l) {                                                     \
+    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
+      g_time = clock();                                                        \
+      DISPLAY(__VA_ARGS__);                                                    \
+    }                                                                          \
+  }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+
+/*-*************************************
+* Hash Functions
+***************************************/
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+
+/**
+ * Hash the d-byte value pointed to by p, reduced modulo 2^f.
+ */
+static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
+  if (d == 6) {
+    return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1);
+  }
+  return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1);
+}
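The two multiplicative hashes above reduce each dmer to an index into a table of 2^f counters: a 6- or 8-byte little-endian read is multiplied by a large odd constant, the high bits of the product are taken, and the result is masked to the table size. A worked sizing example under the hypothetical choice f = 20:

    /* f = 20 => the frequency table has 1 << 20 = 1,048,576 slots;
     * stored as U32 counters, that is exactly 4 MiB. */
    unsigned const f = 20;
    size_t const tableSize  = (size_t)1 << f;           /* 1048576 */
    size_t const tableBytes = tableSize * sizeof(U32);  /* 4 MiB */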
+
+
+/*-*************************************
+* Acceleration
+***************************************/
+typedef struct {
+  unsigned finalize;    /* Percentage of training samples used for ZDICT_finalizeDictionary */
+  unsigned skip;        /* Number of dmers skipped between each dmer counted in computeFrequency */
+} FASTCOVER_accel_t;
+
+
+static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
+  { 100, 0 },   /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
+  { 100, 0 },   /* accel = 1 */
+  { 50, 1 },   /* accel = 2 */
+  { 34, 2 },   /* accel = 3 */
+  { 25, 3 },   /* accel = 4 */
+  { 20, 4 },   /* accel = 5 */
+  { 17, 5 },   /* accel = 6 */
+  { 14, 6 },   /* accel = 7 */
+  { 13, 7 },   /* accel = 8 */
+  { 11, 8 },   /* accel = 9 */
+  { 10, 9 },   /* accel = 10 */
+};
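To make the table concrete: accel = 1 hashes every dmer position and passes 100% of the training samples to ZDICT_finalizeDictionary(); accel = 5 sets skip = 4, so FASTCOVER_computeFrequency() advances by skip + 1 = 5 positions and counts only every fifth dmer, while finalize = 20 passes just 20% of the training samples on. Each level therefore does roughly 1/accel of the counting work, trading accuracy for speed.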
+
+
+/*-*************************************
+* Context
+***************************************/
+typedef struct {
+  const BYTE *samples;
+  size_t *offsets;
+  const size_t *samplesSizes;
+  size_t nbSamples;
+  size_t nbTrainSamples;
+  size_t nbTestSamples;
+  size_t nbDmers;
+  U32 *freqs;
+  unsigned d;
+  unsigned f;
+  FASTCOVER_accel_t accelParams;
+} FASTCOVER_ctx_t;
+
+
+/*-*************************************
+*  Helper functions
+***************************************/
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the function:
+ *
+ * Let F(d) be the frequency of all dmers with hash value d.
+ * Let S_i be the hash value of the dmer at position i of segment S, which has length k.
+ *
+ *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once a dmer with hash value d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
+                                              U32 *freqs, U32 begin, U32 end,
+                                              ZDICT_cover_params_t parameters,
+                                              U16* segmentFreqs) {
+  /* Constants */
+  const U32 k = parameters.k;
+  const U32 d = parameters.d;
+  const U32 f = ctx->f;
+  const U32 dmersInK = k - d + 1;
+
+  /* Try each segment (activeSegment) and save the best (bestSegment) */
+  COVER_segment_t bestSegment = {0, 0, 0};
+  COVER_segment_t activeSegment;
+
+  /* Reset the active segment: it starts empty at the beginning of the epoch. */
+  activeSegment.begin = begin;
+  activeSegment.end = begin;
+  activeSegment.score = 0;
+
+  /* Slide the activeSegment through the whole epoch.
+   * Save the best segment in bestSegment.
+   */
+  while (activeSegment.end < end) {
+    /* Get hash value of current dmer */
+    const size_t index = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);
+
+    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
+    if (segmentFreqs[index] == 0) {
+      activeSegment.score += freqs[index];
+    }
+    /* Increment end of segment and segmentFreqs */
+    activeSegment.end += 1;
+    segmentFreqs[index] += 1;
+    /* If the window is now too large, drop the first position */
+    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+      /* Get hash value of the dmer to be eliminated from active segment */
+      const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+      segmentFreqs[delIndex] -= 1;
+      /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
+      if (segmentFreqs[delIndex] == 0) {
+        activeSegment.score -= freqs[delIndex];
+      }
+      /* Increment start of segment */
+      activeSegment.begin += 1;
+    }
+
+    /* If this segment is the best so far save it */
+    if (activeSegment.score > bestSegment.score) {
+      bestSegment = activeSegment;
+    }
+  }
+
+  /* Zero out the rest of the segmentFreqs array */
+  while (activeSegment.begin < end) {
+    const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
+    segmentFreqs[delIndex] -= 1;
+    activeSegment.begin += 1;
+  }
+
+  {
+    /* Zero the frequency of the hash value of each dmer covered by the chosen segment. */
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
+      freqs[i] = 0;
+    }
+  }
+
+  return bestSegment;
+}
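A worked example of the scoring: if the dmers of the active segment hash to the indices {3, 7, 3}, segmentFreqs records multiplicities {3: 2, 7: 1} and the score is F(3) + F(7), with the repeated index 3 counted once. Sliding the window only touches the score when an index enters at multiplicity 1 or leaves at multiplicity 0, which is what keeps each step O(1).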
+
+
+static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
+                                     size_t maxDictSize, unsigned f,
+                                     unsigned accel) {
+  /* k, d, and f are required parameters */
+  if (parameters.d == 0 || parameters.k == 0) {
+    return 0;
+  }
+  /* d has to be 6 or 8 */
+  if (parameters.d != 6 && parameters.d != 8) {
+    return 0;
+  }
+  /* k <= maxDictSize */
+  if (parameters.k > maxDictSize) {
+    return 0;
+  }
+  /* d <= k */
+  if (parameters.d > parameters.k) {
+    return 0;
+  }
+  /* 0 < f <= FASTCOVER_MAX_F */
+  if (f > FASTCOVER_MAX_F || f == 0) {
+    return 0;
+  }
+  /* 0 < splitPoint <= 1 */
+  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
+    return 0;
+  }
+  /* 0 < accel <= 10 */
+  if (accel > 10 || accel == 0) {
+    return 0;
+  }
+  return 1;
+}
+
+
+/**
+ * Clean up a context initialized with `FASTCOVER_ctx_init()`.
+ */
+static void
+FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
+{
+    if (!ctx) return;
+
+    free(ctx->freqs);
+    ctx->freqs = NULL;
+
+    free(ctx->offsets);
+    ctx->offsets = NULL;
+}
+
+
+/**
+ * Calculate the frequency of the hash value of each dmer in ctx->samples
+ */
+static void
+FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
+{
+    const unsigned f = ctx->f;
+    const unsigned d = ctx->d;
+    const unsigned skip = ctx->accelParams.skip;
+    const unsigned readLength = MAX(d, 8);
+    size_t i;
+    assert(ctx->nbTrainSamples >= 5);
+    assert(ctx->nbTrainSamples <= ctx->nbSamples);
+    for (i = 0; i < ctx->nbTrainSamples; i++) {
+        size_t start = ctx->offsets[i];  /* start of current dmer */
+        size_t const currSampleEnd = ctx->offsets[i+1];
+        while (start + readLength <= currSampleEnd) {
+            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
+            freqs[dmerIndex]++;
+            start = start + skip + 1;
+        }
+    }
+}
+
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
+ */
+static int
+FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
+                   const void* samplesBuffer,
+                   const size_t* samplesSizes, unsigned nbSamples,
+                   unsigned d, double splitPoint, unsigned f,
+                   FASTCOVER_accel_t accelParams)
+{
+    const BYTE* const samples = (const BYTE*)samplesBuffer;
+    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+    /* Split samples into testing and training sets */
+    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
+    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
+    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
+    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
+
+    /* Checks */
+    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
+        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+                    (U32)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
+        return 0;
+    }
+
+    /* Check if there are at least 5 training samples */
+    if (nbTrainSamples < 5) {
+        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
+        return 0;
+    }
+
+    /* Check that there is at least one testing sample */
+    if (nbTestSamples < 1) {
+        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
+        return 0;
+    }
+
+    /* Zero the context */
+    memset(ctx, 0, sizeof(*ctx));
+    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
+                    (U32)trainingSamplesSize);
+    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
+                    (U32)testSamplesSize);
+
+    ctx->samples = samples;
+    ctx->samplesSizes = samplesSizes;
+    ctx->nbSamples = nbSamples;
+    ctx->nbTrainSamples = nbTrainSamples;
+    ctx->nbTestSamples = nbTestSamples;
+    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
+    ctx->d = d;
+    ctx->f = f;
+    ctx->accelParams = accelParams;
+
+    /* The offsets of each file */
+    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
+    if (ctx->offsets == NULL) {
+        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
+        FASTCOVER_ctx_destroy(ctx);
+        return 0;
+    }
+
+    /* Fill offsets from the samplesSizes */
+    {   U32 i;
+        ctx->offsets[0] = 0;
+        assert(nbSamples >= 5);
+        for (i = 1; i <= nbSamples; ++i) {
+            ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+        }
+    }
+
+    /* Initialize frequency array of size 2^f */
+    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
+    if (ctx->freqs == NULL) {
+        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
+        FASTCOVER_ctx_destroy(ctx);
+        return 0;
+    }
+
+    DISPLAYLEVEL(2, "Computing frequencies\n");
+    FASTCOVER_computeFrequency(ctx->freqs, ctx);
+
+    return 1;
+}
+
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t
+FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
+                          U32* freqs,
+                          void* dictBuffer, size_t dictBufferCapacity,
+                          ZDICT_cover_params_t parameters,
+                          U16* segmentFreqs)
+{
+  BYTE *const dict = (BYTE *)dictBuffer;
+  size_t tail = dictBufferCapacity;
+  /* Divide the data up into epochs of equal size.
+   * We will select at least one segment from each epoch.
+   */
+  const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
+  const U32 epochSize = (U32)(ctx->nbDmers / epochs);
+  size_t epoch;
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
+               epochSize);
+  /* Loop through the epochs until there are no more segments or the dictionary
+   * is full.
+   */
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
+    const U32 epochBegin = (U32)(epoch * epochSize);
+    const U32 epochEnd = epochBegin + epochSize;
+    size_t segmentSize;
+    /* Select a segment */
+    COVER_segment_t segment = FASTCOVER_selectSegment(
+        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);
+
+    /* If the segment covers no dmers, then we are out of content */
+    if (segment.score == 0) {
+      break;
+    }
+
+    /* Trim the segment if necessary and if it is too small then we are done */
+    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+    if (segmentSize < parameters.d) {
+      break;
+    }
+
+    /* We fill the dictionary from the back to allow the best segments to be
+     * referenced with the smallest offsets.
+     */
+    tail -= segmentSize;
+    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+    DISPLAYUPDATE(
+        2, "\r%u%%       ",
+        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+  }
+  DISPLAYLEVEL(2, "\r%79s\r", "");
+  return tail;
+}
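Concretely: with dictBufferCapacity = 112,640 (110 KiB) and k = 1,024 the loop above forms epochs = MAX(1, 110) = 110 epochs, and with, say, nbDmers = 11,000,000 each epoch spans epochSize = 100,000 dmer positions. One best segment of at most k bytes is then taken from each epoch in round-robin order until the dictionary tail is filled, which spreads the selected content across the whole training set.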
+
+
+/**
+ * Parameters for FASTCOVER_tryParameters().
+ */
+typedef struct FASTCOVER_tryParameters_data_s {
+    const FASTCOVER_ctx_t* ctx;
+    COVER_best_t* best;
+    size_t dictBufferCapacity;
+    ZDICT_cover_params_t parameters;
+} FASTCOVER_tryParameters_data_t;
+
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void FASTCOVER_tryParameters(void *opaque)
+{
+  /* Save parameters as local variables */
+  FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
+  const FASTCOVER_ctx_t *const ctx = data->ctx;
+  const ZDICT_cover_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Initialize array to keep track of frequency of dmer within activeSegment */
+  U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
+  /* Allocate space for hash table, dict, and freqs */
+  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
+  U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
+  if (!segmentFreqs || !dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
+  /* Build the dictionary */
+  { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
+                                                  parameters, segmentFreqs);
+    const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
+    dictBufferCapacity = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams);
+    if (ZDICT_isError(dictBufferCapacity)) {
+      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+      goto _cleanup;
+    }
+  }
+  /* Check total compressed size */
+  totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
+                                                       ctx->samples, ctx->offsets,
+                                                       ctx->nbTrainSamples, ctx->nbSamples,
+                                                       dict, dictBufferCapacity);
+_cleanup:
+  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+                    dictBufferCapacity);
+  free(data);
+  free(segmentFreqs);
+  free(dict);
+  free(freqs);
+}
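The allocations above are where the "6 * 2^f bytes per thread" figure quoted later in zdict.h comes from: each trial copies the U32 frequency table (4 * 2^f bytes) and adds a U16 segmentFreqs array (2 * 2^f bytes). With the default f = 20 that is 4 MiB + 2 MiB = 6 MiB per concurrent trial, on top of the shared ctx->freqs.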
+
+
+static void
+FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
+                               ZDICT_cover_params_t* coverParams)
+{
+    coverParams->k = fastCoverParams.k;
+    coverParams->d = fastCoverParams.d;
+    coverParams->steps = fastCoverParams.steps;
+    coverParams->nbThreads = fastCoverParams.nbThreads;
+    coverParams->splitPoint = fastCoverParams.splitPoint;
+    coverParams->zParams = fastCoverParams.zParams;
+}
+
+
+static void
+FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
+                                   ZDICT_fastCover_params_t* fastCoverParams,
+                                   unsigned f, unsigned accel)
+{
+    fastCoverParams->k = coverParams.k;
+    fastCoverParams->d = coverParams.d;
+    fastCoverParams->steps = coverParams.steps;
+    fastCoverParams->nbThreads = coverParams.nbThreads;
+    fastCoverParams->splitPoint = coverParams.splitPoint;
+    fastCoverParams->f = f;
+    fastCoverParams->accel = accel;
+    fastCoverParams->zParams = coverParams.zParams;
+}
+
+
+ZDICTLIB_API size_t
+ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer,
+                                const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_fastCover_params_t parameters)
+{
+    BYTE* const dict = (BYTE*)dictBuffer;
+    FASTCOVER_ctx_t ctx;
+    ZDICT_cover_params_t coverParams;
+    FASTCOVER_accel_t accelParams;
+    /* Initialize global data */
+    g_displayLevel = parameters.zParams.notificationLevel;
+    /* Use all samples for both training and testing; assign f and accel if not provided */
+    parameters.splitPoint = 1.0;
+    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
+    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
+    /* Convert to cover parameter */
+    memset(&coverParams, 0 , sizeof(coverParams));
+    FASTCOVER_convertToCoverParams(parameters, &coverParams);
+    /* Checks */
+    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
+                                   parameters.accel)) {
+      DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+      return ERROR(GENERIC);
+    }
+    if (nbSamples == 0) {
+      DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
+      return ERROR(GENERIC);
+    }
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+      DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                   ZDICT_DICTSIZE_MIN);
+      return ERROR(dstSize_tooSmall);
+    }
+    /* Assign corresponding FASTCOVER_accel_t to accelParams */
+    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
+    /* Initialize context */
+    if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                            coverParams.d, parameters.splitPoint, parameters.f,
+                            accelParams)) {
+      DISPLAYLEVEL(1, "Failed to initialize context\n");
+      return ERROR(GENERIC);
+    }
+    /* Build the dictionary */
+    DISPLAYLEVEL(2, "Building dictionary\n");
+    {
+      /* Initialize array to keep track of frequency of dmer within activeSegment */
+      U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
+      const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
+                                                dictBufferCapacity, coverParams, segmentFreqs);
+      const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
+      const size_t dictionarySize = ZDICT_finalizeDictionary(
+          dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+          samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
+      if (!ZSTD_isError(dictionarySize)) {
+          DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                      (U32)dictionarySize);
+      }
+      FASTCOVER_ctx_destroy(&ctx);
+      free(segmentFreqs);
+      return dictionarySize;
+    }
+}
+
+
+ZDICTLIB_API size_t
+ZDICT_optimizeTrainFromBuffer_fastCover(
+                    void* dictBuffer, size_t dictBufferCapacity,
+                    const void* samplesBuffer,
+                    const size_t* samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t* parameters)
+{
+    ZDICT_cover_params_t coverParams;
+    FASTCOVER_accel_t accelParams;
+    /* constants */
+    const unsigned nbThreads = parameters->nbThreads;
+    const double splitPoint =
+        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
+    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+    const unsigned kIterations =
+        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
+    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
+    /* Local variables */
+    const int displayLevel = parameters->zParams.notificationLevel;
+    unsigned iteration = 1;
+    unsigned d;
+    unsigned k;
+    COVER_best_t best;
+    POOL_ctx *pool = NULL;
+    /* Checks */
+    if (splitPoint <= 0 || splitPoint > 1) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
+      return ERROR(GENERIC);
+    }
+    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
+      return ERROR(GENERIC);
+    }
+    if (kMinK < kMaxD || kMaxK < kMinK) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
+      return ERROR(GENERIC);
+    }
+    if (nbSamples == 0) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
+      return ERROR(GENERIC);
+    }
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
+                   ZDICT_DICTSIZE_MIN);
+      return ERROR(dstSize_tooSmall);
+    }
+    if (nbThreads > 1) {
+      pool = POOL_create(nbThreads, 1);
+      if (!pool) {
+        return ERROR(memory_allocation);
+      }
+    }
+    /* Initialization */
+    COVER_best_init(&best);
+    memset(&coverParams, 0 , sizeof(coverParams));
+    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
+    accelParams = FASTCOVER_defaultAccelParameters[accel];
+    /* Turn down global display level to clean up display at level 2 and below */
+    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+    /* Loop through d first because each new value needs a new context */
+    LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+                      kIterations);
+    for (d = kMinD; d <= kMaxD; d += 2) {
+      /* Initialize the context for this value of d */
+      FASTCOVER_ctx_t ctx;
+      LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+      if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams)) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+        COVER_best_destroy(&best);
+        POOL_free(pool);
+        return ERROR(GENERIC);
+      }
+      /* Loop through k reusing the same context */
+      for (k = kMinK; k <= kMaxK; k += kStepSize) {
+        /* Prepare the arguments */
+        FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(
+            sizeof(FASTCOVER_tryParameters_data_t));
+        LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+        if (!data) {
+          LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+          COVER_best_destroy(&best);
+          FASTCOVER_ctx_destroy(&ctx);
+          POOL_free(pool);
+          return ERROR(GENERIC);
+        }
+        data->ctx = &ctx;
+        data->best = &best;
+        data->dictBufferCapacity = dictBufferCapacity;
+        data->parameters = coverParams;
+        data->parameters.k = k;
+        data->parameters.d = d;
+        data->parameters.splitPoint = splitPoint;
+        data->parameters.steps = kSteps;
+        data->parameters.zParams.notificationLevel = g_displayLevel;
+        /* Check the parameters */
+        if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
+                                       data->ctx->f, accel)) {
+          DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
+          free(data);
+          continue;
+        }
+        /* Call the function and pass ownership of data to it */
+        COVER_best_start(&best);
+        if (pool) {
+          POOL_add(pool, &FASTCOVER_tryParameters, data);
+        } else {
+          FASTCOVER_tryParameters(data);
+        }
+        /* Print status */
+        LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                           (U32)((iteration * 100) / kIterations));
+        ++iteration;
+      }
+      COVER_best_wait(&best);
+      FASTCOVER_ctx_destroy(&ctx);
+    }
+    LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+    /* Fill the output buffer and parameters with output of the best parameters */
+    {
+      const size_t dictSize = best.dictSize;
+      if (ZSTD_isError(best.compressedSize)) {
+        const size_t compressedSize = best.compressedSize;
+        COVER_best_destroy(&best);
+        POOL_free(pool);
+        return compressedSize;
+      }
+      FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
+      memcpy(dictBuffer, best.dict, dictSize);
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return dictSize;
+    }
+
+}
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Mon Oct 22 14:46:06 2018 -0400
@@ -293,7 +293,7 @@
             refinedEnd = refinedStart + selectedCount;
         }
 
-        /* evaluate gain based on new ref */
+        /* evaluate gain based on new dict */
         start = refinedStart;
         pos = suffix[refinedStart];
         end = start;
@@ -341,7 +341,7 @@
         for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
             savings[i] = savings[i-1] + (lengthList[i] * (i-3));
 
-        DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f)  \n",
+        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f)  \n",
                      (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
 
         solution.pos = (U32)pos;
@@ -581,7 +581,7 @@
 
 typedef struct
 {
-    ZSTD_CCtx* ref;    /* contains reference to dictionary */
+    ZSTD_CDict* dict;    /* dictionary */
     ZSTD_CCtx* zc;     /* working context */
     void* workPlace;   /* must be ZSTD_BLOCKSIZE_MAX allocated */
 } EStats_ress_t;
@@ -597,8 +597,9 @@
     size_t cSize;
 
     if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
-    {   size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
-        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
+    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
+        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
+
     }
     cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
     if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
@@ -697,7 +698,7 @@
     short litLengthNCount[MaxLL+1];
     U32 repOffset[MAXREPOFFSET];
     offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
-    EStats_ress_t esr;
+    EStats_ress_t esr = { NULL, NULL, NULL };
     ZSTD_parameters params;
     U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
     size_t pos = 0, errorCode;
@@ -708,14 +709,6 @@
 
     /* init */
     DEBUGLOG(4, "ZDICT_analyzeEntropy");
-    esr.ref = ZSTD_createCCtx();
-    esr.zc = ZSTD_createCCtx();
-    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
-    if (!esr.ref || !esr.zc || !esr.workPlace) {
-        eSize = ERROR(memory_allocation);
-        DISPLAYLEVEL(1, "Not enough memory \n");
-        goto _cleanup;
-    }
     if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
     for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
     for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
@@ -724,14 +717,17 @@
     memset(repOffset, 0, sizeof(repOffset));
     repOffset[1] = repOffset[4] = repOffset[8] = 1;
     memset(bestRepOffset, 0, sizeof(bestRepOffset));
-    if (compressionLevel<=0) compressionLevel = g_compressionLevel_default;
+    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
     params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
-    {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
-        if (ZSTD_isError(beginResult)) {
-            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult));
-            eSize = ERROR(GENERIC);
-            goto _cleanup;
-    }   }
+
+    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
+    esr.zc = ZSTD_createCCtx();
+    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
+    if (!esr.dict || !esr.zc || !esr.workPlace) {
+        eSize = ERROR(memory_allocation);
+        DISPLAYLEVEL(1, "Not enough memory \n");
+        goto _cleanup;
+    }
 
     /* collect stats on all samples */
     for (u=0; u<nbFiles; u++) {
@@ -856,7 +852,7 @@
     eSize += 12;
 
 _cleanup:
-    ZSTD_freeCCtx(esr.ref);
+    ZSTD_freeCDict(esr.dict);
     ZSTD_freeCCtx(esr.zc);
     free(esr.workPlace);
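The rewrite above replaces a per-sample ZSTD_copyCCtx() from a pre-seeded reference context with a single ZSTD_CDict that every sample reuses via ZSTD_compressBegin_usingCDict(). Digesting the dictionary once is the point; ZSTD_dlm_byRef avoids copying the dictionary bytes, and ZSTD_dct_rawContent skips header parsing, since the buffer under analysis is raw content rather than a finished dictionary. A condensed sketch of the same setup (names mirror the hunk; zc is a working ZSTD_CCtx):

    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(
        dictBuffer, dictBufferSize,
        ZSTD_dlm_byRef,        /* reference the buffer, do not copy it */
        ZSTD_dct_rawContent,   /* treat it as raw content, not a framed dict */
        params.cParams, ZSTD_defaultCMem);
    /* per sample: */
    ZSTD_compressBegin_usingCDict(zc, cdict);
    /* ... ZSTD_compressBlock(zc, workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize) ... */
    ZSTD_freeCDict(cdict);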
 
@@ -867,13 +863,13 @@
 
 size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                           const void* customDictContent, size_t dictContentSize,
-                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                          ZDICT_params_t params)
+                          const void* samplesBuffer, const size_t* samplesSizes,
+                          unsigned nbSamples, ZDICT_params_t params)
 {
     size_t hSize;
 #define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
     BYTE header[HBUFFSIZE];
-    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
 
     /* check conditions */
@@ -914,11 +910,12 @@
 }
 
 
-size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
-                                                 ZDICT_params_t params)
+static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
+        void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+        ZDICT_params_t params)
 {
-    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
     U32 const notificationLevel = params.notificationLevel;
     size_t hSize = 8;
 
@@ -947,7 +944,11 @@
     return MIN(dictBufferCapacity, hSize+dictContentSize);
 }
 
-
+/* Hidden declaration for dbio.c */
+size_t ZDICT_trainFromBuffer_unsafe_legacy(
+                            void* dictBuffer, size_t maxDictSize,
+                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                            ZDICT_legacy_params_t params);
 /*! ZDICT_trainFromBuffer_unsafe_legacy() :
 *   Warning : `samplesBuffer` must be followed by noisy guard band.
 *   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
@@ -991,8 +992,10 @@
             U32 const pos = dictList[u].pos;
             U32 const length = dictList[u].length;
             U32 const printedLength = MIN(40, length);
-            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize))
+            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
+                free(dictList);
                 return ERROR(GENERIC);   /* should never happen */
+            }
             DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
                          u, length, pos, dictList[u].savings);
             ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
@@ -1082,17 +1085,17 @@
 size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
 {
-    ZDICT_cover_params_t params;
+    ZDICT_fastCover_params_t params;
     DEBUGLOG(3, "ZDICT_trainFromBuffer");
     memset(&params, 0, sizeof(params));
     params.d = 8;
     params.steps = 4;
-    /* Default to level 6 since no compression level information is available */
+    /* Default to level 3 since no compression level information is available */
-    params.zParams.compressionLevel = 6;
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
-    params.zParams.notificationLevel = ZSTD_DEBUG;
+    params.zParams.compressionLevel = 3;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
+    params.zParams.notificationLevel = DEBUGLEVEL;
 #endif
-    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity,
+    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
                                                samplesBuffer, samplesSizes, nbSamples,
                                                &params);
 }
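After this change the one-call trainer drives the fastCover optimizer with d=8, steps=4, the f=20 / accel=1 defaults, and compression level 3. A hedged usage sketch, assuming samplesBuffer, samplesSizes, and nbSamples describe concatenated samples:

    size_t const dictCapacity = 100 * 1024;   /* ~100 KB is a reasonable target */
    void* const dictBuffer = malloc(dictCapacity);
    size_t const dictSize = ZDICT_trainFromBuffer(
        dictBuffer, dictCapacity, samplesBuffer, samplesSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        /* ZDICT_getErrorName(dictSize) describes the failure */
    }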
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Mon Oct 22 14:46:06 2018 -0400
@@ -39,7 +39,8 @@
 
 /*! ZDICT_trainFromBuffer():
  *  Train a dictionary from an array of samples.
- *  Redirect towards ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4.
+ *  Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
+ *  f=20, and accel=1.
  *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
  *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
  *  The resulting dictionary will be saved into `dictBuffer`.
@@ -52,7 +53,8 @@
  *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
  */
 ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
-                                    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
+                                    const void* samplesBuffer,
+                                    const size_t* samplesSizes, unsigned nbSamples);
 
 
 /*======   Helper functions   ======*/
@@ -84,11 +86,22 @@
 typedef struct {
     unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
     unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
-    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
     unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), with 1.0 meaning all samples are used for both training and testing */
     ZDICT_params_t zParams;
 } ZDICT_cover_params_t;
 
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned f;                  /* log2 of the size of the frequency array : constraint: 0 < f <= 31 : 0 means default (20) */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    double splitPoint;           /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used for training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), with 1.0 meaning all samples are used for both training and testing */
+    unsigned accel;              /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default (1) */
+    ZDICT_params_t zParams;
+} ZDICT_fastCover_params_t;
 
 /*! ZDICT_trainFromBuffer_cover():
  *  Train a dictionary from an array of samples using the COVER algorithm.
@@ -115,9 +128,9 @@
  * dictionary constructed with those parameters is stored in `dictBuffer`.
  *
  * All of the parameters d, k, steps are optional.
- * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
  * if steps is zero it defaults to its default value.
- * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [16, 2048].
+ * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
  *
  * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
  *           or an error code, which can be tested with ZDICT_isError().
@@ -129,6 +142,48 @@
     const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
           ZDICT_cover_params_t* parameters);
 
+/*! ZDICT_trainFromBuffer_fastCover():
+ *  Train a dictionary from an array of samples using a modified version of COVER algorithm.
+ *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ *  d and k are required.
+ *  All other parameters are optional and will use default values if not provided.
+ *  The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *  Note: ZDICT_trainFromBuffer_fastCover() requires about 1 byte of memory for each input byte, plus another 6 * 2^f bytes of memory.
+ *  Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ *        It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousands samples, though this can vary a lot.
+ *        It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
+                    size_t dictBufferCapacity, const void *samplesBuffer,
+                    const size_t *samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t parameters);
+
+/*! ZDICT_optimizeTrainFromBuffer_fastCover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations (specifically, k and d combinations)
+ * and picks the best parameters. `*parameters` is filled with the best parameters found,
+ * and the dictionary constructed with those parameters is stored in `dictBuffer`.
+ * All of the parameters d, k, steps, f, and accel are optional.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
+ * If steps is zero, the default value (40) is used.
+ * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [50, 2000].
+ * If f is zero, default value of 20 is used.
+ * If accel is zero, default value of 1 is used.
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *           or an error code, which can be tested with ZDICT_isError().
+ *           On success `*parameters` contains the parameters selected.
+ * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 1 byte of memory for each input byte and additionally another 6 * 2^f bytes of memory for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
+                    size_t dictBufferCapacity, const void* samplesBuffer,
+                    const size_t* samplesSizes, unsigned nbSamples,
+                    ZDICT_fastCover_params_t* parameters);
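A hedged sketch of driving the new optimizer, leaving k and d at 0 so it searches d in {6, 8} and k in [50, 2000] (dictBuffer, dictBufferCapacity, and the sample arrays are assumed prepared):

    ZDICT_fastCover_params_t params;
    memset(&params, 0, sizeof(params));
    params.splitPoint = 0.75;   /* 0 would also select this default */
    params.accel = 1;
    {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(
            dictBuffer, dictBufferCapacity,
            samplesBuffer, samplesSizes, nbSamples, &params);
        /* on success, params.k and params.d hold the winning values */
        if (ZDICT_isError(dictSize)) { /* handle error */ }
    }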
+
 /*! ZDICT_finalizeDictionary():
  * Given a custom content as a basis for dictionary, and a set of samples,
  * finalize dictionary by adding headers and statistics.
--- a/contrib/python-zstandard/zstd/zstd.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd/zstd.h	Mon Oct 22 14:46:06 2018 -0400
@@ -35,31 +35,43 @@
 #endif
 
 
-/*******************************************************************************************************
+/*******************************************************************************
   Introduction
 
-  zstd, short for Zstandard, is a fast lossless compression algorithm,
-  targeting real-time compression scenarios at zlib-level and better compression ratios.
-  The zstd compression library provides in-memory compression and decompression functions.
-  The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
-  Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
+  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
+  real-time compression scenarios at zlib-level and better compression ratios.
+  The zstd compression library provides in-memory compression and decompression
+  functions.
+
+  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
+  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
+  caution, as they require more memory. The library also offers negative
+  compression levels, which extend the range of speed vs. ratio preferences.
+  The lower the level, the faster the speed (at the cost of compression ratio).
+
   Compression can be done in:
     - a single step (described as Simple API)
     - a single step, reusing a context (described as Explicit context)
     - unbounded multiple steps (described as Streaming compression)
-  The compression ratio achievable on small data can be highly improved using a dictionary in:
+
+  The compression ratio achievable on small data can be highly improved using
+  a dictionary. Dictionary compression can be performed in:
     - a single step (described as Simple dictionary API)
-    - a single step, reusing a dictionary (described as Bulk-processing dictionary API)
+    - a single step, reusing a dictionary (described as Bulk-processing
+      dictionary API)
 
-  Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
-  Advanced experimental APIs shall never be used with a dynamic library.
-  They are not "stable", their definition may change in the future. Only static linking is allowed.
-*********************************************************************************************************/
+  Advanced experimental functions can be accessed using
+  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
+
+  Advanced experimental APIs should never be used with a dynamically-linked
+  library. They are not "stable"; their definitions or signatures may change in
+  the future. Only static linking is allowed.
+*******************************************************************************/
 
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    3
-#define ZSTD_VERSION_RELEASE  4
+#define ZSTD_VERSION_RELEASE  6
 
 #define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
 ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< useful to check dll version */
@@ -68,8 +80,14 @@
 #define ZSTD_QUOTE(str) #str
 #define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
 #define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
-ZSTDLIB_API const char* ZSTD_versionString(void);   /* added in v1.3.0 */
+ZSTDLIB_API const char* ZSTD_versionString(void);   /* v1.3.0+ */
 
+/***************************************
+*  Default constant
+***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+#  define ZSTD_CLEVEL_DEFAULT 3
+#endif
 
 /***************************************
 *  Simple API
@@ -96,7 +114,7 @@
  *  `src` should point to the start of a ZSTD encoded frame.
  *  `srcSize` must be at least as large as the frame header.
  *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- *  @return : - decompressed size of the frame in `src`, if known
+ *  @return : - decompressed size of `src` frame content, if known
  *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
  *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
  *   note 1 : a 0 return value means the frame is valid but "empty".
@@ -106,7 +124,8 @@
  *            Optionally, application can rely on some implicit limit,
  *            as ZSTD_decompress() only needs an upper bound of decompressed size.
  *            (For example, data could be necessarily cut into blocks <= 16 KB).
- *   note 3 : decompressed size is always present when compression is done with ZSTD_compress()
+ *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
  *   note 4 : decompressed size can be very large (64-bits value),
  *            potentially larger than what local system can handle as a single memory segment.
  *            In which case, it's necessary to use streaming mode to decompress data.
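As an illustrative sketch of the @return contract above (src/srcSize are placeholders, not from this patch):

    unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
    if (rSize == ZSTD_CONTENTSIZE_ERROR) {
        /* not a valid zstd frame, or srcSize too small */
    } else if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
        /* size not recorded : fall back to streaming decompression (note 2) */
    } else {
        /* allocate rSize bytes (0 means a valid but "empty" frame, note 1),
         * then ZSTD_decompress(dst, rSize, src, srcSize) */
    }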
@@ -123,8 +142,7 @@
  *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
  *  "empty", "unknown" and "error" results to the same return value (0),
  *  while ZSTD_getFrameContentSize() gives them separate return values.
- * `src` is the start of a zstd compressed frame.
- * @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
+ * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
 ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
 
 
@@ -205,7 +223,8 @@
  *  When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
  *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
  *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- *  `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */
+ *  `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within the CDict.
+ *  Note : A ZSTD_CDict can be created with an empty dictionary, but it is inefficient for small data. */
 ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
                                          int compressionLevel);
 
@@ -217,7 +236,9 @@
  *  Compression using a digested Dictionary.
  *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
  *  Note that compression level is decided during dictionary creation.
- *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no).
+ *  Note : ZSTD_compress_usingCDict() can be used with a ZSTD_CDict created from an empty dictionary.
+ *         But it is inefficient for small data, and it is recommended to use ZSTD_compressCCtx(). */
 ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                             void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
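For instance, a digested dictionary could be created once and reused across many small inputs, roughly as follows (a sketch with error handling mostly elided; all buffer names are placeholders):

    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3 /* level */);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t const csize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                  src, srcSize, cdict);
    if (ZSTD_isError(csize)) { /* handle error */ }
    /* ... reuse cctx and cdict for further inputs ... */
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);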
@@ -272,39 +293,44 @@
 *  since it will play nicer with system's memory, by re-using already allocated memory.
 *  Use one separate ZSTD_CStream per thread for parallel execution.
 *
-*  Start a new compression by initializing ZSTD_CStream.
+*  Start a new compression by initializing ZSTD_CStream context.
 *  Use ZSTD_initCStream() to start a new compression operation.
-*  Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
+*  Use variants ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for streaming with a dictionary (experimental section).
 *
-*  Use ZSTD_compressStream() repetitively to consume input stream.
-*  The function will automatically update both `pos` fields.
-*  Note that it may not consume the entire input, in which case `pos < size`,
-*  and it's up to the caller to present again remaining data.
+*  Use ZSTD_compressStream() as many times as necessary to consume input stream.
+*  The function will automatically update both `pos` fields within `input` and `output`.
+*  Note that the function may not consume the entire input,
+*  for example, because the output buffer is already full,
+*  in which case `input.pos < input.size`.
+*  The caller must check if input has been entirely consumed.
+*  If not, the caller must make some room to receive more compressed data,
+*  typically by emptying the output buffer, or allocating a new output buffer,
+*  and then present the remaining input data again.
 *  @return : a size hint, preferred nb of bytes to use as input for next function call
 *            or an error code, which can be tested using ZSTD_isError().
 *            Note 1 : it's just a hint, to help latency a little, any other value will work fine.
 *            Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
 *
-*  At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
-*  `output->pos` will be updated.
-*  Note that some content might still be left within internal buffer if `output->size` is too small.
-*  @return : nb of bytes still present within internal buffer (0 if it's empty)
+*  At any moment, it's possible to flush whatever data might remain stuck within the internal buffer,
+*  using ZSTD_flushStream(). `output->pos` will be updated.
+*  Note that, if `output->size` is too small, a single invocation of ZSTD_flushStream() might not be enough (return code > 0).
+*  In that case, make some room to receive more compressed data, and call ZSTD_flushStream() again.
+*  @return : 0 if internal buffers are entirely flushed,
+*            >0 if some data is still present within the internal buffer (the value is a minimal estimation of the remaining size),
 *            or an error code, which can be tested using ZSTD_isError().
 *
 *  ZSTD_endStream() instructs to finish a frame.
 *  It will perform a flush and write frame epilogue.
 *  The epilogue is required for decoders to consider a frame completed.
-*  ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
-*  In which case, call again ZSTD_endStream() to complete the flush.
+*  The flush operation is the same, and follows the same rules as ZSTD_flushStream().
 *  @return : 0 if frame fully completed and fully flushed,
-             or >0 if some data is still present within internal buffer
-                  (value is minimum size estimation for remaining data to flush, but it could be more)
+*            >0 if some data is still present within the internal buffer (the value is a minimal estimation of the remaining size),
 *            or an error code, which can be tested using ZSTD_isError().
 *
 * *******************************************************************/
 
 typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
-                                 /* Continue to distinguish them for compatibility with versions <= v1.2.0 */
+                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
 /*===== ZSTD_CStream management functions =====*/
 ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
 ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
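The streaming protocol described above might look roughly like this in practice (a sketch, not from this patch; readMore() and writeOut() are hypothetical I/O helpers):

    ZSTD_CStream* const zcs = ZSTD_createCStream();
    ZSTD_initCStream(zcs, 3 /* compression level */);
    ZSTD_inBuffer  input  = { inBuf,  0,           0 };
    ZSTD_outBuffer output = { outBuf, outCapacity, 0 };
    while ((input.size = readMore(inBuf)) != 0) {
        input.pos = 0;
        while (input.pos < input.size) {         /* consume all buffered input */
            size_t const hint = ZSTD_compressStream(zcs, &output, &input);
            if (ZSTD_isError(hint)) { /* handle error */ }
            if (output.pos == output.size) {     /* output full : empty it */
                writeOut(outBuf, output.pos);
                output.pos = 0;
            }
        }
    }
    {   size_t remaining;
        do {                                     /* finish frame : write epilogue */
            remaining = ZSTD_endStream(zcs, &output);
            if (ZSTD_isError(remaining)) { /* handle error, then stop */ }
            writeOut(outBuf, output.pos);
            output.pos = 0;
        } while (remaining > 0);
    }
    ZSTD_freeCStream(zcs);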
@@ -359,21 +385,28 @@
 
 
 
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
 /****************************************************************************************
- * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
+ *   ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
  * The definitions in this section are considered experimental.
  * They should never be used with a dynamic library, as prototypes may change in the future.
  * They are provided for advanced scenarios.
  * Use them only in association with static linking.
  * ***************************************************************************************/
 
-#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
-#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+ZSTDLIB_API int ZSTD_minCLevel(void);  /*!< minimum negative compression level allowed */
 
-/* --- Constants ---*/
-#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
+/* ---  Constants  ---*/
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* v0.8+ */
+#define ZSTD_MAGIC_DICTIONARY       0xEC30A437   /* v0.7+ */
 #define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
-#define ZSTD_MAGIC_DICTIONARY       0xEC30A437   /* >= v0.7.0 */
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX   (1<<ZSTD_BLOCKSIZELOG_MAX)   /* define, for static allocation */
 
 #define ZSTD_WINDOWLOG_MAX_32   30
 #define ZSTD_WINDOWLOG_MAX_64   31
@@ -390,9 +423,10 @@
 #define ZSTD_SEARCHLOG_MIN       1
 #define ZSTD_SEARCHLENGTH_MAX    7   /* only for ZSTD_fast, other strategies are limited to 6 */
 #define ZSTD_SEARCHLENGTH_MIN    3   /* only for ZSTD_btopt, other strategies are limited to 4 */
-#define ZSTD_TARGETLENGTH_MIN    1   /* only used by btopt, btultra and btfast */
+#define ZSTD_TARGETLENGTH_MAX  ZSTD_BLOCKSIZE_MAX
+#define ZSTD_TARGETLENGTH_MIN    0   /* note : comparing this constant to an unsigned results in a tautological test */
+#define ZSTD_LDM_MINMATCH_MAX 4096
 #define ZSTD_LDM_MINMATCH_MIN    4
-#define ZSTD_LDM_MINMATCH_MAX 4096
 #define ZSTD_LDM_BUCKETSIZELOG_MAX 8
 
 #define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size to know frame header size */
@@ -404,7 +438,8 @@
 static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
 
 
-/*--- Advanced types ---*/
+
+/* ---  Advanced types  --- */
 typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
                ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy;   /* from faster to stronger */
 
@@ -480,9 +515,9 @@
 ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
 
 /*! ZSTD_frameHeaderSize() :
-*   `src` should point to the start of a ZSTD frame
-*   `srcSize` must be >= ZSTD_frameHeaderSize_prefix.
-*   @return : size of the Frame Header */
+ *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ *           or an error code (if srcSize is too small) */
 ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
 
 
@@ -711,29 +746,48 @@
 
 /*! ZSTD_resetCStream() :
  *  start a new compression job, using same parameters from previous job.
- *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place..
+ *  This is typically useful to skip the dictionary loading stage, since it will re-use it in-place.
  *  Note that zcs must be init at least once before using ZSTD_resetCStream().
  *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
  *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
  *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
  *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
- * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ * @return : 0, or an error code (which can be tested using ZSTD_isError())
+ */
 ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
 
 
 typedef struct {
-    unsigned long long ingested;
-    unsigned long long consumed;
-    unsigned long long produced;
+    unsigned long long ingested;   /* nb input bytes read and buffered */
+    unsigned long long consumed;   /* nb input bytes actually compressed */
+    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
+    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
+    unsigned currentJobID;         /* MT only : latest started job nb */
+    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
 } ZSTD_frameProgression;
 
-/* ZSTD_getFrameProgression():
+/* ZSTD_getFrameProgression() :
  * tells how much data has been ingested (read from input)
  * consumed (input actually compressed) and produced (output) for current frame.
- * Therefore, (ingested - consumed) is amount of input data buffered internally, not yet compressed.
- * Can report progression inside worker threads (multi-threading and non-blocking mode).
+ * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
+ * Aggregates progression inside active worker threads.
  */
-ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
+
+/*! ZSTD_toFlushNow() :
+ *  Tell how many bytes are ready to be flushed immediately.
+ *  Useful for multithreading scenarios (nbWorkers >= 1).
+ *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
+ *  and check its output buffer.
+ * @return : amount of data stored in oldest job and ready to be flushed immediately.
+ *  if @return == 0, it means either :
+ *  + there is no active job (could be checked with ZSTD_frameProgression()), or
+ *  + oldest job is still actively compressing data,
+ *    but everything it has produced has also been flushed so far,
+ *    therefore flushing speed is currently limited by production speed of oldest job
+ *    irrespective of the speed of concurrent newer jobs.
+ */
+ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
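A monitoring thread might combine the two probes above along these lines (hypothetical sketch, for nbWorkers >= 1):

    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    unsigned long long const buffered = fp.ingested - fp.consumed;   /* input not yet compressed */
    size_t const ready = ZSTD_toFlushNow(cctx);   /* bytes flushable right now; 0 => one of the two cases above */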
 
 
 
@@ -880,6 +934,11 @@
     unsigned dictID;
     unsigned checksumFlag;
 } ZSTD_frameHeader;
+/** ZSTD_getFrameHeader() :
+ *  decode the Frame Header, or require a larger `srcSize`.
+ * @return : 0, `zfhPtr` is correctly filled,
+ *          >0, `srcSize` is too small; the return value is the wanted `srcSize` amount,
+ *           or an error code, which can be tested using ZSTD_isError() */
 ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
 ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
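The @return contract of ZSTD_getFrameHeader() can be exercised incrementally, e.g. (schematic sketch; buffer refilling is left out):

    ZSTD_frameHeader zfh;
    size_t const ret = ZSTD_getFrameHeader(&zfh, buf, have);
    if (ZSTD_isError(ret)) { /* not a valid frame header */ }
    else if (ret > 0)      { /* need `ret` bytes in total : read more, retry */ }
    else                   { /* zfh.frameContentSize, zfh.windowSize, zfh.dictID ... are filled */ }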
 
@@ -901,23 +960,15 @@
 /**       New advanced API (experimental)       */
 /* ============================================ */
 
-/* notes on API design :
- *   In this proposal, parameters are pushed one by one into an existing context,
- *   and then applied on all subsequent compression jobs.
- *   When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
+/* API design :
+ *   In this advanced API, parameters are pushed one by one into an existing context,
+ *   using ZSTD_CCtx_set*() functions.
+ *   Pushed parameters are sticky : they are applied to the next job, and any subsequent job.
+ *   It's possible to reset parameters to "default" using ZSTD_CCtx_reset().
+ *   Important : "sticky" parameters only work with `ZSTD_compress_generic()` !
+ *               For any other entry point, "sticky" parameters are ignored !
  *
  *   This API is intended to replace all others advanced / experimental API entry points.
- *   But it stands a reasonable chance to become "stable", after a reasonable testing period.
- */
-
-/* note on naming convention :
- *   Initially, the API favored names like ZSTD_setCCtxParameter() .
- *   In this proposal, convention is changed towards ZSTD_CCtx_setParameter() .
- *   The main driver is that it identifies more clearly the target object type.
- *   It feels clearer when considering multiple targets :
- *   ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter())
- *   ZSTD_CCtxParams_setParameter()  (rather than ZSTD_setCCtxParamsParameter() )
- *   etc...
  */
 
 /* note on enum design :
@@ -947,7 +998,7 @@
     /* compression parameters */
     ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
                               * Default level is ZSTD_CLEVEL_DEFAULT==3.
-                              * Special: value 0 means "do not change cLevel".
+                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
                               * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type.
                               * Note 2 : setting a level sets all default values of other compression parameters.
                               * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */
@@ -956,16 +1007,19 @@
                               * Special: value 0 means "use default windowLog".
                               * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
                               *       requires explicitly allowing such window size during decompression stage. */
-    ZSTD_p_hashLog,          /* Size of the probe table, as a power of 2.
+    ZSTD_p_hashLog,          /* Size of the initial probe table, as a power of 2.
                               * Resulting table size is (1 << (hashLog+2)).
                               * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
                               * Larger tables improve compression ratio of strategies <= dFast,
                               * and improve speed of strategies > dFast.
                               * Special: value 0 means "use default hashLog". */
-    ZSTD_p_chainLog,         /* Size of the full-search table, as a power of 2.
+    ZSTD_p_chainLog,         /* Size of the multi-probe search table, as a power of 2.
                               * Resulting table size is (1 << (chainLog+2)).
+                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
                               * Larger tables result in better and slower compression.
                               * This parameter is useless when using "fast" strategy.
+                              * Note it's still useful when using "dfast" strategy,
+                              * in which case it defines a secondary probe table.
                               * Special: value 0 means "use default chainLog". */
     ZSTD_p_searchLog,        /* Number of search attempts, as a power of 2.
                               * More attempts result in better and slower compression.
@@ -1047,27 +1101,52 @@
     /* experimental parameters - no stability guaranteed                   */
     /* =================================================================== */
 
-    ZSTD_p_compressLiterals=1000, /* control huffman compression of literals (enabled) by default.
-                              * disabling it improves speed and decreases compression ratio by a large amount.
-                              * note : this setting is automatically updated when changing compression level.
-                              *        positive compression levels set ZSTD_p_compressLiterals to 1.
-                              *        negative compression levels set ZSTD_p_compressLiterals to 0. */
-
     ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
                               * even when referencing into Dictionary content (default:0) */
+    ZSTD_p_forceAttachDict,  /* ZSTD supports usage of a CDict in-place
+                              * (avoiding having to copy the compression tables
+                              * from the CDict into the working context). Using
+                              * a CDict in this way saves an initial setup step,
+                              * but comes at the cost of more work per byte of
+                              * input. ZSTD has a simple internal heuristic that
+                              * guesses which strategy will be faster. You can
+                              * use this flag to override that guess.
+                              *
+                              * Note that the by-reference, in-place strategy is
+                              * only used when reusing a compression context
+                              * with compatible compression parameters. (If
+                              * incompatible / uninitialized, the working
+                              * context needs to be cleared anyway, which is
+                              * about as expensive as overwriting it with the
+                              * dictionary context, so there's no savings in
+                              * using the CDict by-ref.)
+                              *
+                              * Values greater than 0 force attaching the dict.
+                              * Values less than 0 force copying the dict.
+                              * 0 selects the default heuristic-guided behavior.
+                              */
 
 } ZSTD_cParameter;
 
 
 /*! ZSTD_CCtx_setParameter() :
  *  Set one compression parameter, selected by enum ZSTD_cParameter.
- *  Setting a parameter is generally only possible during frame initialization (before starting compression),
- *  except for a few exceptions which can be updated during compression: compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
- *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
- *  @result : informational value (typically, value being set clamped correctly),
+ *  Setting a parameter is generally only possible during frame initialization (before starting compression).
+ *  Exception : when using multi-threading mode (nbWorkers >= 1),
+ *              the following parameters can be updated _during_ compression (within the same frame):
+ *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ *              new parameters will be active for the next job, or after a flush().
+ *  Note : when `value` type is not unsigned (int, or enum), cast it to unsigned for proper type checking.
+ *  @result : informational value (typically, value being set, correctly clamped),
  *            or an error code (which can be tested with ZSTD_isError()). */
 ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
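A sketch of the sticky-parameter flow (experimental API, static linking only; src/dst buffers are placeholders, and dstCapacity is assumed >= ZSTD_compressBound(srcSize) so a single pass suffices):

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);   /* sticky */
    ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);        /* sticky */
    {   ZSTD_inBuffer  in  = { src, srcSize,     0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        /* only ZSTD_compress_generic() honors the sticky parameters */
        size_t const remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) { /* handle error */ }
        /* remaining == 0 => frame fully written into `out` */
    }
    ZSTD_freeCCtx(cctx);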
 
+/*! ZSTD_CCtx_getParameter() :
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value);
+
 /*! ZSTD_CCtx_setPledgedSrcSize() :
  *  Total input data size to be compressed as a single frame.
  *  This value will be controlled at the end, and result in error if not respected.
@@ -1114,30 +1193,45 @@
 
 /*! ZSTD_CCtx_refPrefix() :
  *  Reference a prefix (single-usage dictionary) for next compression job.
- *  Decompression need same prefix to properly regenerate data.
- *  Prefix is **only used once**. Tables are discarded at end of compression job.
- *  Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
- *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
+ *  Decompression will need same prefix to properly regenerate data.
+ *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
+ *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ *  Note that prefix is **only used once**. Tables are discarded at end of compression job (ZSTD_e_end).
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
- *  Note 1 : Prefix buffer is referenced. It must outlive compression job.
- *  Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ *  Note 1 : Prefix buffer is referenced. It **must** outlive compression job.
+ *           Its content must remain unmodified up to the end of compression (ZSTD_e_end).
+ *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ *           ensure that the window size is large enough to contain the entire source.
+ *           See ZSTD_p_windowLog.
+ *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
  *           It's a CPU consuming operation, with non-negligible impact on latency.
- *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
+ *           If there is a need to use same prefix multiple times, consider loadDictionary instead.
+ *  Note 4 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
  *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode. */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+                                       const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx,
+                                       const void* prefix, size_t prefixSize,
+                                       ZSTD_dictContentType_e dictContentType);
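A prefix round-trip might look like this (sketch; contexts and buffers are placeholders, and the prefix must stay alive and unmodified on both sides):

    /* compression side : the prefix applies to the next job only */
    ZSTD_CCtx_refPrefix(cctx, prefix, prefixSize);
    ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);   /* prefix dropped at ZSTD_e_end */

    /* decompression side : the same prefix must be referenced again */
    ZSTD_DCtx_refPrefix(dctx, prefix, prefixSize);
    ZSTD_decompress_generic(dctx, &dout, &din);           /* returns 0 at end of frame */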
 
 /*! ZSTD_CCtx_reset() :
  *  Return a CCtx to clean state.
  *  Useful after an error, or to interrupt an ongoing compression job and start a new one.
  *  Any internal data not yet flushed is cancelled.
- *  Dictionary (if any) is dropped.
- *  All parameters are back to default values.
- *  It's possible to modify compression parameters after a reset.
+ *  The parameters and dictionary are kept unchanged; to reset them, use ZSTD_CCtx_resetParameters().
  */
 ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);
 
+/*! ZSTD_CCtx_resetParameters() :
+ *  All parameters are back to default values (compression level is ZSTD_CLEVEL_DEFAULT).
+ *  Dictionary (if any) is dropped.
+ *  Resetting parameters is only possible during frame initialization (before starting compression).
+ *  To reset the context use ZSTD_CCtx_reset().
+ *  @return 0 or an error code (which can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx);
+
 
 
 typedef enum {
@@ -1235,6 +1329,13 @@
  */
 ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
 
+/*! ZSTD_CCtxParam_getParameter() :
+ * Similar to ZSTD_CCtx_getParameter.
+ * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParam_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned* value);
+
 /*! ZSTD_CCtx_setParametersUsingCCtxParams() :
  *  Apply a set of ZSTD_CCtx_params to the compression context.
  *  This can be done even after compression is started,
@@ -1246,10 +1347,13 @@
         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
 
 
-/*===   Advanced parameters for decompression API  ===*/
+/* ==================================== */
+/*===   Advanced decompression API   ===*/
+/* ==================================== */
 
-/* The following parameters must be set after creating a ZSTD_DCtx* (or ZSTD_DStream*) object,
- * but before starting decompression of a frame.
+/* The following API works the same way as the advanced compression API :
+ * a context is created, parameters are pushed into it one by one,
+ * then the context can be used to decompress data using an interface similar to the streaming API.
  */
 
 /*! ZSTD_DCtx_loadDictionary() :
@@ -1286,17 +1390,25 @@
 
 /*! ZSTD_DCtx_refPrefix() :
  *  Reference a prefix (single-usage dictionary) for next compression job.
- *  Prefix is **only used once**. It must be explicitly referenced before each frame.
- *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_DDict instead.
+ *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
+ *  and must use the same prefix as the one used during compression.
+ *  Prefix is **only used once**. Reference is discarded at end of frame.
+ *  End of frame is reached when ZSTD_DCtx_decompress_generic() returns 0.
  * @result : 0, or an error code (which can be tested with ZSTD_isError()).
  *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
- *  Note 2 : Prefix buffer is referenced. It must outlive compression job.
+ *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression job.
+ *           Prefix buffer must remain unmodified up to the end of frame,
+ *           reached when ZSTD_DCtx_decompress_generic() returns 0.
  *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent).
  *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode.
  *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
+ *           A fulldict prefix is more costly though.
  */
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);
-ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
+                                    const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx,
+                                    const void* prefix, size_t prefixSize,
+                                    ZSTD_dictContentType_e dictContentType);
 
 
 /*! ZSTD_DCtx_setMaxWindowSize() :
@@ -1318,6 +1430,13 @@
 ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
 
 
+/*! ZSTD_getFrameHeader_advanced() :
+ *  same as ZSTD_getFrameHeader(),
+ *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr,
+                        const void* src, size_t srcSize, ZSTD_format_e format);
+
+
 /*! ZSTD_decompress_generic() :
  *  Behave the same as ZSTD_decompressStream.
  *  Decompression parameters cannot be changed once decompression is started.
@@ -1383,8 +1502,6 @@
         Use ZSTD_insertBlock() for such a case.
 */
 
-#define ZSTD_BLOCKSIZELOG_MAX 17
-#define ZSTD_BLOCKSIZE_MAX   (1<<ZSTD_BLOCKSIZELOG_MAX)   /* define, for static allocation */
 /*=====   Raw zstd block functions  =====*/
 ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
 ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
--- a/contrib/python-zstandard/zstd_cffi.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python-zstandard/zstd_cffi.py	Mon Oct 22 14:46:06 2018 -0400
@@ -40,6 +40,8 @@
     'DECOMPRESSION_RECOMMENDED_INPUT_SIZE',
     'DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE',
     'MAGIC_NUMBER',
+    'BLOCKSIZELOG_MAX',
+    'BLOCKSIZE_MAX',
     'WINDOWLOG_MIN',
     'WINDOWLOG_MAX',
     'CHAINLOG_MIN',
@@ -52,6 +54,7 @@
     'SEARCHLENGTH_MIN',
     'SEARCHLENGTH_MAX',
     'TARGETLENGTH_MIN',
+    'TARGETLENGTH_MAX',
     'LDM_MINMATCH_MIN',
     'LDM_MINMATCH_MAX',
     'LDM_BUCKETSIZELOG_MAX',
@@ -102,6 +105,8 @@
 CONTENTSIZE_ERROR = lib.ZSTD_CONTENTSIZE_ERROR
 ZSTD_VERSION = (lib.ZSTD_VERSION_MAJOR, lib.ZSTD_VERSION_MINOR, lib.ZSTD_VERSION_RELEASE)
 
+BLOCKSIZELOG_MAX = lib.ZSTD_BLOCKSIZELOG_MAX
+BLOCKSIZE_MAX = lib.ZSTD_BLOCKSIZE_MAX
 WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN
 WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX
 CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN
@@ -114,6 +119,7 @@
 SEARCHLENGTH_MIN = lib.ZSTD_SEARCHLENGTH_MIN
 SEARCHLENGTH_MAX = lib.ZSTD_SEARCHLENGTH_MAX
 TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN
+TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX
 LDM_MINMATCH_MIN = lib.ZSTD_LDM_MINMATCH_MIN
 LDM_MINMATCH_MAX = lib.ZSTD_LDM_MINMATCH_MAX
 LDM_BUCKETSIZELOG_MAX = lib.ZSTD_LDM_BUCKETSIZELOG_MAX
@@ -191,7 +197,6 @@
         (lib.ZSTD_p_nbWorkers, params.threads),
         (lib.ZSTD_p_jobSize, params.job_size),
         (lib.ZSTD_p_overlapSizeLog, params.overlap_size_log),
-        (lib.ZSTD_p_compressLiterals, params.compress_literals),
         (lib.ZSTD_p_forceMaxWindow, params.force_max_window),
         (lib.ZSTD_p_enableLongDistanceMatching, params.enable_ldm),
         (lib.ZSTD_p_ldmHashLog, params.ldm_hash_log),
@@ -224,9 +229,6 @@
             if arg not in kwargs:
                 kwargs[arg] = getattr(params, attr)
 
-        if 'compress_literals' not in kwargs:
-            kwargs['compress_literals'] = 1 if level >= 0 else 0
-
         return ZstdCompressionParameters(**kwargs)
 
     def __init__(self, format=0, compression_level=0, window_log=0, hash_log=0,
@@ -235,14 +237,11 @@
                  write_dict_id=0, job_size=0, overlap_size_log=0,
                  force_max_window=0, enable_ldm=0, ldm_hash_log=0,
                  ldm_min_match=0, ldm_bucket_size_log=0, ldm_hash_every_log=0,
-                 threads=0, compress_literals=None):
+                 threads=0):
 
         if threads < 0:
             threads = _cpu_count()
 
-        if compress_literals is None:
-            compress_literals = compression_level >= 0
-
         self.format = format
         self.compression_level = compression_level
         self.window_log = window_log
@@ -257,7 +256,6 @@
         self.write_dict_id = write_dict_id
         self.job_size = job_size
         self.overlap_size_log = overlap_size_log
-        self.compress_literals = compress_literals
         self.force_max_window = force_max_window
         self.enable_ldm = enable_ldm
         self.ldm_hash_log = ldm_hash_log
@@ -411,13 +409,14 @@
                 raise ZstdError('zstd compress error: %s' %
                                 _zstd_error(zresult))
 
-            if not out_buffer.pos:
-                break
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                total_write += out_buffer.pos
+                self._bytes_compressed += out_buffer.pos
+                out_buffer.pos = 0
 
-            self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
-            total_write += out_buffer.pos
-            self._bytes_compressed += out_buffer.pos
-            out_buffer.pos = 0
+            if not zresult:
+                break
 
         return total_write
 
@@ -460,6 +459,14 @@
         if self._finished:
             raise ZstdError('compressor object already finished')
 
+        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+            z_flush_mode = lib.ZSTD_e_flush
+        elif flush_mode == COMPRESSOBJ_FLUSH_FINISH:
+            z_flush_mode = lib.ZSTD_e_end
+            self._finished = True
+        else:
+            raise ZstdError('unhandled flush mode')
+
         assert self._out.pos == 0
 
         in_buffer = ffi.new('ZSTD_inBuffer *')
@@ -467,35 +474,13 @@
         in_buffer.size = 0
         in_buffer.pos = 0
 
-        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
-            zresult = lib.ZSTD_compress_generic(self._compressor._cctx,
-                                                self._out,
-                                                in_buffer,
-                                                lib.ZSTD_e_flush)
-            if lib.ZSTD_isError(zresult):
-                raise ZstdError('zstd compress error: %s' %
-                                _zstd_error(zresult))
-
-            # Output buffer is guaranteed to hold full block.
-            assert zresult == 0
-
-            if self._out.pos:
-                result = ffi.buffer(self._out.dst, self._out.pos)[:]
-                self._out.pos = 0
-                return result
-            else:
-                return b''
-
-        assert flush_mode == COMPRESSOBJ_FLUSH_FINISH
-        self._finished = True
-
         chunks = []
 
         while True:
             zresult = lib.ZSTD_compress_generic(self._compressor._cctx,
                                                 self._out,
                                                 in_buffer,
-                                                lib.ZSTD_e_end)
+                                                z_flush_mode)
             if lib.ZSTD_isError(zresult):
                 raise ZstdError('error ending compression stream: %s' %
                                 _zstd_error(zresult))
@@ -510,11 +495,107 @@
         return b''.join(chunks)
 
 
+class ZstdCompressionChunker(object):
+    def __init__(self, compressor, chunk_size):
+        self._compressor = compressor
+        self._out = ffi.new('ZSTD_outBuffer *')
+        self._dst_buffer = ffi.new('char[]', chunk_size)
+        self._out.dst = self._dst_buffer
+        self._out.size = chunk_size
+        self._out.pos = 0
+
+        self._in = ffi.new('ZSTD_inBuffer *')
+        self._in.src = ffi.NULL
+        self._in.size = 0
+        self._in.pos = 0
+        self._finished = False
+
+    def compress(self, data):
+        if self._finished:
+            raise ZstdError('cannot call compress() after compression finished')
+
+        if self._in.src != ffi.NULL:
+            raise ZstdError('cannot perform operation before consuming output '
+                            'from previous operation')
+
+        data_buffer = ffi.from_buffer(data)
+
+        if not len(data_buffer):
+            return
+
+        self._in.src = data_buffer
+        self._in.size = len(data_buffer)
+        self._in.pos = 0
+
+        while self._in.pos < self._in.size:
+            zresult = lib.ZSTD_compress_generic(self._compressor._cctx,
+                                                self._out,
+                                                self._in,
+                                                lib.ZSTD_e_continue)
+
+            if self._in.pos == self._in.size:
+                self._in.src = ffi.NULL
+                self._in.size = 0
+                self._in.pos = 0
+
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                _zstd_error(zresult))
+
+            if self._out.pos == self._out.size:
+                yield ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+
+    def flush(self):
+        if self._finished:
+            raise ZstdError('cannot call flush() after compression finished')
+
+        if self._in.src != ffi.NULL:
+            raise ZstdError('cannot call flush() before consuming output from '
+                            'previous operation')
+
+        while True:
+            zresult = lib.ZSTD_compress_generic(self._compressor._cctx,
+                                                self._out, self._in,
+                                                lib.ZSTD_e_flush)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' % _zstd_error(zresult))
+
+            if self._out.pos:
+                yield ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+
+            if not zresult:
+                return
+
+    def finish(self):
+        if self._finished:
+            raise ZstdError('cannot call finish() after compression finished')
+
+        if self._in.src != ffi.NULL:
+            raise ZstdError('cannot call finish() before consuming output from '
+                            'previous operation')
+
+        while True:
+            zresult = lib.ZSTD_compress_generic(self._compressor._cctx,
+                                                self._out, self._in,
+                                                lib.ZSTD_e_end)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' % _zstd_error(zresult))
+
+            if self._out.pos:
+                yield ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+
+            if not zresult:
+                self._finished = True
+                return
+
+
 class CompressionReader(object):
-    def __init__(self, compressor, source, size, read_size):
+    def __init__(self, compressor, source, read_size):
         self._compressor = compressor
         self._source = source
-        self._source_size = size
         self._read_size = read_size
         self._entered = False
         self._closed = False
@@ -530,12 +611,6 @@
         if self._entered:
             raise ValueError('cannot __enter__ multiple times')
 
-        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._compressor._cctx,
-                                                  self._source_size)
-        if lib.ZSTD_isError(zresult):
-            raise ZstdError('error setting source size: %s' %
-                            _zstd_error(zresult))
-
         self._entered = True
         return self
 
@@ -578,6 +653,7 @@
         self._closed = True
         return None
 
+    @property
     def closed(self):
         return self._closed
 
@@ -596,9 +672,6 @@
     next = __next__
 
     def read(self, size=-1):
-        if not self._entered:
-            raise ZstdError('read() must be called from an active context manager')
-
         if self._closed:
             raise ValueError('stream is closed')
 
@@ -759,16 +832,14 @@
         self._dict_data = dict_data
 
         # We defer setting up garbage collection until after calling
-        # _ensure_cctx() to ensure the memory size estimate is more accurate.
+        # _setup_cctx() to ensure the memory size estimate is more accurate.
         try:
-            self._ensure_cctx()
+            self._setup_cctx()
         finally:
             self._cctx = ffi.gc(cctx, lib.ZSTD_freeCCtx,
                                 size=lib.ZSTD_sizeof_CCtx(cctx))
 
-    def _ensure_cctx(self):
-        lib.ZSTD_CCtx_reset(self._cctx)
-
+    def _setup_cctx(self):
         zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx,
                                                              self._params)
         if lib.ZSTD_isError(zresult):
@@ -793,7 +864,7 @@
         return lib.ZSTD_sizeof_CCtx(self._cctx)
 
     def compress(self, data):
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         data_buffer = ffi.from_buffer(data)
 
@@ -830,7 +901,7 @@
         return ffi.buffer(out, out_buffer.pos)[:]
 
     def compressobj(self, size=-1):
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
@@ -851,6 +922,19 @@
 
         return cobj
 
+    def chunker(self, size=-1, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        lib.ZSTD_CCtx_reset(self._cctx)
+
+        if size < 0:
+            size = lib.ZSTD_CONTENTSIZE_UNKNOWN
+
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('error setting source size: %s' %
+                            _zstd_error(zresult))
+
+        return ZstdCompressionChunker(self, chunk_size=chunk_size)
+
     def copy_stream(self, ifh, ofh, size=-1,
                     read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
                     write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
@@ -860,7 +944,7 @@
         if not hasattr(ofh, 'write'):
             raise ValueError('second argument must have a write() method')
 
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
@@ -927,7 +1011,7 @@
 
     def stream_reader(self, source, size=-1,
                       read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE):
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         try:
             size = len(source)
@@ -937,7 +1021,12 @@
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
 
-        return CompressionReader(self, source, size, read_size)
+        zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('error setting source size: %s' %
+                            _zstd_error(zresult))
+
+        return CompressionReader(self, source, read_size)
 
     def stream_writer(self, writer, size=-1,
                  write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
@@ -945,7 +1034,7 @@
         if not hasattr(writer, 'write'):
             raise ValueError('must pass an object with a write() method')
 
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
@@ -967,7 +1056,7 @@
             raise ValueError('must pass an object with a read() method or '
                              'conforms to buffer protocol')
 
-        self._ensure_cctx()
+        lib.ZSTD_CCtx_reset(self._cctx)
 
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN
@@ -1267,7 +1356,7 @@
 
         chunks = []
 
-        while in_buffer.pos < in_buffer.size:
+        while True:
             zresult = lib.ZSTD_decompress_generic(self._decompressor._dctx,
                                                   out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
@@ -1280,7 +1369,12 @@
 
             if out_buffer.pos:
                 chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
-                out_buffer.pos = 0
+
+            if (zresult == 0 or
+                    (in_buffer.pos == in_buffer.size and out_buffer.pos == 0)):
+                break
+
+            out_buffer.pos = 0
 
         return b''.join(chunks)
 
@@ -1303,8 +1397,6 @@
         if self._entered:
             raise ValueError('cannot __enter__ multiple times')
 
-        self._decompressor._ensure_dctx()
-
         self._entered = True
         return self
 
@@ -1347,6 +1439,7 @@
         self._closed = True
         return None
 
+    @property
     def closed(self):
         return self._closed
 
@@ -1364,10 +1457,7 @@
 
     next = __next__
 
-    def read(self, size=-1):
-        if not self._entered:
-            raise ZstdError('read() must be called from an active context manager')
-
+    def read(self, size):
         if self._closed:
             raise ValueError('stream is closed')
 
@@ -1442,10 +1532,6 @@
         return ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
 
     def seek(self, pos, whence=os.SEEK_SET):
-        if not self._entered:
-            raise ZstdError('seek() must be called from an active context '
-                            'manager')
-
         if self._closed:
             raise ValueError('stream is closed')
 
--- a/contrib/python3-whitelist	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/python3-whitelist	Mon Oct 22 14:46:06 2018 -0400
@@ -1,4 +1,9 @@
 test-abort-checkin.t
+test-absorb-filefixupstate.py
+test-absorb-phase.t
+test-absorb-rename.t
+test-absorb-strip.t
+test-absorb.t
 test-add.t
 test-addremove-similar.t
 test-addremove.t
@@ -9,14 +14,17 @@
 test-annotate.py
 test-annotate.t
 test-archive-symlinks.t
+test-archive.t
 test-atomictempfile.py
 test-audit-path.t
 test-audit-subrepo.t
 test-automv.t
 test-backout.t
 test-backwards-remove.t
+test-bad-extension.t
 test-bad-pull.t
 test-basic.t
+test-bdiff.py
 test-bheads.t
 test-bisect.t
 test-bisect2.t
@@ -42,25 +50,35 @@
 test-bundle2-multiple-changegroups.t
 test-bundle2-pushback.t
 test-bundle2-remote-changegroup.t
+test-cache-abuse.t
 test-cappedreader.py
 test-casecollision.t
 test-cat.t
 test-cbor.py
 test-censor.t
 test-changelog-exec.t
+test-check-code.t
 test-check-commit.t
+test-check-config.py
+test-check-config.t
 test-check-execute.t
 test-check-interfaces.py
 test-check-module-imports.t
+test-check-py3-compat.t
 test-check-pyflakes.t
 test-check-pylint.t
 test-check-shbang.t
 test-children.t
+test-churn.t
 test-clone-cgi.t
 test-clone-pull-corruption.t
 test-clone-r.t
+test-clone-uncompressed.t
 test-clone-update-order.t
+test-clone.t
 test-clonebundles.t
+test-close-head.t
+test-commandserver.t
 test-commit-amend.t
 test-commit-interactive.t
 test-commit-multiple.t
@@ -72,9 +90,15 @@
 test-config.t
 test-conflict.t
 test-confused-revert.t
+test-context-metadata.t
 test-context.py
 test-contrib-check-code.t
 test-contrib-check-commit.t
+test-contrib-dumprevlog.t
+test-contrib-perf.t
+test-contrib-relnotes.t
+test-contrib-testparseutil.t
+test-contrib.t
 test-convert-authormap.t
 test-convert-clonebranches.t
 test-convert-cvs-branch.t
@@ -94,6 +118,7 @@
 test-copytrace-heuristics.t
 test-debugbuilddag.t
 test-debugbundle.t
+test-debugcommands.t
 test-debugextensions.t
 test-debugindexdot.t
 test-debugrename.t
@@ -101,6 +126,7 @@
 test-diff-antipatience.t
 test-diff-binary-file.t
 test-diff-change.t
+test-diff-color.t
 test-diff-copy-depth.t
 test-diff-hashes.t
 test-diff-ignore-whitespace.t
@@ -116,6 +142,7 @@
 test-directaccess.t
 test-dirstate-backup.t
 test-dirstate-nonnormalset.t
+test-dirstate-race.t
 test-dirstate.t
 test-dispatch.py
 test-doctest.py
@@ -129,6 +156,7 @@
 test-empty.t
 test-encode.t
 test-encoding-func.py
+test-encoding-textwrap.t
 test-encoding.t
 test-eol-add.t
 test-eol-clone.t
@@ -175,22 +203,37 @@
 test-fileset-generated.t
 test-fileset.t
 test-fix-topology.t
+test-fix.t
 test-flags.t
+test-fncache.t
 test-generaldelta.t
 test-getbundle.t
 test-git-export.t
+test-globalopts.t
+test-glog-beautifygraph.t
 test-glog-topological.t
+test-glog.t
 test-gpg.t
 test-graft.t
+test-grep.t
+test-hardlinks.t
+test-help.t
 test-hg-parseurl.py
 test-hghave.t
 test-hgignore.t
 test-hgk.t
 test-hgrc.t
+test-hgweb-annotate-whitespace.t
 test-hgweb-bundle.t
+test-hgweb-csp.t
 test-hgweb-descend-empties.t
+test-hgweb-diffs.t
 test-hgweb-empty.t
+test-hgweb-filelog.t
+test-hgweb-non-interactive.t
+test-hgweb-raw.t
 test-hgweb-removed.t
+test-hgweb.t
 test-hgwebdir-paths.py
 test-hgwebdirsym.t
 test-histedit-arguments.t
@@ -211,8 +254,10 @@
 test-http-branchmap.t
 test-http-bundle1.t
 test-http-clone-r.t
+test-http-permissions.t
 test-http.t
 test-hybridencode.py
+test-i18n.t
 test-identify.t
 test-impexp-branch.t
 test-import-bypass.t
@@ -222,6 +267,9 @@
 test-import.t
 test-imports-checker.t
 test-incoming-outgoing.t
+test-infinitepush-bundlestore.t
+test-infinitepush-ci.t
+test-infinitepush.t
 test-inherit-mode.t
 test-init.t
 test-issue1089.t
@@ -238,6 +286,7 @@
 test-issue4074.t
 test-issue522.t
 test-issue586.t
+test-issue5979.t
 test-issue612.t
 test-issue619.t
 test-issue660.t
@@ -246,6 +295,7 @@
 test-journal-exists.t
 test-journal-share.t
 test-journal.t
+test-keyword.t
 test-known.t
 test-largefiles-cache.t
 test-largefiles-misc.t
@@ -254,6 +304,7 @@
 test-largefiles.t
 test-lfs-largefiles.t
 test-lfs-pointer.py
+test-linelog.py
 test-linerange.py
 test-locate.t
 test-lock-badness.t
@@ -277,6 +328,7 @@
 test-merge-halt.t
 test-merge-internal-tools-pattern.t
 test-merge-local.t
+test-merge-no-file-change.t
 test-merge-remove.t
 test-merge-revert.t
 test-merge-revert2.t
@@ -296,6 +348,7 @@
 test-minifileset.py
 test-minirst.py
 test-mq-git.t
+test-mq-guards.t
 test-mq-header-date.t
 test-mq-header-from.t
 test-mq-merge.t
@@ -308,6 +361,7 @@
 test-mq-qimport-fail-cleanup.t
 test-mq-qnew.t
 test-mq-qpush-exact.t
+test-mq-qpush-fail.t
 test-mq-qqueue.t
 test-mq-qrefresh-interactive.t
 test-mq-qrefresh-replace-log-message.t
@@ -317,11 +371,14 @@
 test-mq-safety.t
 test-mq-subrepo.t
 test-mq-symlinks.t
+test-mq.t
 test-mv-cp-st-diff.t
+test-narrow-acl.t
 test-narrow-archive.t
 test-narrow-clone-no-ellipsis.t
 test-narrow-clone-non-narrow-server.t
 test-narrow-clone-nonlinear.t
+test-narrow-clone-stream.t
 test-narrow-clone.t
 test-narrow-commit.t
 test-narrow-copies.t
@@ -338,32 +395,48 @@
 test-narrow-shallow-merges.t
 test-narrow-shallow.t
 test-narrow-strip.t
+test-narrow-trackedcmd.t
 test-narrow-update.t
+test-narrow-widen-no-ellipsis.t
 test-narrow-widen.t
 test-narrow.t
 test-nested-repo.t
 test-newbranch.t
+test-newercgi.t
 test-nointerrupt.t
 test-obshistory.t
 test-obsmarker-template.t
 test-obsmarkers-effectflag.t
+test-obsolete-bounds-checking.t
 test-obsolete-bundle-strip.t
 test-obsolete-changeset-exchange.t
 test-obsolete-checkheads.t
 test-obsolete-distributed.t
 test-obsolete-divergent.t
 test-obsolete-tag-cache.t
+test-obsolete.t
+test-origbackup-conflict.t
+test-pager-legacy.t
 test-pager.t
 test-parents.t
+test-parse-date.t
 test-parseindex2.py
 test-patch-offset.t
 test-patch.t
+test-patchbomb-bookmark.t
+test-patchbomb-tls.t
+test-patchbomb.t
+test-pathconflicts-basic.t
 test-pathconflicts-merge.t
 test-pathconflicts-update.t
 test-pathencode.py
 test-pending.t
 test-permissions.t
+test-phases-exchange.t
 test-phases.t
+test-profile.t
+test-progress.t
+test-propertycache.py
 test-pull-branch.t
 test-pull-http.t
 test-pull-permission.t
@@ -372,6 +445,7 @@
 test-pull-update.t
 test-pull.t
 test-purge.t
+test-push-cgi.t
 test-push-checkheads-partial-C1.t
 test-push-checkheads-partial-C2.t
 test-push-checkheads-partial-C3.t
@@ -405,6 +479,7 @@
 test-pushvars.t
 test-qrecord.t
 test-rebase-abort.t
+test-rebase-backup.t
 test-rebase-base-flag.t
 test-rebase-bookmarks.t
 test-rebase-brute-force.t
@@ -433,25 +508,33 @@
 test-rebase-transaction.t
 test-rebuildstate.t
 test-record.t
+test-releasenotes-formatting.t
+test-releasenotes-merging.t
+test-releasenotes-parsing.t
 test-relink.t
 test-remove.t
 test-removeemptydirs.t
 test-rename-after-merge.t
 test-rename-dir-merge.t
 test-rename-merge1.t
+test-rename-merge2.t
 test-rename.t
 test-repair-strip.t
 test-repo-compengines.t
+test-requires.t
 test-resolve.t
 test-revert-flags.t
 test-revert-interactive.t
 test-revert-unknown.t
+test-revert.t
+test-revisions.t
 test-revlog-ancestry.py
 test-revlog-group-emptyiter.t
 test-revlog-mmapindex.t
 test-revlog-packentry.t
 test-revlog-raw.py
 test-revlog-v2.t
+test-revlog.t
 test-revset-dirstate-parents.t
 test-revset-legacy-lookup.t
 test-revset-outgoing.t
@@ -483,12 +566,17 @@
 test-ssh-clone-r.t
 test-ssh-proto-unbundle.t
 test-ssh-proto.t
+test-ssh-repoerror.t
 test-ssh.t
 test-sshserver.py
 test-stack.t
+test-status-color.t
 test-status-inprocess.py
 test-status-rev.t
 test-status-terse.t
+test-status.t
+test-storage.py
+test-stream-bundle-v2.t
 test-strict.t
 test-strip-cross.t
 test-strip.t
@@ -507,6 +595,7 @@
 test-template-functions.t
 test-template-keywords.t
 test-template-map.t
+test-tools.t
 test-transplant.t
 test-treemanifest.t
 test-ui-color.py
@@ -529,12 +618,23 @@
 test-url-rev.t
 test-url.py
 test-username-newline.t
+test-util.py
 test-verify.t
 test-walk.t
 test-walkrepo.py
 test-websub.t
 test-win32text.t
 test-wireproto-clientreactor.py
+test-wireproto-command-branchmap.t
+test-wireproto-command-changesetdata.t
+test-wireproto-command-filedata.t
+test-wireproto-command-filesdata.t
+test-wireproto-command-heads.t
+test-wireproto-command-listkeys.t
+test-wireproto-command-lookup.t
+test-wireproto-command-manifestdata.t
+test-wireproto-command-pushkey.t
+test-wireproto-command-rawstorefiledata.t
 test-wireproto-framing.py
 test-wireproto-serverreactor.py
 test-wireproto.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/relnotes	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+"""Generate release notes from our commit log.
+
+This uses the relnotes extension directives when they're available,
+and falls back to our old pre-relnotes logic that used to live in the
+release-tools repo.
+"""
+import argparse
+import re
+import subprocess
+
+# Regenerate this list with
+#   hg export 'grep("\.\. [a-z]+::")' | grep '^\.\.' | \
+#     sed 's/.. //;s/::.*//' | sort -u
+rnsections = ["api", "bc", "container", "feature", "fix", "note", "perf"]
+
+rules = {
+    # keep
+    r"\(issue": 100,
+    r"\(BC\)": 100,
+    r"\(API\)": 100,
+    # core commands, bump up
+    r"(commit|files|log|pull|push|patch|status|tag|summary)(|s|es):": 20,
+    r"(annotate|alias|branch|bookmark|clone|graft|import|verify).*:": 20,
+    # extensions, bump up
+    r"(mq|shelve|rebase):": 20,
+    # newsy
+    r": deprecate": 20,
+    r"(option|feature|command|support)": 10,
+    # bug-like?
+    r"(fix|don't break|improve)": 7,
+    # boring stuff, bump down
+    r"^contrib": -5,
+    r"debug": -5,
+    r"help": -5,
+    r"(doc|bundle2|obsolete|obsmarker|rpm|setup|debug\S+:)": -15,
+    r"(check-code|check-commit|import-checker)": -20,
+    # cleanups and refactoring
+    r"(cleanup|whitespace|nesting|indent|spelling|comment)": -20,
+    r"(typo|hint|note|style:|correct doc)": -20,
+    r"_": -10,
+    r"(argument|absolute_import|attribute|assignment|mutable)": -15,
+    r"(unused|useless|unnecessary|duplicate|deprecated|scope|True|False)": -10,
+    r"(redundant|pointless|confusing|uninitialized|meaningless|dead)": -10,
+    r": (drop|remove|inherit|rename|simplify|naming|inline)": -10,
+    r"(docstring|document .* method)": -20,
+    r"(factor|extract|prepare|split|replace| import)": -20,
+    r": add.*(function|method|implementation|test|example)": -10,
+    r": (move|extract) .* (to|into|from)": -20,
+    r": implement ": -5,
+    r": use .* implementation": -20,
+    r"\S\S\S+\.\S\S\S\S+": -5,
+    r": use .* instead of": -20,
+    r"__": -5,
+    # dumb keywords
+    r"\S+/\S+:": -10,
+    r"\S+\.\S+:": -10,
+    # drop
+    r"^i18n-": -50,
+    r"^i18n:.*(hint|comment)": -50,
+    r"perf:": -50,
+    r"check-code:": -50,
+    r"Added.*for changeset": -50,
+    r"tests?:": -50,
+    r"test-": -50,
+    r"add.* tests": -50,
+    r"^_": -50,
+}
+
+cutoff = 10
+commits = []
+
+groupings = [
+    (r"util|parsers|repo|ctx|context|revlog|filelog|alias|cmdutil", "core"),
+    (r"revset|templater|ui|dirstate|hook|i18n|transaction|wire", "core"),
+    (r"color|pager", "core"),
+    (r"hgweb|paper|coal|gitweb", "hgweb"),
+    (r"pull|push|revert|resolve|annotate|bookmark|branch|clone", "commands"),
+    (r"commands|commit|config|files|graft|import|log|merge|patch", "commands"),
+    (r"phases|status|summary|amend|tag|help|verify", "commands"),
+    (r"rebase|mq|convert|eol|histedit|largefiles", "extensions"),
+    (r"shelve|unshelve", "extensions"),
+]
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument(
+        "startrev",
+        metavar="REV",
+        type=str,
+        nargs=1,
+        help=(
+            "Starting revision for the release notes. This revision "
+            "won't be included, but later revisions will."
+        ),
+    )
+    ap.add_argument(
+        "--stoprev",
+        metavar="REV",
+        type=str,
+        default="@",
+        nargs=1,
+        help=(
+            "Stop revision for release notes. This revision will be included,"
+            " but no later revisions will. This revision needs to be "
+            "a descendant of startrev."
+        ),
+    )
+    args = ap.parse_args()
+    fromext = subprocess.check_output(
+        [
+            "hg",
+            "--config",
+            "extensions.releasenotes=",
+            "releasenotes",
+            "-r",
+            "%s::%s" % (args.startrev[0], args.stoprev[0]),
+        ]
+    ).decode("utf-8")
+    # Find all release notes from un-relnotes-flagged commits.
+    for entry in sorted(
+        subprocess.check_output(
+            [
+                "hg",
+                "log",
+                "-r",
+                r'%s::%s - merge() - grep("\n\.\. (%s)::")'
+                % (args.startrev[0], args.stoprev[0], "|".join(rnsections)),
+                "-T",
+                r"{desc|firstline}\n",
+            ]
+        )
+        .decode("utf-8")
+        .splitlines()
+    ):
+        desc = entry.replace("`", "'")
+
+        score = 0
+        for rule, val in rules.items():
+            if re.search(rule, desc):
+                score += val
+
+        desc = desc.replace("(issue", "(Bts:issue")
+
+        if score >= cutoff:
+            commits.append(desc)
+    # Group unflagged notes.
+    groups = {}
+    bcs = []
+    apis = []
+
+    for d in commits:
+        if "(BC)" in d:
+            bcs.append(d)
+        if "(API)" in d:
+            apis.append(d)
+        for rule, g in groupings:
+            if re.match(rule, d):
+                groups.setdefault(g, []).append(d)
+                break
+        else:
+            groups.setdefault("unsorted", []).append(d)
+    print(fromext)
+    # print legacy release notes sections
+    for g in sorted(groups):
+        print("\n=== %s ===" % g)
+        for d in sorted(groups[g]):
+            print(" * %s" % d)
+
+    print("\n=== BC ===\n")
+
+    for d in sorted(bcs):
+        print(" * %s" % d)
+
+    print("\n=== API Changes ===\n")
+
+    for d in sorted(apis):
+        print(" * %s" % d)
+
+if __name__ == "__main__":
+    main()
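
The scoring above is purely additive: each rule whose regex matches a commit summary contributes its weight, and only summaries scoring at least 'cutoff' (10) are kept for the legacy sections. A standalone sketch using a small subset of the rules defined above:

import re

# subset of the rules/cutoff from contrib/relnotes, for illustration only
rules = {r"\(issue": 100, r"tests?:": -50, r"(fix|don't break|improve)": 7}
cutoff = 10

def score(desc):
    return sum(val for rule, val in rules.items() if re.search(rule, desc))

print(score("merge: fix regression (issue6012)") >= cutoff)  # True: kept
print(score("tests: fix flaky output") >= cutoff)            # False: dropped
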
--- a/contrib/revsetbenchmarks.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/revsetbenchmarks.py	Mon Oct 22 14:46:06 2018 -0400
@@ -149,7 +149,7 @@
         return '%4s' % ('x%i' % factor)
     else:
         order = int(math.log(factor)) + 1
-        while 1 < math.log(factor):
+        while math.log(factor) > 1:
         factor //= 10
         return 'x%ix%i' % (factor, order)
 
@@ -190,7 +190,7 @@
     for var in variants:
         if not var:
             var = 'iter'
-        if 8 < len(var):
+        if len(var) > 8:
             var = var[:3] + '..' + var[-3:]
         header.append('%-8s' % var)
         if relative:
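
In the huge-exponent branch above, the loop divides the factor by ten until its natural log falls to 1, while 'order' preserves the original magnitude. A standalone check of the loop:

import math

factor = 123456
order = int(math.log(factor)) + 1   # 12
while math.log(factor) > 1:
    factor //= 10
print('x%ix%i' % (factor, order))   # prints x1x12
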
--- a/contrib/showstack.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/showstack.py	Mon Oct 22 14:46:06 2018 -0400
@@ -4,7 +4,7 @@
 """dump stack trace when receiving SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs)
 """
 
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 import signal
 import sys
 import traceback
@@ -14,8 +14,14 @@
     traceback.print_stack(args[1], limit=10, file=sys.stderr)
     sys.stderr.write("----\n")
 
+def sigexit(*args):
+    sigshow(*args)
+    print('alarm!')
+    sys.exit(1)
+
 def extsetup(ui):
     signal.signal(signal.SIGQUIT, sigshow)
+    signal.signal(signal.SIGALRM, sigexit)
     try:
         signal.signal(signal.SIGINFO, sigshow)
     except AttributeError:
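
The new sigexit handler turns SIGALRM into a stack dump plus a hard exit(1). A hypothetical watchdog built on it; the command, path, and five-second budget are illustrative, and it assumes the child hg process has the extension loaded:

import os, signal, subprocess, time

proc = subprocess.Popen(['hg', '--config',
                         'extensions.showstack=contrib/showstack.py',
                         'status'])
time.sleep(5)                            # arbitrary patience budget
if proc.poll() is None:                  # still running: assume it is hung
    os.kill(proc.pid, signal.SIGALRM)    # child dumps its stack, exits 1
proc.wait()
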
--- a/contrib/simplemerge	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/simplemerge	Mon Oct 22 14:46:06 2018 -0400
@@ -12,6 +12,7 @@
     context,
     error,
     fancyopts,
+    pycompat,
     simplemerge,
     ui as uimod,
 )
@@ -19,15 +20,15 @@
     procutil,
 )
 
-options = [('L', 'label', [], _('labels to use on conflict markers')),
-           ('a', 'text', None, _('treat all files as text')),
-           ('p', 'print', None,
-            _('print results instead of overwriting LOCAL')),
-           ('', 'no-minimal', None, _('no effect (DEPRECATED)')),
-           ('h', 'help', None, _('display help and exit')),
-           ('q', 'quiet', None, _('suppress output'))]
+options = [(b'L', b'label', [], _(b'labels to use on conflict markers')),
+           (b'a', b'text', None, _(b'treat all files as text')),
+           (b'p', b'print', None,
+            _(b'print results instead of overwriting LOCAL')),
+           (b'', b'no-minimal', None, _(b'no effect (DEPRECATED)')),
+           (b'h', b'help', None, _(b'display help and exit')),
+           (b'q', b'quiet', None, _(b'suppress output'))]
 
-usage = _('''simplemerge [OPTS] LOCAL BASE OTHER
+usage = _(b'''simplemerge [OPTS] LOCAL BASE OTHER
 
     Simple three-way file merge utility with a minimal feature set.
 
@@ -40,44 +41,47 @@
     """Exception raised on errors in parsing the command line."""
 
 def showhelp():
-    sys.stdout.write(usage)
-    sys.stdout.write('\noptions:\n')
+    pycompat.stdout.write(usage)
+    pycompat.stdout.write(b'\noptions:\n')
 
     out_opts = []
     for shortopt, longopt, default, desc in options:
-        out_opts.append(('%2s%s' % (shortopt and '-%s' % shortopt,
-                                    longopt and ' --%s' % longopt),
-                         '%s' % desc))
+        out_opts.append((b'%2s%s' % (shortopt and b'-%s' % shortopt,
+                                     longopt and b' --%s' % longopt),
+                         b'%s' % desc))
     opts_len = max([len(opt[0]) for opt in out_opts])
     for first, second in out_opts:
-        sys.stdout.write(' %-*s  %s\n' % (opts_len, first, second))
+        pycompat.stdout.write(b' %-*s  %s\n' % (opts_len, first, second))
 
 try:
-    for fp in (sys.stdin, sys.stdout, sys.stderr):
+    for fp in (sys.stdin, pycompat.stdout, sys.stderr):
         procutil.setbinary(fp)
 
     opts = {}
     try:
-        args = fancyopts.fancyopts(sys.argv[1:], options, opts)
+        bargv = [a.encode('utf8') for a in sys.argv[1:]]
+        args = fancyopts.fancyopts(bargv, options, opts)
     except getopt.GetoptError as e:
         raise ParseError(e)
-    if opts['help']:
+    if opts[b'help']:
         showhelp()
         sys.exit(0)
     if len(args) != 3:
-            raise ParseError(_('wrong number of arguments'))
+            raise ParseError(_(b'wrong number of arguments').decode('utf8'))
     local, base, other = args
     sys.exit(simplemerge.simplemerge(uimod.ui.load(),
                                      context.arbitraryfilectx(local),
                                      context.arbitraryfilectx(base),
                                      context.arbitraryfilectx(other),
-                                     **opts))
+                                     **pycompat.strkwargs(opts)))
 except ParseError as e:
-    sys.stdout.write("%s: %s\n" % (sys.argv[0], e))
+    if pycompat.ispy3:
+        e = str(e).encode('utf8')
+    pycompat.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
     showhelp()
     sys.exit(1)
 except error.Abort as e:
-    sys.stderr.write("abort: %s\n" % e)
+    pycompat.stderr.write(b"abort: %s\n" % e)
     sys.exit(255)
 except KeyboardInterrupt:
     sys.exit(255)
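
The port above keeps simplemerge's option table and all output in bytes, converting at the process boundary only: argv is encoded before option parsing, and native-str exception text is re-encoded before reaching the bytes stdout. A condensed standalone sketch of that pattern (not simplemerge itself; bytes formatting needs Python 3.5+ or Python 2):

import sys

ispy3 = sys.version_info[0] >= 3
stdout = sys.stdout.buffer if ispy3 else sys.stdout

# argv arrives as native str on py3; encode before bytes-only parsing
bargv = [a.encode('utf8') if ispy3 else a for a in sys.argv[1:]]

msg = "wrong number of arguments"        # native-str text, e.g. from getopt
data = msg.encode('utf8') if ispy3 else msg
stdout.write(b"simplemerge: %s\n" % data)
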
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/testparseutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,630 @@
+# testparseutil.py - utilities to parse test script for check tools
+#
+#  Copyright 2018 FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import, print_function
+
+import abc
+import re
+import sys
+
+####################
+# for Python3 compatibility (mostly taken from mercurial/pycompat.py)
+
+ispy3 = (sys.version_info[0] >= 3)
+
+def identity(a):
+    return a
+
+def _rapply(f, xs):
+    if xs is None:
+        # assume None means non-value of optional data
+        return xs
+    if isinstance(xs, (list, set, tuple)):
+        return type(xs)(_rapply(f, x) for x in xs)
+    if isinstance(xs, dict):
+        return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
+    return f(xs)
+
+def rapply(f, xs):
+    if f is identity:
+        # fast path mainly for py2
+        return xs
+    return _rapply(f, xs)
+
+if ispy3:
+    import builtins
+
+    # TODO: .buffer might not exist if std streams were replaced; we'll need
+    # a silly wrapper to make a bytes stream backed by a unicode one.
+    stdin = sys.stdin.buffer
+    stdout = sys.stdout.buffer
+    stderr = sys.stderr.buffer
+
+    def bytestr(s):
+        # tiny version of pycompat.bytestr
+        return s.encode('latin1')
+
+    def sysstr(s):
+        if isinstance(s, builtins.str):
+            return s
+        return s.decode(u'latin-1')
+
+    def opentext(f):
+        return open(f, 'rb')
+else:
+    stdin = sys.stdin
+    stdout = sys.stdout
+    stderr = sys.stderr
+
+    bytestr = str
+    sysstr = identity
+
+    opentext = open
+
+def b2s(x):
+    # convert BYTES elements in "x" to SYSSTR recursively
+    return rapply(sysstr, x)
+
+def writeout(data):
+    # write "data" in BYTES into stdout
+    stdout.write(data)
+
+def writeerr(data):
+    # write "data" in BYTES into stderr
+    stderr.write(data)
+
+####################
+
+class embeddedmatcher(object):
+    """Base class to detect embedded code fragments in *.t test script
+    """
+    __metaclass__ = abc.ABCMeta
+
+    def __init__(self, desc):
+        self.desc = desc
+
+    @abc.abstractmethod
+    def startsat(self, line):
+        """Examine whether embedded code starts at line
+
+        This can return an arbitrary object, which is used as 'ctx' for
+        subsequent method invocations.
+        """
+
+    @abc.abstractmethod
+    def endsat(self, ctx, line):
+        """Examine whether embedded code ends at line"""
+
+    @abc.abstractmethod
+    def isinside(self, ctx, line):
+        """Examine whether line is inside embedded code, if not yet endsat
+        """
+
+    @abc.abstractmethod
+    def ignores(self, ctx):
+        """Examine whether detected embedded code should be ignored"""
+
+    @abc.abstractmethod
+    def filename(self, ctx):
+        """Return filename of embedded code
+
+        If filename isn't specified for embedded code explicitly, this
+        returns None.
+        """
+
+    @abc.abstractmethod
+    def codeatstart(self, ctx, line):
+        """Return actual code at the start line of embedded code
+
+        This might return None, if the start line doesn't contain
+        actual code.
+        """
+
+    @abc.abstractmethod
+    def codeatend(self, ctx, line):
+        """Return actual code at the end line of embedded code
+
+        This might return None, if the end line doesn't contain actual
+        code.
+        """
+
+    @abc.abstractmethod
+    def codeinside(self, ctx, line):
+        """Return actual code at line inside embedded code"""
+
+def embedded(basefile, lines, errors, matchers):
+    """pick embedded code fragments up from given lines
+
+    This is common parsing logic, which examines specified matchers on
+    given lines.
+
+    :basefile: the name of the file from which the lines to be parsed come
+    :lines: the lines to be parsed (may be what "open(basefile)" returned)
+    :errors: a list into which messages for detected errors are stored
+    :matchers: a list of embeddedmatcher objects
+
+    This function yields '(filename, starts, ends, code)' tuples.
+
+    :filename: the name of the embedded code, if it is explicitly specified
+               (e.g. "foobar" in "cat >> foobar <<EOF");
+               otherwise None
+    :starts: line number (1-origin), at which embedded code starts (inclusive)
+    :ends: line number (1-origin), at which embedded code ends (exclusive)
+    :code: extracted embedded code, joined into a single string
+
+    >>> class ambigmatcher(object):
+    ...     # mock matcher class to examine implementation of
+    ...     # "ambiguous matching" corner case
+    ...     def __init__(self, desc, matchfunc):
+    ...         self.desc = desc
+    ...         self.matchfunc = matchfunc
+    ...     def startsat(self, line):
+    ...         return self.matchfunc(line)
+    >>> ambig1 = ambigmatcher(b'ambiguous #1',
+    ...                       lambda l: l.startswith(b'  $ cat '))
+    >>> ambig2 = ambigmatcher(b'ambiguous #2',
+    ...                       lambda l: l.endswith(b'<< EOF\\n'))
+    >>> lines = [b'  $ cat > foo.py << EOF\\n']
+    >>> errors = []
+    >>> matchers = [ambig1, ambig2]
+    >>> list(t for t in embedded(b'<dummy>', lines, errors, matchers))
+    []
+    >>> b2s(errors)
+    ['<dummy>:1: ambiguous line for "ambiguous #1", "ambiguous #2"']
+
+    """
+    matcher = None
+    ctx = filename = code = startline = None # for pyflakes
+
+    for lineno, line in enumerate(lines, 1):
+        if not line.endswith(b'\n'):
+            line += b'\n' # to normalize EOF line
+        if matcher: # now, inside embedded code
+            if matcher.endsat(ctx, line):
+                codeatend = matcher.codeatend(ctx, line)
+                if codeatend is not None:
+                    code.append(codeatend)
+                if not matcher.ignores(ctx):
+                    yield (filename, startline, lineno, b''.join(code))
+                matcher = None
+                # DO NOT "continue", because line might start next fragment
+            elif not matcher.isinside(ctx, line):
+                # this is an error of basefile
+                # (if matchers are implemented correctly)
+                errors.append(b'%s:%d: unexpected line for "%s"'
+                              % (basefile, lineno, matcher.desc))
+                # stop extracting embedded code by current 'matcher',
+                # because appearance of unexpected line might mean
+                # that expected end-of-embedded-code line might never
+                # appear
+                matcher = None
+                # DO NOT "continue", because line might start next fragment
+            else:
+                code.append(matcher.codeinside(ctx, line))
+                continue
+
+        # examine whether current line starts embedded code or not
+        assert not matcher
+
+        matched = []
+        for m in matchers:
+            ctx = m.startsat(line)
+            if ctx:
+                matched.append((m, ctx))
+        if matched:
+            if len(matched) > 1:
+                # this is an error of matchers, maybe
+                errors.append(b'%s:%d: ambiguous line for %s' %
+                              (basefile, lineno,
+                               b', '.join([b'"%s"' % m.desc
+                                           for m, c in matched])))
+                # omit extracting embedded code, because choosing
+                # arbitrary matcher from matched ones might fail to
+                # detect the end of embedded code as expected.
+                continue
+            matcher, ctx = matched[0]
+            filename = matcher.filename(ctx)
+            code = []
+            codeatstart = matcher.codeatstart(ctx, line)
+            if codeatstart is not None:
+                code.append(codeatstart)
+                startline = lineno
+            else:
+                startline = lineno + 1
+
+    if matcher:
+        # examine whether EOF ends embedded code, because embedded
+        # code isn't yet ended explicitly
+        if matcher.endsat(ctx, b'\n'):
+            codeatend = matcher.codeatend(ctx, b'\n')
+            if codeatend is not None:
+                code.append(codeatend)
+            if not matcher.ignores(ctx):
+                yield (filename, startline, lineno + 1, b''.join(code))
+        else:
+            # this is an error of basefile
+            # (if matchers are implemented correctly)
+            errors.append(b'%s:%d: unexpected end of file for "%s"'
+                          % (basefile, lineno, matcher.desc))
+
+# heredoc limit mark that makes check tools (e.g. check-code.py) ignore embedded code
+heredocignorelimit = b'NO_CHECK_EOF'
+
+# the pattern to match against cases below, and to return a limit mark
+# string as 'lname' group
+#
+# - << LIMITMARK
+# - << "LIMITMARK"
+# - << 'LIMITMARK'
+heredoclimitpat = br'\s*<<\s*(?P<lquote>["\']?)(?P<limit>\w+)(?P=lquote)'
+
+class fileheredocmatcher(embeddedmatcher):
+    """Detect "cat > FILE << LIMIT" style embedded code
+
+    >>> matcher = fileheredocmatcher(b'heredoc .py file', br'[^<]+\.py')
+    >>> b2s(matcher.startsat(b'  $ cat > file.py << EOF\\n'))
+    ('file.py', '  > EOF\\n')
+    >>> b2s(matcher.startsat(b'  $ cat   >>file.py   <<EOF\\n'))
+    ('file.py', '  > EOF\\n')
+    >>> b2s(matcher.startsat(b'  $ cat>  \\x27any file.py\\x27<<  "EOF"\\n'))
+    ('any file.py', '  > EOF\\n')
+    >>> b2s(matcher.startsat(b"  $ cat > file.py << 'ANYLIMIT'\\n"))
+    ('file.py', '  > ANYLIMIT\\n')
+    >>> b2s(matcher.startsat(b'  $ cat<<ANYLIMIT>"file.py"\\n'))
+    ('file.py', '  > ANYLIMIT\\n')
+    >>> start = b'  $ cat > file.py << EOF\\n'
+    >>> ctx = matcher.startsat(start)
+    >>> matcher.codeatstart(ctx, start)
+    >>> b2s(matcher.filename(ctx))
+    'file.py'
+    >>> matcher.ignores(ctx)
+    False
+    >>> inside = b'  > foo = 1\\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    'foo = 1\\n'
+    >>> end = b'  > EOF\\n'
+    >>> matcher.endsat(ctx, end)
+    True
+    >>> matcher.codeatend(ctx, end)
+    >>> matcher.endsat(ctx, b'  > EOFEOF\\n')
+    False
+    >>> ctx = matcher.startsat(b'  $ cat > file.py << NO_CHECK_EOF\\n')
+    >>> matcher.ignores(ctx)
+    True
+    """
+    _prefix = b'  > '
+
+    def __init__(self, desc, namepat):
+        super(fileheredocmatcher, self).__init__(desc)
+
+        # build the pattern to match against cases below (and ">>"
+        # variants), and to return a target filename string as 'name'
+        # group
+        #
+        # - > NAMEPAT
+        # - > "NAMEPAT"
+        # - > 'NAMEPAT'
+        namepat = (br'\s*>>?\s*(?P<nquote>["\']?)(?P<name>%s)(?P=nquote)'
+                   % namepat)
+        self._fileres = [
+            # "cat > NAME << LIMIT" case
+            re.compile(br'  \$ \s*cat' + namepat + heredoclimitpat),
+            # "cat << LIMIT > NAME" case
+            re.compile(br'  \$ \s*cat' + heredoclimitpat + namepat),
+        ]
+
+    def startsat(self, line):
+        # ctx is (filename, END-LINE-OF-EMBEDDED-CODE) tuple
+        for filere in self._fileres:
+            matched = filere.match(line)
+            if matched:
+                return (matched.group('name'),
+                        b'  > %s\n' % matched.group('limit'))
+
+    def endsat(self, ctx, line):
+        return ctx[1] == line
+
+    def isinside(self, ctx, line):
+        return line.startswith(self._prefix)
+
+    def ignores(self, ctx):
+        return b'  > %s\n' % heredocignorelimit == ctx[1]
+
+    def filename(self, ctx):
+        return ctx[0]
+
+    def codeatstart(self, ctx, line):
+        return None # no embedded code at start line
+
+    def codeatend(self, ctx, line):
+        return None # no embedded code at end line
+
+    def codeinside(self, ctx, line):
+        return line[len(self._prefix):] # strip prefix
+
+####
+# for embedded python script
+
+class pydoctestmatcher(embeddedmatcher):
+    """Detect ">>> code" style embedded python code
+
+    >>> matcher = pydoctestmatcher()
+    >>> startline = b'  >>> foo = 1\\n'
+    >>> matcher.startsat(startline)
+    True
+    >>> matcher.startsat(b'  ... foo = 1\\n')
+    False
+    >>> ctx = matcher.startsat(startline)
+    >>> matcher.filename(ctx)
+    >>> matcher.ignores(ctx)
+    False
+    >>> b2s(matcher.codeatstart(ctx, startline))
+    'foo = 1\\n'
+    >>> inside = b'  >>> foo = 1\\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    'foo = 1\\n'
+    >>> inside = b'  ... foo = 1\\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    'foo = 1\\n'
+    >>> inside = b'  expected output\\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    '\\n'
+    >>> inside = b'  \\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    '\\n'
+    >>> end = b'  $ foo bar\\n'
+    >>> matcher.endsat(ctx, end)
+    True
+    >>> matcher.codeatend(ctx, end)
+    >>> end = b'\\n'
+    >>> matcher.endsat(ctx, end)
+    True
+    >>> matcher.codeatend(ctx, end)
+    """
+    _prefix = b'  >>> '
+    _prefixre = re.compile(br'  (>>>|\.\.\.) ')
+
+    # If a line matches _outputre but not _prefixre, that line
+    # is "an expected output line" (= not a part of code fragment).
+    #
+    # Strictly speaking, a line matching against "(#if|#else|#endif)"
+    # is also treated similarly in "inline python code" semantics by
+    # run-tests.py. But "directive line inside inline python code"
+    # should be rejected by Mercurial reviewers. Therefore, this
+    # regexp does not match against such directive lines.
+    _outputre = re.compile(br'  $|  [^$]')
+
+    def __init__(self):
+        super(pydoctestmatcher, self).__init__(b"doctest style python code")
+
+    def startsat(self, line):
+        # ctx is "True"
+        return line.startswith(self._prefix)
+
+    def endsat(self, ctx, line):
+        return not (self._prefixre.match(line) or self._outputre.match(line))
+
+    def isinside(self, ctx, line):
+        return True # always true, if not yet ended
+
+    def ignores(self, ctx):
+        return False # should be checked always
+
+    def filename(self, ctx):
+        return None # no filename
+
+    def codeatstart(self, ctx, line):
+        return line[len(self._prefix):] # strip prefix '  >>> '/'  ... '
+
+    def codeatend(self, ctx, line):
+        return None # no embedded code at end line
+
+    def codeinside(self, ctx, line):
+        if self._prefixre.match(line):
+            return line[len(self._prefix):] # strip prefix '  >>> '/'  ... '
+        return b'\n' # an expected output line is treated as an empty line
+
+class pyheredocmatcher(embeddedmatcher):
+    """Detect "python << LIMIT" style embedded python code
+
+    >>> matcher = pyheredocmatcher()
+    >>> b2s(matcher.startsat(b'  $ python << EOF\\n'))
+    '  > EOF\\n'
+    >>> b2s(matcher.startsat(b'  $ $PYTHON   <<EOF\\n'))
+    '  > EOF\\n'
+    >>> b2s(matcher.startsat(b'  $ "$PYTHON"<<  "EOF"\\n'))
+    '  > EOF\\n'
+    >>> b2s(matcher.startsat(b"  $ $PYTHON << 'ANYLIMIT'\\n"))
+    '  > ANYLIMIT\\n'
+    >>> matcher.startsat(b'  $ "$PYTHON" < EOF\\n')
+    >>> start = b'  $ python << EOF\\n'
+    >>> ctx = matcher.startsat(start)
+    >>> matcher.codeatstart(ctx, start)
+    >>> matcher.filename(ctx)
+    >>> matcher.ignores(ctx)
+    False
+    >>> inside = b'  > foo = 1\\n'
+    >>> matcher.endsat(ctx, inside)
+    False
+    >>> matcher.isinside(ctx, inside)
+    True
+    >>> b2s(matcher.codeinside(ctx, inside))
+    'foo = 1\\n'
+    >>> end = b'  > EOF\\n'
+    >>> matcher.endsat(ctx, end)
+    True
+    >>> matcher.codeatend(ctx, end)
+    >>> matcher.endsat(ctx, b'  > EOFEOF\\n')
+    False
+    >>> ctx = matcher.startsat(b'  $ python << NO_CHECK_EOF\\n')
+    >>> matcher.ignores(ctx)
+    True
+    """
+    _prefix = b'  > '
+
+    _startre = re.compile(br'  \$ (\$PYTHON|"\$PYTHON"|python).*' +
+                          heredoclimitpat)
+
+    def __init__(self):
+        super(pyheredocmatcher, self).__init__(b"heredoc python invocation")
+
+    def startsat(self, line):
+        # ctx is END-LINE-OF-EMBEDDED-CODE
+        matched = self._startre.match(line)
+        if matched:
+            return b'  > %s\n' % matched.group('limit')
+
+    def endsat(self, ctx, line):
+        return ctx == line
+
+    def isinside(self, ctx, line):
+        return line.startswith(self._prefix)
+
+    def ignores(self, ctx):
+        return b'  > %s\n' % heredocignorelimit == ctx
+
+    def filename(self, ctx):
+        return None # no filename
+
+    def codeatstart(self, ctx, line):
+        return None # no embedded code at start line
+
+    def codeatend(self, ctx, line):
+        return None # no embedded code at end line
+
+    def codeinside(self, ctx, line):
+        return line[len(self._prefix):] # strip prefix
+
+_pymatchers = [
+    pydoctestmatcher(),
+    pyheredocmatcher(),
+    # use '[^<]+' instead of '\S+', in order to match against
+    # paths containing whitespace
+    fileheredocmatcher(b'heredoc .py file', br'[^<]+\.py'),
+]
+
+def pyembedded(basefile, lines, errors):
+    return embedded(basefile, lines, errors, _pymatchers)
+
+####
+# for embedded shell script
+
+_shmatchers = [
+    # use '[^<]+' instead of '\S+', in order to match against
+    # paths containing whitespace
+    fileheredocmatcher(b'heredoc .sh file', br'[^<]+\.sh'),
+]
+
+def shembedded(basefile, lines, errors):
+    return embedded(basefile, lines, errors, _shmatchers)
+
+####
+# for embedded hgrc configuration
+
+_hgrcmatchers = [
+    # use '[^<]+' instead of '\S+', in order to match against
+    # paths containing whitespace
+    fileheredocmatcher(b'heredoc hgrc file',
+                       br'(([^/<]+/)+hgrc|\$HGRCPATH|\${HGRCPATH})'),
+]
+
+def hgrcembedded(basefile, lines, errors):
+    return embedded(basefile, lines, errors, _hgrcmatchers)
+
+####
+
+if __name__ == "__main__":
+    import optparse
+    import sys
+
+    def showembedded(basefile, lines, embeddedfunc, opts):
+        errors = []
+        for name, starts, ends, code in embeddedfunc(basefile, lines, errors):
+            if not name:
+                name = b'<anonymous>'
+            writeout(b"%s:%d: %s starts\n" % (basefile, starts, name))
+            if opts.verbose and code:
+                writeout(b"  |%s\n" %
+                         b"\n  |".join(l for l in code.splitlines()))
+            writeout(b"%s:%d: %s ends\n" % (basefile, ends, name))
+        for e in errors:
+            writeerr(b"%s\n" % e)
+        return len(errors)
+
+    def applyembedded(args, embeddedfunc, opts):
+        ret = 0
+        if args:
+            for f in args:
+                with opentext(f) as fp:
+                    if showembedded(bytestr(f), fp, embeddedfunc, opts):
+                        ret = 1
+        else:
+            lines = [l for l in stdin.readlines()]
+            if showembedded(b'<stdin>', lines, embeddedfunc, opts):
+                ret = 1
+        return ret
+
+    commands = {}
+    def command(name, desc):
+        def wrap(func):
+            commands[name] = (desc, func)
+        return wrap
+
+    @command("pyembedded", "detect embedded python script")
+    def pyembeddedcmd(args, opts):
+        return applyembedded(args, pyembedded, opts)
+
+    @command("shembedded", "detect embedded shell script")
+    def shembeddedcmd(args, opts):
+        return applyembedded(args, shembedded, opts)
+
+    @command("hgrcembedded", "detect embedded hgrc configuration")
+    def hgrcembeddedcmd(args, opts):
+        return applyembedded(args, hgrcembedded, opts)
+
+    availablecommands = "\n".join(["  - %s: %s" % (key, value[0])
+                                   for key, value in commands.items()])
+
+    parser = optparse.OptionParser("""%prog COMMAND [file ...]
+
+Pick up embedded code fragments from the given file(s) or stdin, and
+list their start/end lines in standard compiler format
+("FILENAME:LINENO:").
+
+Available commands are:
+""" + availablecommands + """
+""")
+    parser.add_option("-v", "--verbose",
+                      help="enable additional output (e.g. actual code)",
+                      action="store_true")
+    (opts, args) = parser.parse_args()
+
+    if not args or args[0] not in commands:
+        parser.print_help()
+        sys.exit(255)
+
+    sys.exit(commands[args[0]][1](args[1:], opts))
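
Besides the command-line front end above, pyembedded(), shembedded() and hgrcembedded() are importable entry points for check tools. A hypothetical consumer (the test file name is made up):

from testparseutil import pyembedded

errors = []
with open('test-example.t', 'rb') as fp:
    for name, starts, ends, code in pyembedded(b'test-example.t', fp, errors):
        label = (name or b'<anonymous>').decode('latin1')
        # 'code' is the whole embedded python fragment as one bytes string
        print('%s: lines %d-%d, %d bytes' % (label, starts, ends, len(code)))
for e in errors:
    print(e.decode('latin1'))
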
--- a/contrib/undumprevlog	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/undumprevlog	Mon Oct 22 14:46:06 2018 -0400
@@ -7,7 +7,9 @@
 
 import sys
 from mercurial import (
+    encoding,
     node,
+    pycompat,
     revlog,
     transaction,
     vfs as vfsmod,
@@ -19,17 +21,17 @@
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
-opener = vfsmod.vfs('.', False)
-tr = transaction.transaction(sys.stderr.write, opener, {'store': opener},
-                             "undump.journal")
+opener = vfsmod.vfs(b'.', False)
+tr = transaction.transaction(sys.stderr.write, opener, {b'store': opener},
+                             b"undump.journal")
 while True:
     l = sys.stdin.readline()
     if not l:
         break
     if l.startswith("file:"):
-        f = l[6:-1]
+        f = encoding.strtolocal(l[6:-1])
         r = revlog.revlog(opener, f)
-        print(f)
+        pycompat.stdout.write(b'%s\n' % f)
     elif l.startswith("node:"):
         n = node.bin(l[6:-1])
     elif l.startswith("linkrev:"):
@@ -41,7 +43,7 @@
     elif l.startswith("length:"):
         length = int(l[8:-1])
         sys.stdin.readline() # start marker
-        d = sys.stdin.read(length)
+        d = encoding.strtolocal(sys.stdin.read(length))
         sys.stdin.readline() # end marker
         r.addrevision(d, tr, lr, p1, p2)
 
--- a/contrib/wix/help.wxs	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/wix/help.wxs	Mon Oct 22 14:46:06 2018 -0400
@@ -43,12 +43,16 @@
           <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'>
             <File Id="internals.bundle2.txt"      Name="bundle2.txt" />
             <File Id="internals.bundles.txt"      Name="bundles.txt" KeyPath="yes" />
+            <File Id="internals.cbor.txt"         Name="cbor.txt" />
             <File Id="internals.censor.txt"       Name="censor.txt" />
             <File Id="internals.changegroups.txt" Name="changegroups.txt" />
             <File Id="internals.config.txt"       Name="config.txt" />
+            <File Id="internals.linelog.txt"      Name="linelog.txt" />
             <File Id="internals.requirements.txt" Name="requirements.txt" />
             <File Id="internals.revlogs.txt"      Name="revlogs.txt" />
             <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
+            <File Id="internals.wireprotocolrpc.txt" Name="wireprotocolrpc.txt" />
+            <File Id="internals.wireprotocolv2.txt" Name="wireprotocolv2.txt" />
           </Component>
         </Directory>
 
--- a/contrib/zsh_completion	Wed Oct 10 12:25:28 2018 -0400
+++ b/contrib/zsh_completion	Mon Oct 22 14:46:06 2018 -0400
@@ -82,7 +82,7 @@
 
   if [[ -z "$cmd" ]]
   then
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
     ':mercurial command:_hg_commands'
     return
   fi
@@ -119,7 +119,7 @@
     _hg_cmd_${cmd}
   else
     # complete unknown commands normally
-    _arguments -s -w : $_hg_global_opts \
+    _arguments -s -S : $_hg_global_opts \
       '*:files:_hg_files'
   fi
 }
@@ -139,7 +139,7 @@
   typeset -gA _hg_alias_list
   local hline cmd cmdalias
 
-  _call_program hg hg debugcomplete -v | while read -A hline
+  _call_program hg HGPLAINEXCEPT=alias hg debugcomplete -v | while read -A hline
   do
     cmd=$hline[1]
     _hg_cmd_list+=($cmd)
@@ -193,21 +193,13 @@
 
 # likely merge candidates
 _hg_mergerevs() {
-  typeset -a heads
-  local myrev
+  typeset -a heads branches
+  local revset='sort(head() and not ., -rev)'
 
-  heads=(${(f)"$(_hg_cmd heads --template '{rev}:{branch}\\n')"})
-  # exclude own revision
-  myrev=$(_hg_cmd log -r . --template '{rev}:{branch}\\n')
-  heads=(${heads:#$myrev})
-
+  heads=(${(f)"$(_hg_cmd log -r '$revset' --template '{rev}:{branch}\\n')"})
   (( $#heads )) && _describe -t heads 'heads' heads
 
-  branches=(${(f)"$(_hg_cmd heads --template '{branch}\\n')"})
-  # exclude own revision
-  myrev=$(_hg_cmd log -r . --template '{branch}\\n')
-  branches=(${branches:#$myrev})
-
+  branches=(${(S)heads/#*:/})
   (( $#branches )) && _describe -t branches 'branches' branches
 }
 
@@ -245,10 +237,10 @@
   _wanted files expl 'missing files' _multi_parts / status_files
 }
 
-_hg_modified() {
+_hg_committable() {
   typeset -a status_files
-  _hg_status m
-  _wanted files expl 'modified files' _multi_parts / status_files
+  _hg_status mar
+  _wanted files expl 'modified, added or removed files' _multi_parts / status_files
 }
 
 _hg_resolve() {
@@ -281,6 +273,23 @@
     (( $#items )) && _describe -t config 'config item' items
 }
 
+_hg_internal_merge_tools=(
+  \\:dump \\:fail \\:forcedump \\:local \\:merge \\:merge-local \\:merge-other
+  \\:merge3 \\:other \\:prompt \\:tagmerge \\:union
+)
+
+_hg_merge_tools() {
+  typeset -a external_tools
+  _describe -t internal_tools 'internal merge tools' _hg_internal_merge_tools
+  external_tools=(${(f)"$(_hg_cmd showconfig merge-tools | cut -d . -f 2)"})
+  (( $#external_tools )) && _describe -t external_tools 'external merge tools' external_tools
+}
+
+_hg_shelves() {
+  shelves=("${(f)$(_hg_cmd shelve -ql)}")
+  (( $#shelves )) && _describe -t shelves 'shelves' shelves
+}
+
 _hg_addremove() {
   _alternative 'files:unknown files:_hg_unknown' \
     'files:missing files:_hg_missing'
@@ -371,39 +380,35 @@
 
 # Common options
 _hg_global_opts=(
-    '(--repository -R)'{-R+,--repository=}'[repository root directory]:repository:_files -/'
-    '--cwd[change working directory]:new working directory:_files -/'
-    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, assume yes for any required answers]'
+    '(--repository -R)'{-R+,--repository=}'[repository root directory or name of overlay bundle file]:repository:_files -/'
+    '--cwd=[change working directory]:new working directory:_files -/'
+    '(--noninteractive -y)'{-y,--noninteractive}'[do not prompt, automatically pick the first choice for all prompts]'
     '(--verbose -v)'{-v,--verbose}'[enable additional output]'
-    '*--config[set/override config option]:defined config items:_hg_config'
+    '*--config=[set/override config option]:defined config items:_hg_config'
     '(--quiet -q)'{-q,--quiet}'[suppress output]'
     '(--help -h)'{-h,--help}'[display help and exit]'
-    '--debug[debug mode]'
+    '--debug[enable debugging output]'
     '--debugger[start debugger]'
-    '--encoding[set the charset encoding]'
-    '--encodingmode[set the charset encoding mode]'
-    '--lsprof[print improved command execution profile]'
-    '--traceback[print traceback on exception]'
+    '--encoding=[set the charset encoding]:encoding'
+    '--encodingmode=[set the charset encoding mode]:encoding mode'
+    '--traceback[always print a traceback on exception]'
     '--time[time how long the command takes]'
-    '--profile[profile]'
+    '--profile[print command execution profile]'
     '--version[output version information and exit]'
+    '--hidden[consider hidden changesets]'
+    '--color=[when to colorize]:when:(true false yes no always auto never debug)'
+    '--pager=[when to paginate (default: auto)]:when:(true false yes no always auto never)'
 )
 
 _hg_pat_opts=(
-  '*'{-I+,--include=}'[include names matching the given patterns]:dir:_files -W $(_hg_cmd root) -/'
-  '*'{-X+,--exclude=}'[exclude names matching the given patterns]:dir:_files -W $(_hg_cmd root) -/')
-
-_hg_clone_opts=(
-  $_hg_remote_opts
-  '(--noupdate -U)'{-U,--noupdate}'[do not update the new working directory]'
-  '--pull[use pull protocol to copy metadata]'
-  '--uncompressed[use uncompressed transfer (fast over LAN)]')
+  '*'{-I+,--include=}'[include names matching the given patterns]:pattern:_files -W $(_hg_cmd root) -/'
+  '*'{-X+,--exclude=}'[exclude names matching the given patterns]:pattern:_files -W $(_hg_cmd root) -/')
 
 _hg_date_user_opts=(
   '(--currentdate -D)'{-D,--currentdate}'[record the current date as commit date]'
   '(--currentuser -U)'{-U,--currentuser}'[record the current user as committer]'
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:'
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:')
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date'
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user')
 
 _hg_gitlike_opts=(
   '(--git -g)'{-g,--git}'[use git extended diff format]')
@@ -411,10 +416,13 @@
 _hg_diff_opts=(
   $_hg_gitlike_opts
   '(--text -a)'{-a,--text}'[treat all files as text]'
-  '--nodates[omit dates from diff headers]')
+  '--binary[generate binary diffs in git mode (default)]'
+  '--nodates[omit dates from diff headers]'
+)
 
 _hg_mergetool_opts=(
-  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:tool:')
+  '(--tool -t)'{-t+,--tool=}'[specify merge tool]:merge tool:_hg_merge_tools'
+)
 
 _hg_dryrun_opts=(
   '(--dry-run -n)'{-n,--dry-run}'[do not perform actions, just print output]')
@@ -422,32 +430,39 @@
 _hg_ignore_space_opts=(
   '(--ignore-all-space -w)'{-w,--ignore-all-space}'[ignore white space when comparing lines]'
   '(--ignore-space-change -b)'{-b,--ignore-space-change}'[ignore changes in the amount of white space]'
-  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]')
+  '(--ignore-blank-lines -B)'{-B,--ignore-blank-lines}'[ignore changes whose lines are all blank]'
+  '(--ignore-space-at-eol -Z)'{-Z,--ignore-space-at-eol}'[ignore changes in whitespace at EOL]'
+)
 
-_hg_style_opts=(
-  '--style[display using template map file]:'
-  '--template[display with template]:')
+_hg_template_opts=(
+  '(--template -T)'{-T+,--template=}'[display with template]:template'
+)
 
 _hg_log_opts=(
-  $_hg_global_opts $_hg_style_opts $_hg_gitlike_opts
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:'
+  $_hg_global_opts $_hg_template_opts $_hg_gitlike_opts
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit'
   '(--no-merges -M)'{-M,--no-merges}'[do not show merges]'
   '(--patch -p)'{-p,--patch}'[show patch]'
   '--stat[output diffstat-style summary of changes]'
+  '(--graph -G)'{-G,--graph}'[show the revision DAG]'
 )
 
 _hg_commit_opts=(
   '(-m --message -l --logfile --edit -e)'{-e,--edit}'[edit commit message]'
-  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message:'
+  '(-e --edit -l --logfile --message -m)'{-m+,--message=}'[use <text> as commit message]:message'
   '(-e --edit -m --message --logfile -l)'{-l+,--logfile=}'[read the commit message from <file>]:log file:_files')
 
 _hg_remote_opts=(
-  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:'
-  '--remotecmd[specify hg command to run on the remote side]:')
+  '(--ssh -e)'{-e+,--ssh=}'[specify ssh command to use]:command'
+  '--remotecmd=[specify hg command to run on the remote side]:remote command'
+  '--insecure[do not verify server certificate (ignoring web.cacerts config)]'
+)
 
-_hg_branch_bmark_opts=(
-  '(--bookmark -B)'{-B+,--bookmark=}'[specify bookmark(s)]:bookmark:_hg_bookmarks'
-  '(--branch -b)'{-b+,--branch=}'[specify branch(es)]:branch:_hg_branches'
+_hg_clone_opts=(
+  $_hg_remote_opts
+  '(--noupdate -U)'{-U,--noupdate}'[do not update the new working directory]'
+  '--pull[use pull protocol to copy metadata]'
+  '--stream[clone with minimal data processing]'
 )
 
 _hg_subrepos_opts=(
@@ -458,50 +473,53 @@
 }
 
 _hg_cmd_add() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
   '*:unknown files:_hg_unknown'
 }
 
 _hg_cmd_addremove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:unknown or missing files:_hg_addremove'
 }
 
 _hg_cmd_annotate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_ignore_space_opts $_hg_pat_opts \
   '(--rev -r)'{-r+,--rev=}'[annotate the specified revision]:revision:_hg_labels' \
-  '(--follow -f)'{-f,--follow}'[follow file copies and renames]' \
+  "--no-follow[don't follow copies and renames]" \
   '(--text -a)'{-a,--text}'[treat all files as text]' \
-  '(--user -u)'{-u,--user}'[list the author]' \
-  '(--date -d)'{-d,--date}'[list the date]' \
+  '(--user -u)'{-u,--user}'[list the author (long with -v)]' \
+  '(--file -f)'{-f,--file}'[list the filename]' \
+  '(--date -d)'{-d,--date}'[list the date (short with -q)]' \
   '(--number -n)'{-n,--number}'[list the revision number (default)]' \
   '(--changeset -c)'{-c,--changeset}'[list the changeset]' \
+  '(--line-number -l)'{-l,--line-number}'[show line number at the first appearance]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_archive() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '--no-decode[do not pass files through decoders]' \
-  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:' \
+  '(--prefix -p)'{-p+,--prefix=}'[directory prefix for files in archive]:prefix' \
   '(--rev -r)'{-r+,--rev=}'[revision to distribute]:revision:_hg_labels' \
   '(--type -t)'{-t+,--type=}'[type of distribution to create]:archive type:(files tar tbz2 tgz uzip zip)' \
   '*:destination:_files'
 }
 
 _hg_cmd_backout() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
     '--merge[merge with old dirstate parent after backout]' \
-    '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-    '--parent[parent to choose when backing out merge]' \
-    '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-    '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
-    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files'
+    '--no-commit[do not commit]' \
+    '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+    '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+    '(--rev -r 1)'{-r+,--rev=}'[revision to backout]:revision:_hg_labels' \
+    '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
+    '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
+    ':revision:_hg_labels'
 }
 
 _hg_cmd_bisect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(-)'{-r,--reset}'[reset bisect state]' \
   '(--extend -e)'{-e,--extend}'[extend the bisect range]' \
   '(--good -g --bad -b --skip -s --reset -r)'{-g,--good}'[mark changeset good]'::revision:_hg_labels \
@@ -512,391 +530,428 @@
 }
 
 _hg_cmd_bookmarks() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[force]' \
-  '(--inactive -i)'{-i,--inactive}'[mark a bookmark inactive]' \
-  '(--rev -r --delete -d --rename -m)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  '(--rev -r --delete -d --rename -m)'{-d,--delete}'[delete a given bookmark]' \
-  '(--rev -r --delete -d --rename -m)'{-m+,--rename=}'[rename a given bookmark]:bookmark:_hg_bookmarks' \
+  '(--inactive -i --delete -d --list -l)'{-i,--inactive}'[mark a bookmark inactive]' \
+  '(--rev -r --delete -d --rename -m --list -l)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '(--rev -r --delete -d --rename -m --list -l --inactive -i)'{-d,--delete}'[delete a given bookmark]' \
+  '(--rev -r --delete -d --rename -m --list -l)'{-m+,--rename=}'[rename a given bookmark]:bookmark:_hg_bookmarks' \
+  '(--inactive -i --delete -d --list -l)'{-l,--list}'[list existing bookmarks]' \
   ':bookmark:_hg_bookmarks'
 }
 
 _hg_cmd_branch() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[set branch name even if it shadows an existing branch]' \
   '(--clean -C)'{-C,--clean}'[reset branch name to parent branch name]'
 }
 
 _hg_cmd_branches() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--active -a)'{-a,--active}'[show only branches that have unmerge heads]' \
+  _arguments -s -S : $_hg_global_opts \
   '(--closed -c)'{-c,--closed}'[show normal and closed branches]'
 }
 
 _hg_cmd_bundle() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
-  '(--force -f)'{-f,--force}'[run even when remote repository is unrelated]' \
-  '(2)*--base[a base changeset to specify instead of a destination]:revision:_hg_labels' \
-  '(--branch -b)'{-b+,--branch=}'[a specific branch to bundle]:' \
-  '(--rev -r)'{-r+,--rev=}'[changeset(s) to bundle]:' \
-  '--all[bundle all changesets in the repository]' \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '(2)*--base=[a base changeset assumed to be available at the destination]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[a specific branch you would like to bundle]:branch:_hg_branches' \
+  '*'{-r+,--rev=}'[a changeset intended to be added to the destination]:revision:_hg_labels' \
+  '(--all -a)'{-a,--all}'[bundle all changesets in the repository]' \
+  '(--type -t)'{-t+,--type=}'[bundle compression type to use (default: bzip2)]:bundle type' \
   ':output file:_files' \
   ':destination repository:_files -/'
 }
 
 _hg_cmd_cat() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
+  '(--rev -r)'{-r+,--rev=}'[print the given revision]:revision:_hg_labels' \
   '--decode[apply any matching decode filter]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_clone() {
-  _arguments -s -w : $_hg_global_opts $_hg_clone_opts \
-  '(--rev -r)'{-r+,--rev=}'[a changeset you would like to have after cloning]:' \
-  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag or branch to check out]:' \
-  '(--branch -b)'{-b+,--branch=}'[clone only the specified branch]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_clone_opts \
+  '*'{-r+,--rev=}'[do not clone everything, but include this changeset and its ancestors]:revision' \
+  '(--updaterev -u)'{-u+,--updaterev=}'[revision, tag, or branch to check out]:revision' \
+  '*'{-b+,--branch=}"[do not clone everything, but include this branch's changesets and their ancestors]:branch" \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_commit() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
-  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text:' \
+  '(--message -m)'{-m+,--message=}'[use <text> as commit message]:text' \
   '(--logfile -l)'{-l+,--logfile=}'[read commit message from <file>]:log file:_files' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '--amend[amend the parent of the working dir]' \
-  '--close-branch[mark a branch as closed]' \
-  '*:file:_hg_files'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '--amend[amend the parent of the working directory]' \
+  '--close-branch[mark a branch head as closed]' \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
+  '(--secret -s)'{-s,--secret}'[use the secret phase for committing]' \
+  '*:file:_hg_committable'
 }
 
 _hg_cmd_copy() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a copy that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_diff() {
+  local context state state_descr line ret=1
   typeset -A opt_args
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
+
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts $_hg_ignore_space_opts \
                      $_hg_pat_opts $_hg_subrepos_opts \
   '*'{-r+,--rev=}'[revision]:revision:_hg_revrange' \
+  '--noprefix[omit a/ and b/ prefixes from filenames]' \
   '(--show-function -p)'{-p,--show-function}'[show which function each change is in]' \
-  '(--change -c)'{-c+,--change=}'[change made by revision]:' \
-  '(--text -a)'{-a,--text}'[treat all files as text]' \
+  '(--change -c)'{-c+,--change=}'[change made by revision]:revision:_hg_labels' \
   '--reverse[produce a diff that undoes the changes]' \
-  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:' \
+  '(--unified -U)'{-U+,--unified=}'[number of lines of context to show]:count' \
   '--stat[output diffstat-style summary of changes]' \
-  '*:file:->diff_files'
+  '--root=[produce diffs relative to subdirectory]:directory:_files -/' \
+  '*:file:->diff_files' && ret=0
 
   if [[ $state == 'diff_files' ]]
   then
-    if [[ -n $opt_args[-r] ]]
+    if [[ -n ${opt_args[(I)-r|--rev]} ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
-      _hg_modified
+      _hg_committable && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_export() {
-  _arguments -s -w : $_hg_global_opts $_hg_diff_opts \
-  '(--outout -o)'{-o+,--output=}'[print output to file with formatted name]:filespec:' \
+  _arguments -s -S : $_hg_global_opts $_hg_diff_opts \
+  '(--bookmark -B)'{-B+,--bookmark=}'[export changes only reachable by given bookmark]:bookmark:_hg_bookmarks' \
+  '(--output -o)'{-o+,--output=}'[print output to file with formatted name]:format string' \
   '--switch-parent[diff against the second parent]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[revisions to export]:revision:_hg_labels' \
   '*:revision:_hg_labels'
 }
 
+_hg_cmd_files() {
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  '(--rev -r)'{-r+,--rev=}'[search the repository as it is in revision]:revision:_hg_labels' \
+  '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
+  '*:file:_hg_files'
+}
+
 _hg_cmd_forget() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  '(--interactive -i)'{-i,--interactive}'[use interactive mode]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_graft() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
                      $_hg_date_user_opts $_hg_mergetool_opts \
-  '(--continue -c)'{-c,--continue}'[resume interrupted graft]' \
+  '*'{-r+,--rev=}'[revisions to graft]:revision:_hg_labels' \
+  '(--continue -c --abort -a)'{-c,--continue}'[resume interrupted graft]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort interrupted graft]' \
   '(--edit -e)'{-e,--edit}'[invoke editor on commit messages]' \
   '--log[append graft info to log message]' \
+  "--no-commit[don't commit, just apply the changes in working directory]" \
+  '(--force -f)'{-f,--force}'[force graft]' \
   '*:revision:_hg_labels'
 }
 
 _hg_cmd_grep() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--print0 -0)'{-0,--print0}'[end filenames with NUL]' \
-  '--all[print all revisions with matches]' \
-  '(--follow -f)'{-f,--follow}'[follow changeset or file history]' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--print0 -0)'{-0,--print0}'[end fields with NUL]' \
+  '--diff[print all revisions when the term was introduced or removed]' \
+  '(--text -a)'{-a,--text}'[treat all files as text]' \
+  '(--follow -f)'{-f,--follow}'[follow changeset history, or file history across copies and renames]' \
   '(--ignore-case -i)'{-i,--ignore-case}'[ignore case when matching]' \
-  '(--files-with-matches -l)'{-l,--files-with-matches}'[print only filenames and revs that match]' \
+  '(--files-with-matches -l)'{-l,--files-with-matches}'[print only filenames and revisions that match]' \
   '(--line-number -n)'{-n,--line-number}'[print matching line numbers]' \
-  '*'{-r+,--rev=}'[search in given revision range]:revision:_hg_revrange' \
-  '(--user -u)'{-u,--user}'[print user who committed change]' \
-  '(--date -d)'{-d,--date}'[print date of a changeset]' \
+  '*'{-r+,--rev=}'[only search files changed within revision range]:revision:_hg_revrange' \
+  '(--user -u)'{-u,--user}'[list the author (long with -v)]' \
+  '(--date -d)'{-d,--date}'[list the date (short with -q)]' \
   '1:search pattern:' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_heads() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
   '(--topo -t)'{-t,--topo}'[show topological heads only]' \
   '(--closed -c)'{-c,--closed}'[show normal and closed branch heads]' \
-  '(--rev -r)'{-r+,--rev=}'[show only heads which are descendants of rev]:revision:_hg_labels'
+  '(--rev -r)'{-r+,--rev=}'[show only heads which are descendants of revision]:revision:_hg_labels'
 }
 
 _hg_cmd_help() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--extension -e)'{-e,--extension}'[show only help for extensions]' \
   '(--command -c)'{-c,--command}'[show only help for commands]' \
-  '(--keyword -k)'{-k+,--keyword}'[show topics matching keyword]' \
+  '(--keyword -k)'{-k,--keyword}'[show topics matching keyword]' \
+  '*'{-s+,--system=}'[show help for specific platform(s)]:platform:(windows vms plan9 unix)' \
   '*:mercurial help topic:_hg_help_topics'
 }
 
 _hg_cmd_identify() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
-  '(--rev -r)'{-r+,--rev=}'[identify the specified rev]:revision:_hg_labels' \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
+  '(--rev -r)'{-r+,--rev=}'[identify the specified revision]:revision:_hg_labels' \
   '(--num -n)'{-n,--num}'[show local revision number]' \
   '(--id -i)'{-i,--id}'[show global revision id]' \
   '(--branch -b)'{-b,--branch}'[show branch]' \
-  '(--bookmark -B)'{-B,--bookmark}'[show bookmarks]' \
+  '(--bookmarks -B)'{-B,--bookmarks}'[show bookmarks]' \
   '(--tags -t)'{-t,--tags}'[show tags]'
 }
 
 _hg_cmd_import() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts \
-  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count:' \
-  '(--force -f)'{-f,--force}'[skip check for outstanding uncommitted changes]' \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts \
+  '(--strip -p)'{-p+,--strip=}'[directory strip option for patch (default: 1)]:count' \
   '--bypass[apply patch without touching the working directory]' \
   '--no-commit[do not commit, just update the working directory]' \
   '--partial[commit even if some hunks fail]' \
-  '--exact[apply patch to the nodes from which it was generated]' \
+  '--exact[abort if patch would apply lossily]' \
+  '--prefix=[apply patch to subdirectory]:directory:_files -/' \
   '--import-branch[use any branch information in patch (implied by --exact)]' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
-  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
+  '(--similarity -s)'{-s+,--similarity=}'[guess renamed files by similarity (0<=s<=100)]:similarity' \
   '*:patch:_files'
 }
 
 _hg_cmd_incoming() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
-                     $_hg_subrepos_opts \
-  '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision up to which you would like to pull]:revision:_hg_labels' \
+  _arguments -s -S : $_hg_log_opts $_hg_remote_opts $_hg_subrepos_opts \
+  '(--force -f)'{-f,--force}'[run even if remote repository is unrelated]' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
-  '--bundle[file to store the bundles into]:bundle file:_files' \
+  '--bundle=[file to store the bundles into]:bundle file:_files' \
+  '(--bookmarks -B)'{-B,--bookmarks}'[compare bookmarks]' \
+  '*'{-b+,--branch=}'[a specific branch you would like to pull]:branch:_hg_branches' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_init() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts \
-  ':dir:_files -/'
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
+  ':directory:_files -/'
 }
 
 _hg_cmd_locate() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--rev -r)'{-r+,--rev=}'[search repository as it stood at revision]:revision:_hg_labels' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--rev -r)'{-r+,--rev=}'[search the repository as it is in revision]:revision:_hg_labels' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '(--fullpath -f)'{-f,--fullpath}'[print complete paths]' \
+  '(--fullpath -f)'{-f,--fullpath}'[print complete paths from the filesystem root]' \
   '*:search pattern:_hg_files'
 }
 
 _hg_cmd_log() {
-  _arguments -s -w : $_hg_log_opts $_hg_pat_opts \
-  '(--follow --follow-first -f)'{-f,--follow}'[follow changeset or history]' \
-  '(-f --follow)--follow-first[only follow the first parent of merge changesets]' \
+  _arguments -s -S : $_hg_log_opts $_hg_pat_opts \
+  '(--follow -f)'{-f,--follow}'[follow changeset history, or file history across copies and renames]' \
   '(--copies -C)'{-C,--copies}'[show copied files]' \
-  '(--keyword -k)'{-k+,--keyword}'[search for a keyword]:' \
+  '*'{-k+,--keyword=}'[search for a keyword]:keyword' \
   '*'{-r+,--rev=}'[show the specified revision or revset]:revision:_hg_revrange' \
+  '--removed[include revisions where files were removed]' \
   '(--only-merges -m)'{-m,--only-merges}'[show only merges]' \
-  '(--prune -P)'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
-  '(--graph -G)'{-G,--graph}'[show the revision DAG]' \
-  '(--branch -b)'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
-  '(--user -u)'{-u+,--user=}'[revisions committed by user]:user:' \
-  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date:' \
+  '*'{-P+,--prune=}'[do not display revision or any of its ancestors]:revision:_hg_labels' \
+  '*'{-b+,--branch=}'[show changesets within the given named branch]:branch:_hg_branches' \
+  '*'{-u+,--user=}'[revisions committed by user]:user' \
+  '(--date -d)'{-d+,--date=}'[show revisions matching date spec]:date' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_manifest() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--all[list files from all revisions]' \
   '(--rev -r)'{-r+,--rev=}'[revision to display]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
 _hg_cmd_merge() {
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts \
-  '(--force -f)'{-f,--force}'[force a merge with outstanding changes]' \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
   '(--rev -r 1)'{-r+,--rev=}'[revision to merge]:revision:_hg_mergerevs' \
   '(--preview -P)'{-P,--preview}'[review revisions to merge (no merge is performed)]' \
+  '(- :)--abort[abort the ongoing merge]' \
   ':revision:_hg_mergerevs'
 }
 
 _hg_cmd_outgoing() {
-  _arguments -s -w : $_hg_log_opts $_hg_branch_bmark_opts $_hg_remote_opts \
-                     $_hg_subrepos_opts \
-  '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '*'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_revrange' \
+  _arguments -s -S : $_hg_log_opts $_hg_remote_opts $_hg_subrepos_opts \
+  '(--force -f)'{-f,--force}'[run even when the destination is unrelated]' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_revrange' \
   '(--newest-first -n)'{-n,--newest-first}'[show newest record first]' \
+  '(--bookmarks -B)'{-B,--bookmarks}'[compare bookmarks]' \
+  '*'{-b+,--branch=}'[a specific branch you would like to push]:branch:_hg_branches' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_parents() {
-  _arguments -s -w : $_hg_global_opts $_hg_style_opts \
-  '(--rev -r)'{-r+,--rev=}'[show parents of the specified rev]:revision:_hg_labels' \
+  _arguments -s -S : $_hg_global_opts $_hg_template_opts \
+  '(--rev -r)'{-r+,--rev=}'[show parents of the specified revision]:revision:_hg_labels' \
   ':last modified file:_hg_files'
 }
 
 _hg_cmd_paths() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':path:_hg_paths'
 }
 
 _hg_cmd_phase() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--public -p)'{-p,--public}'[set changeset phase to public]' \
-  '(--draft -d)'{-d,--draft}'[set changeset phase to draft]' \
-  '(--secret -s)'{-s,--secret}'[set changeset phase to secret]' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--public -p --draft -d --secret -s)'{-p,--public}'[set changeset phase to public]' \
+  '(--public -p --draft -d --secret -s)'{-d,--draft}'[set changeset phase to draft]' \
+  '(--public -p --draft -d --secret -s)'{-s,--secret}'[set changeset phase to secret]' \
   '(--force -f)'{-f,--force}'[allow to move boundary backward]' \
-  '(--rev -r)'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  '*'{-r+,--rev=}'[target revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 _hg_cmd_pull() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[run even when the remote repository is unrelated]' \
-  '(--update -u)'{-u,--update}'[update to new tip if changesets were pulled]' \
-  '(--rev -r)'{-r+,--rev}'[a specific revision up to which you would like to pull]:revision:' \
+  '(--update -u)'{-u,--update}'[update to new branch head if new descendants were pulled]' \
+  '*'{-r+,--rev=}'[a remote changeset intended to be added]:revision:_hg_labels' \
+  '*'{-B+,--bookmark=}'[bookmark to pull]:bookmark:_hg_bookmarks' \
+  '*'{-b+,--branch=}'[a specific branch you would like to pull]:branch:_hg_branches' \
   ':source:_hg_remote'
 }
 
 _hg_cmd_push() {
-  _arguments -s -w : $_hg_global_opts $_hg_branch_bmark_opts $_hg_remote_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts \
   '(--force -f)'{-f,--force}'[force push]' \
-  '(--rev -r)'{-r+,--rev=}'[a specific revision you would like to push]:revision:_hg_labels' \
+  '*'{-r+,--rev=}'[a changeset intended to be included in the destination]:revision:_hg_labels' \
+  '*'{-B+,--bookmark=}'[bookmark to push]:bookmark:_hg_bookmarks' \
+  '*'{-b+,--branch=}'[a specific branch you would like to push]:branch:_hg_branches' \
   '--new-branch[allow pushing a new branch]' \
   ':destination:_hg_remote'
 }
 
 _hg_cmd_remove() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts \
-  '(--after -A)'{-A,--after}'[record remove that has already occurred]' \
-  '(--force -f)'{-f,--force}'[remove file even if modified]' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts $_hg_subrepos_opts \
+  '(--after -A)'{-A,--after}'[record delete for missing files]' \
+  '(--force -f)'{-f,--force}'[forget added files, delete modified files]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_rename() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--after -A)'{-A,--after}'[record a rename that has already occurred]' \
   '(--force -f)'{-f,--force}'[forcibly copy over an existing managed file]' \
   '*:file:_hg_files'
 }
 
 _hg_cmd_resolve() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts $_hg_pat_opts \
   '(--all -a)'{-a,--all}'[select all unresolved files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--list -l --mark -m --unmark -u)'{-l,--list}'[list state of files needing merge]:*:merged files:->resolve_files' \
   '(--mark -m --list -l --unmark -u)'{-m,--mark}'[mark files as resolved]:*:unresolved files:_hg_unresolved' \
-  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[unmark files as resolved]:*:resolved files:_hg_resolved' \
-  '*:file:_hg_unresolved'
+  '(--unmark -u --list -l --mark -m)'{-u,--unmark}'[mark files as unresolved]:*:resolved files:_hg_resolved' \
+  '*:file:_hg_unresolved' && ret=0
 
   if [[ $state == 'resolve_files' ]]
   then
     _alternative 'files:resolved files:_hg_resolved' \
-      'files:unresolved files:_hg_unresolved'
+      'files:unresolved files:_hg_unresolved' && ret=0
   fi
+
+  return ret
 }
 
 _hg_cmd_revert() {
-  local context state line
+  local context state state_descr line ret=1
   typeset -A opt_args
 
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_dryrun_opts \
   '(--all -a :)'{-a,--all}'[revert all changes when no arguments given]' \
-  '(--rev -r)'{-r+,--rev=}'[revision to revert to]:revision:_hg_labels' \
+  '(--rev -r)'{-r+,--rev=}'[revert to the specified revision]:revision:_hg_labels' \
   '(--no-backup -C)'{-C,--no-backup}'[do not save backup copies of files]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date code:' \
-  '*:file:->diff_files'
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--interactive -i)'{-i,--interactive}'[interactively select the changes]' \
+  '*:file:->revert_files' && ret=0
 
-  if [[ $state == 'diff_files' ]]
+  if [[ $state == 'revert_files' ]]
   then
-    if [[ -n $opt_args[-r] ]]
+    if [[ -n ${opt_args[(I)-r|--rev]} ]]
     then
-      _hg_files
+      _hg_files && ret=0
     else
       typeset -a status_files
       _hg_status mard
-      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files
+      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files && ret=0
     fi
   fi
+
+  return ret
 }
 
 _hg_cmd_rollback() {
-  _arguments -s -w : $_hg_global_opts $_hg_dryrun_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_dryrun_opts \
   '(--force -f)'{-f,--force}'[ignore safety measures]' \
 }
 
 _hg_cmd_serve() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file]:log file:_files' \
-  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file]:log file:_files' \
+  _arguments -s -S : $_hg_global_opts $_hg_subrepos_opts \
+  '(--accesslog -A)'{-A+,--accesslog=}'[name of access log file to write to]:log file:_files' \
+  '(--errorlog -E)'{-E+,--errorlog=}'[name of error log file to write to]:log file:_files' \
   '(--daemon -d)'{-d,--daemon}'[run server in background]' \
-  '(--port -p)'{-p+,--port=}'[listen port]:listen port:' \
-  '(--address -a)'{-a+,--address=}'[interface address]:interface address:' \
-  '--prefix[prefix path to serve from]:directory:_files' \
-  '(--name -n)'{-n+,--name=}'[name to show in web pages]:repository name:' \
-  '--web-conf[name of the hgweb config file]:webconf_file:_files' \
-  '--pid-file[name of file to write process ID to]:pid_file:_files' \
-  '--cmdserver[cmdserver mode]:mode:' \
-  '(--templates -t)'{-t,--templates}'[web template directory]:template dir:_files -/' \
-  '--style[web template style]:style' \
+  '(--port -p)'{-p+,--port=}'[port to listen on (default: 8000)]:listen port' \
+  '(--address -a)'{-a+,--address=}'[address to listen on (default: all interfaces)]:interface address' \
+  '--prefix=[prefix path to serve from (default: server root)]:directory:_files' \
+  '(--name -n)'{-n+,--name=}'[name to show in web pages (default: working directory)]:repository name' \
+  '--web-conf=[name of the hgweb config file]:config file:_files' \
+  '--pid-file=[name of file to write process ID to]:pid file:_files' \
+  '--cmdserver[for remote clients]' \
+  '(--templates -t)'{-t+,--templates=}'[web template directory]:template dir:_files -/' \
+  '--style=[template style to use]:style' \
   '--stdio[for remote clients]' \
-  '--certificate[certificate file]:cert_file:_files' \
-  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]'
+  '(--ipv6 -6)'{-6,--ipv6}'[use IPv6 in addition to IPv4]' \
+  '--certificate=[SSL certificate file]:certificate file:_files' \
+  '--print-url[start and print only the URL]'
 }
 
 _hg_cmd_showconfig() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--untrusted -u)'{-u,--untrusted}'[show untrusted configuration options]' \
-  ':config item:_hg_config'
+  '(--edit -e)'{-e,--edit}'[edit user config]' \
+  '(--local -l --global -g)'{-l,--local}'[edit repository config]' \
+  '(--local -l --global -g)'{-g,--global}'[edit global config]' \
+  '*:config item:_hg_config'
 }
 
 _hg_cmd_status() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
   '(--all -A)'{-A,--all}'[show status of all files]' \
   '(--modified -m)'{-m,--modified}'[show only modified files]' \
   '(--added -a)'{-a,--added}'[show only added files]' \
   '(--removed -r)'{-r,--removed}'[show only removed files]' \
   '(--deleted -d)'{-d,--deleted}'[show only deleted (but tracked) files]' \
   '(--clean -c)'{-c,--clean}'[show only files without changes]' \
-  '(--unknown -u)'{-u,--unknown}'[show only unknown files]' \
-  '(--ignored -i)'{-i,--ignored}'[show ignored files]' \
+  '(--unknown -u)'{-u,--unknown}'[show only unknown (not tracked) files]' \
+  '(--ignored -i)'{-i,--ignored}'[show only ignored files]' \
   '(--no-status -n)'{-n,--no-status}'[hide status prefix]' \
   '(--copies -C)'{-C,--copies}'[show source of copied files]' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs]' \
-  '--rev[show difference from revision]:revision:_hg_labels' \
-  '--change[list the changed files of a revision]:revision:_hg_labels' \
+  '*--rev=[show difference from revision]:revision:_hg_labels' \
+  '--change=[list the changed files of a revision]:revision:_hg_labels' \
   '*:files:_files'
 }
 
 _hg_cmd_summary() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '--remote[check for push and pull]'
 }
 
 _hg_cmd_tag() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--local -l)'{-l,--local}'[make the tag local]' \
-  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message:' \
-  '(--date -d)'{-d+,--date=}'[record datecode as commit date]:date code:' \
-  '(--user -u)'{-u+,--user=}'[record user as commiter]:user:' \
+  '(--message -m)'{-m+,--message=}'[message for tag commit log entry]:message' \
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user' \
   '(--rev -r)'{-r+,--rev=}'[revision to tag]:revision:_hg_labels' \
   '(--force -f)'{-f,--force}'[force tag]' \
   '--remove[remove a tag]' \
@@ -905,22 +960,23 @@
 }
 
 _hg_cmd_tip() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts $_hg_style_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts $_hg_template_opts \
   '(--patch -p)'{-p,--patch}'[show patch]'
 }
 
 _hg_cmd_unbundle() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--update -u)'{-u,--update}'[update to new tip if changesets were unbundled]' \
-  ':files:_files'
+  _arguments -s -S : $_hg_global_opts \
+  '(--update -u)'{-u,--update}'[update to new branch head if changesets were unbundled]' \
+  '*:files:_files'
 }
 
 _hg_cmd_update() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--clean -C)'{-C,--clean}'[overwrite locally modified files]' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  '(--check -c)'{-c,--check}'[update across branches if no uncommitted changes]' \
-  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:' \
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
+  '(--clean -C)'{-C,--clean}'[discard uncommitted changes (no backup)]' \
+  '(--check -c)'{-c,--check}'[require clean working directory]' \
+  '(--merge -m)'{-m,--merge}'[merge uncommitted changes]' \
+  '(--date -d)'{-d+,--date=}'[tipmost revision matching date]:date' \
+  '(--rev -r 1)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
   ':revision:_hg_labels'
 }
 
@@ -928,8 +984,8 @@
 
 # HGK
 _hg_cmd_view() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:' \
+  _arguments -s -S : $_hg_global_opts \
+  '(--limit -l)'{-l+,--limit=}'[limit number of changes displayed]:limit' \
   ':revision range:_hg_labels'
 }
 
@@ -983,54 +1039,55 @@
   '(--summary -s)'{-s,--summary}'[print first line of patch header]')
 
 _hg_cmd_qapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--last -1)'{-1,--last}'[show only the preceding applied patch]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qclone() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_clone_opts \
   '(--patches -p)'{-p+,--patches=}'[location of source patch repository]:' \
   ':source repository:_hg_remote' \
   ':destination:_hg_clone_dest'
 }
 
 _hg_cmd_qdelete() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--keep -k)'{-k,--keep}'[keep patch file]' \
   '*'{-r+,--rev=}'[stop managing a revision]:applied patch:_hg_revrange' \
   '*:unapplied patch:_hg_qdeletable'
 }
 
 _hg_cmd_qdiff() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_diff_opts \
                      $_hg_ignore_space_opts \
   '*:pattern:_hg_files'
 }
 
 _hg_cmd_qfinish() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--applied -a)'{-a,--applied}'[finish all applied patches]' \
   '*:patch:_hg_qapplied'
 }
 
 _hg_cmd_qfold() {
-  _arguments -s -w : $_hg_global_opts $_h_commit_opts \
-  '(--keep,-k)'{-k,--keep}'[keep folded patch files]' \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts \
+  '(--keep -k)'{-k,--keep}'[keep folded patch files]' \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--no-backup[do not save backup copies of files]' \
   '*:unapplied patch:_hg_qunapplied'
 }
 
 _hg_cmd_qgoto() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--force -f)'{-f,--force}'[overwrite any local changes]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
+  '--no-backup[do not save backup copies of files]' \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qguard() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--list -l)'{-l,--list}'[list all patches and guards]' \
   '(--none -n)'{-n,--none}'[drop all guards]' \
   ':patch:_hg_qseries' \
@@ -1038,14 +1095,14 @@
 }
 
 _hg_cmd_qheader() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qseries'
 }
 
 _hg_cmd_qimport() {
-  _arguments -s -w : $_hg_global_opts $_hg_gitlike_opts \
-  '(--existing -e)'{-e,--existing}'[import file in patch dir]' \
-  '(--name -n 2)'{-n+,--name}'[patch file name]:name:' \
+  _arguments -s -S : $_hg_global_opts $_hg_gitlike_opts \
+  '(--existing -e)'{-e,--existing}'[import file in patch directory]' \
+  '(--name -n 2)'{-n+,--name=}'[name of patch file]:name' \
   '(--force -f)'{-f,--force}'[overwrite existing files]' \
   '*'{-r+,--rev=}'[place existing revisions under mq control]:revision:_hg_revrange' \
   '(--push -P)'{-P,--push}'[qpush after importing]' \
@@ -1053,32 +1110,32 @@
 }
 
 _hg_cmd_qnew() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
   ':patch:'
 }
 
 _hg_cmd_qnext() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpop() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[pop all patches]' \
-  '(--force -f)'{-f,--force}'[forget any local changes]' \
+  '(--force -f)'{-f,--force}'[forget any local changes to patched files]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
   '--no-backup[do not save backup copies of files]' \
   ':patch:_hg_qapplied'
 }
 
 _hg_cmd_qprev() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_qpush() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--all -a :)'{-a,--all}'[apply all patches]' \
   '(--list -l)'{-l,--list}'[list patch name in commit text]' \
-  '(--force -f)'{-f,--force}'[apply if the patch has rejects]' \
+  '(--force -f)'{-f,--force}'[apply on top of local changes]' \
   '(--exact -e)'{-e,--exact}'[apply the target patch to its recorded parent]' \
   '--move[reorder patch series and apply only the patch]' \
   '--keep-changes[tolerate non-conflicting local changes]' \
@@ -1087,137 +1144,182 @@
 }
 
 _hg_cmd_qrefresh() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_gitlike_opts \
-  '(--short -s)'{-s,--short}'[short refresh]' \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  '(--short -s)'{-s,--short}'[refresh only files already in the patch and specified files]' \
   '*:files:_hg_files'
 }
 
 _hg_cmd_qrename() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   ':patch:_hg_qunapplied' \
   ':destination:'
 }
 
 _hg_cmd_qselect() {
-  _arguments -s -w : $_hg_global_opts \
+  _arguments -s -S : $_hg_global_opts \
   '(--none -n :)'{-n,--none}'[disable all guards]' \
   '(--series -s :)'{-s,--series}'[list all guards in series file]' \
   '--pop[pop to before first guarded applied patch]' \
-  '--reapply[pop and reapply patches]' \
+  '--reapply[pop, then reapply patches]' \
   '*:guards:_hg_qguards'
 }
 
 _hg_cmd_qseries() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--missing -m)'{-m,--missing}'[print patches not in series]'
 }
 
 _hg_cmd_qunapplied() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts \
   '(--first -1)'{-1,--first}'[show only the first patch]'
 }
 
 _hg_cmd_qtop() {
-  _arguments -s -w : $_hg_global_opts $_hg_qseries_opts
+  _arguments -s -S : $_hg_global_opts $_hg_qseries_opts
 }
 
 _hg_cmd_strip() {
-  _arguments -s -w : $_hg_global_opts \
-  '(--force -f)'{-f,--force}'[force removal, discard uncommitted changes, no backup]' \
-  '(--no-backup -n)'{-n,--no-backup}'[no backups]' \
-  '(--keep -k)'{-k,--keep}'[do not modify working copy during strip]' \
-  '(--bookmark -B)'{-B+,--bookmark=}'[remove revs only reachable from given bookmark]:bookmark:_hg_bookmarks' \
-  '(--rev -r)'{-r+,--rev=}'[revision]:revision:_hg_labels' \
-  ':revision:_hg_labels'
+  _arguments -s -S : $_hg_global_opts \
+  '(--force -f)'{-f,--force}'[force removal of changesets, discard uncommitted changes (no backup)]' \
+  '--no-backup[do not save backup bundle]' \
+  '(--keep -k)'{-k,--keep}'[do not modify working directory during strip]' \
+  '*'{-B+,--bookmark=}'[remove revisions only reachable from given bookmark]:bookmark:_hg_bookmarks' \
+  '*'{-r+,--rev=}'[strip specified revision]:revision:_hg_labels' \
+  '*:revision:_hg_labels'
 }
 
 # Patchbomb
 _hg_cmd_email() {
-  _arguments -s -w : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_remote_opts $_hg_gitlike_opts \
   '--plain[omit hg patch header]' \
   '--body[send patches as inline message text (default)]' \
   '(--outgoing -o)'{-o,--outgoing}'[send changes not found in the target repository]' \
   '(--bundle -b)'{-b,--bundle}'[send changes not in target as a binary bundle]' \
-  '--bundlename[name of the bundle attachment file (default: bundle)]:' \
-  '*'{-r+,--rev=}'[search in given revision range]:revision:_hg_revrange' \
+  '(--bookmark -B)'{-B+,--bookmark=}'[send changes only reachable by given bookmark]:bookmark:_hg_bookmarks' \
+  '--bundlename=[name of the bundle attachment file (default: bundle)]:name' \
+  '*'{-r+,--rev=}'[a revision to send]:revision:_hg_revrange' \
   '--force[run even when remote repository is unrelated (with -b/--bundle)]' \
-  '*--base[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
+  '*--base=[a base changeset to specify instead of a destination (with -b/--bundle)]:revision:_hg_labels' \
   '--intro[send an introduction email for a single patch]' \
   '(--inline -i --attach -a)'{-a,--attach}'[send patches as attachments]' \
   '(--attach -a --inline -i)'{-i,--inline}'[send patches as inline attachments]' \
-  '*--bcc[email addresses of blind carbon copy recipients]:email:' \
-  '*'{-c+,--cc}'[email addresses of copy recipients]:email:' \
+  '*--bcc=[email addresses of blind carbon copy recipients]:email' \
+  '*'{-c+,--cc=}'[email addresses of copy recipients]:email' \
+  '--confirm[ask for confirmation before sending]' \
   '(--diffstat -d)'{-d,--diffstat}'[add diffstat output to messages]' \
-  '--date[use the given date as the sending date]:date:' \
-  '--desc[use the given file as the series description]:files:_files' \
-  '(--from -f)'{-f,--from}'[email address of sender]:email:' \
+  '--date=[use the given date as the sending date]:date' \
+  '--desc=[use the given file as the series description]:files:_files' \
+  '(--from -f)'{-f+,--from=}'[email address of sender]:email' \
   '(--test -n)'{-n,--test}'[print messages that would be sent]' \
-  '(--mbox -m)'{-m,--mbox}'[write messages to mbox file instead of sending them]:file:' \
-  '*--reply-to[email addresses replies should be sent to]:email:' \
-  '(--subject -s)'{-s,--subject}'[subject of first message (intro or single patch)]:subject:' \
-  '--in-reply-to[message identifier to reply to]:msgid:' \
-  '*--flag[flags to add in subject prefixes]:flag:' \
-  '*'{-t,--to}'[email addresses of recipients]:email:' \
+  '(--mbox -m)'{-m+,--mbox=}'[write messages to mbox file instead of sending them]:file:_files' \
+  '*--reply-to=[email addresses replies should be sent to]:email' \
+  '(--subject -s)'{-s+,--subject=}'[subject of first message (intro or single patch)]:subject' \
+  '--in-reply-to=[message identifier to reply to]:msgid' \
+  '*--flag=[flags to add in subject prefixes]:flag' \
+  '*'{-t+,--to=}'[email addresses of recipients]:email' \
   ':revision:_hg_revrange'
 }
 
 # Rebase
 _hg_cmd_rebase() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_mergetool_opts $_hg_dryrun_opts  \
   '*'{-r+,--rev=}'[rebase these revisions]:revision:_hg_revrange' \
-  '(--source -s)'{-s+,--source=}'[rebase from the specified changeset]:revision:_hg_labels' \
-  '(--base -b)'{-b+,--base=}'[rebase from the base of the specified changeset]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-s+,--source=}'[rebase the specified changeset and descendants]:revision:_hg_labels' \
+  '(--source -s --base -b)'{-b+,--base=}'[rebase everything from branching point of specified changeset]:revision:_hg_labels' \
   '(--dest -d)'{-d+,--dest=}'[rebase onto the specified changeset]:revision:_hg_labels' \
-  '--collapse[collapse the rebased changeset]' \
-  '--keep[keep original changeset]' \
-  '--keepbranches[keep original branch name]' \
-  '(--continue -c)'{-c,--continue}'[continue an interrupted rebase]' \
-  '(--abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
+  '--collapse[collapse the rebased changesets]' \
+  '(--keep -k)'{-k,--keep}'[keep original changesets]' \
+  '--keepbranches[keep original branch names]' \
+  '(--continue -c --abort -a)'{-c,--continue}'[continue an interrupted rebase]' \
+  '(--continue -c --abort -a)'{-a,--abort}'[abort an interrupted rebase]' \
 }
 
 # Record
 _hg_cmd_record() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_pat_opts \
                      $_hg_ignore_space_opts $_hg_subrepos_opts \
   '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before committing]' \
   '--close-branch[mark a branch as closed, hiding it from the branch list]' \
   '--amend[amend the parent of the working dir]' \
-  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date:' \
-  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user:'
+  '(--date -d)'{-d+,--date=}'[record the specified date as commit date]:date' \
+  '(--user -u)'{-u+,--user=}'[record the specified user as committer]:user'
 }
 
 _hg_cmd_qrecord() {
-  _arguments -s -w : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_commit_opts $_hg_date_user_opts $_hg_gitlike_opts \
                      $_hg_pat_opts $_hg_ignore_space_opts $_hg_subrepos_opts
 }
 
 # Convert
 _hg_cmd_convert() {
-_arguments -s -w : $_hg_global_opts \
-  '(--source-type -s)'{-s,--source-type}'[source repository type]' \
-  '(--dest-type -d)'{-d,--dest-type}'[destination repository type]' \
-  '(--rev -r)'{-r+,--rev=}'[import up to target revision]:revision:' \
+_arguments -s -S : $_hg_global_opts \
+  '(--source-type -s)'{-s+,--source-type=}'[source repository type]:type:(hg cvs darcs git svn mtn gnuarch bzr p4)' \
+  '(--dest-type -d)'{-d+,--dest-type=}'[destination repository type]:type:(hg svn)' \
+  '*'{-r+,--rev=}'[import up to source revision]:revision' \
   '(--authormap -A)'{-A+,--authormap=}'[remap usernames using this file]:file:_files' \
-  '--filemap[remap file names using contents of file]:file:_files' \
-  '--splicemap[splice synthesized history into place]:file:_files' \
-  '--branchmap[change branch names while converting]:file:_files' \
+  '--filemap=[remap file names using contents of file]:file:_files' \
+  '--full[apply filemap changes by converting all files again]' \
+  '--splicemap=[splice synthesized history into place]:file:_files' \
+  '--branchmap=[change branch names while converting]:file:_files' \
   '--branchsort[try to sort changesets by branches]' \
   '--datesort[try to sort changesets by date]' \
-  '--sourcesort[preserve source changesets order]'
-}
-
-# Graphlog
-_hg_cmd_glog() {
-  _hg_cmd_log $@
+  '--sourcesort[preserve source changesets order]' \
+  '--closesort[try to reorder closed revisions]'
 }
 
 # Purge
 _hg_cmd_purge() {
-  _arguments -s -w : $_hg_global_opts $_hg_pat_opts $_hg_subrepos_opts \
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
   '(--abort-on-err -a)'{-a,--abort-on-err}'[abort if an error occurs]' \
   '--all[purge ignored files too]' \
+  '--dirs[purge empty directories]' \
+  '--files[purge files]' \
   '(--print -p)'{-p,--print}'[print filenames instead of deleting them]' \
   '(--print0 -0)'{-0,--print0}'[end filenames with NUL, for use with xargs (implies -p/--print)]'
 }
 
+# Shelve
+_hg_cmd_shelve() {
+  local context state state_descr line ret=1
+  typeset -A opt_args
+
+  _arguments -s -S : $_hg_global_opts $_hg_pat_opts \
+  '(--addremove -A)'{-A,--addremove}'[mark new/missing files as added/removed before shelving]' \
+  '(--unknown -u)'{-u,--unknown}'[store unknown files in the shelve]' \
+  '(--name -n :)--cleanup[delete all shelved changes]' \
+  '--date=[shelve with the specified commit date]:date' \
+  '(--delete -d)'{-d,--delete}'[delete the named shelved change(s)]' \
+  '(--edit -e)'{-e,--edit}'[invoke editor on commit messages]' \
+  '(--list -l)'{-l,--list}'[list current shelves]' \
+  '(--message -m)'{-m+,--message=}'[use text as shelve message]:text' \
+  '(--name -n)'{-n+,--name=}'[use the given name for the shelved commit]:name' \
+  '(--patch -p)'{-p,--patch}'[output patches for changes]' \
+  '(--interactive -i)'{-i,--interactive}'[interactive mode, only works while creating a shelve]' \
+  '--stat[output diffstat-style summary of changes]' \
+  '*:file:->shelve_files' && ret=0
+
+  if [[ $state == 'shelve_files' ]]
+  then
+    if [[ -n ${opt_args[(I)-d|--delete|-l|--list|-p|--patch|--stat]} ]]
+    then
+      _hg_shelves && ret=0
+    else
+      typeset -a status_files
+      _hg_status mard
+      _wanted files expl 'modified, added, removed or deleted file' _multi_parts / status_files && ret=0
+    fi
+  fi
+
+  return ret
+}
+
+_hg_cmd_unshelve() {
+  _arguments -s -S : $_hg_global_opts $_hg_mergetool_opts \
+  '(--abort -a --continue -c --name -n :)'{-a,--abort}'[abort an incomplete unshelve operation]' \
+  '(--abort -a --continue -c --name -n :)'{-c,--continue}'[continue an incomplete unshelve operation]' \
+  '(--keep -k)'{-k,--keep}'[keep shelve after unshelving]' \
+  '(--name -n :)'{-n+,--name=}'[restore shelved change with given name]:shelve:_hg_shelves' \
+  ':shelve:_hg_shelves'
+}
+
 _hg "$@"
--- a/doc/check-seclevel.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/doc/check-seclevel.py	Mon Oct 22 14:46:06 2018 -0400
@@ -87,7 +87,8 @@
 
 def checkhghelps(ui):
     errorcnt = 0
-    for names, sec, doc in helptable:
+    for h in helptable:
+        names, sec, doc = h[0:3]
         if callable(doc):
             doc = doc(ui)
         errorcnt += checkseclevel(ui, doc,
--- a/doc/gendoc.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/doc/gendoc.py	Mon Oct 22 14:46:06 2018 -0400
@@ -138,18 +138,20 @@
 
 def showtopic(ui, topic):
     extrahelptable = [
-        (["common"], '', loaddoc('common')),
-        (["hg.1"], '', loaddoc('hg.1')),
-        (["hg-ssh.8"], '', loaddoc('hg-ssh.8')),
-        (["hgignore.5"], '', loaddoc('hgignore.5')),
-        (["hgrc.5"], '', loaddoc('hgrc.5')),
-        (["hgignore.5.gendoc"], '', loaddoc('hgignore')),
-        (["hgrc.5.gendoc"], '', loaddoc('config')),
+        (["common"], '', loaddoc('common'), help.TOPIC_CATEGORY_MISC),
+        (["hg.1"], '', loaddoc('hg.1'), help.TOPIC_CATEGORY_CONFIG),
+        (["hg-ssh.8"], '', loaddoc('hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
+        (["hgignore.5"], '', loaddoc('hgignore.5'), help.TOPIC_CATEGORY_CONFIG),
+        (["hgrc.5"], '', loaddoc('hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
+        (["hgignore.5.gendoc"], '', loaddoc('hgignore'),
+         help.TOPIC_CATEGORY_CONFIG),
+        (["hgrc.5.gendoc"], '', loaddoc('config'), help.TOPIC_CATEGORY_CONFIG),
     ]
     helpprinter(ui, helptable + extrahelptable, None, include=[topic])
 
 def helpprinter(ui, helptable, sectionfunc, include=[], exclude=[]):
-    for names, sec, doc in helptable:
+    for h in helptable:
+        names, sec, doc = h[0:3]
         if exclude and names[0] in exclude:
             continue
         if include and names[0] not in include:
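
Both doc scripts above move from unpacking fixed 3-tuples to slicing with h[0:3], so they keep working now that helptable entries may carry a fourth element (a help category, as in the help.TOPIC_CATEGORY_* constants added to extrahelptable). A minimal sketch of that compatibility pattern, with made-up entry values:

    # Tolerant unpacking of help-table entries that grew an optional
    # trailing category field; the values here are illustrative only.
    helptable = [
        (['common'], '', 'common options doc'),                    # 3-tuple
        (['hg.1'], '', 'hg man page doc', 'TOPIC_CATEGORY_MISC'),  # 4-tuple
    ]

    for h in helptable:
        names, sec, doc = h[0:3]  # any trailing category is ignored
        print(names[0], sec, doc)
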
--- a/doc/runrst	Wed Oct 10 12:25:28 2018 -0400
+++ b/doc/runrst	Mon Oct 22 14:46:06 2018 -0400
@@ -12,11 +12,14 @@
 where WRITER is the name of a Docutils writer such as 'html' or 'manpage'
 """
 
+from __future__ import absolute_import
+
 import sys
 try:
-    from docutils.parsers.rst import roles
-    from docutils.core import publish_cmdline
-    from docutils import nodes, utils
+    import docutils.core as core
+    import docutils.nodes as nodes
+    import docutils.utils as utils
+    import docutils.parsers.rst.roles as roles
 except ImportError:
     sys.stderr.write("abort: couldn't generate documentation: docutils "
                      "module is missing\n")
@@ -58,4 +61,4 @@
     writer = sys.argv[1]
     del sys.argv[1]
 
-    publish_cmdline(writer_name=writer)
+    core.publish_cmdline(writer_name=writer)
--- a/hg	Wed Oct 10 12:25:28 2018 -0400
+++ b/hg	Mon Oct 22 14:46:06 2018 -0400
@@ -27,15 +27,17 @@
         libdir = os.path.abspath(libdir)
     sys.path.insert(0, libdir)
 
-# enable importing on demand to reduce startup time
-try:
-    if sys.version_info[0] < 3 or sys.version_info >= (3, 6):
-        import hgdemandimport; hgdemandimport.enable()
-except ImportError:
-    sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
-                     ' '.join(sys.path))
-    sys.stderr.write("(check your install and PYTHONPATH)\n")
-    sys.exit(-1)
+from hgdemandimport import tracing
+with tracing.log('hg script'):
+    # enable importing on demand to reduce startup time
+    try:
+        if sys.version_info[0] < 3 or sys.version_info >= (3, 6):
+            import hgdemandimport; hgdemandimport.enable()
+    except ImportError:
+        sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
+                         ' '.join(sys.path))
+        sys.stderr.write("(check your install and PYTHONPATH)\n")
+        sys.exit(-1)
 
-from mercurial import dispatch
-dispatch.run()
+    from mercurial import dispatch
+    dispatch.run()
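
With this change, the whole startup path (demandimport setup, the mercurial import, and dispatch.run()) executes inside one tracing.log('hg script') span. The helper is a standard contextlib context manager; here is a minimal, self-contained stand-in that prints span markers instead of writing to a pipe (the span name is arbitrary):

    import contextlib

    @contextlib.contextmanager
    def log(whencefmt, *whenceargs):
        # simplified stand-in for hgdemandimport.tracing.log
        whence = whencefmt % whenceargs
        print('START %s' % whence)
        try:
            yield
        finally:
            print('END %s' % whence)

    with log('hg script'):
        pass  # imports and dispatch.run() would execute inside the span
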
--- a/hgdemandimport/demandimportpy2.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgdemandimport/demandimportpy2.py	Mon Oct 22 14:46:06 2018 -0400
@@ -30,6 +30,8 @@
 import contextlib
 import sys
 
+from . import tracing
+
 contextmanager = contextlib.contextmanager
 
 _origimport = __import__
@@ -86,52 +88,55 @@
 
     def _load(self):
         if not self._module:
-            head, globals, locals, after, level, modrefs = self._data
-            mod = _hgextimport(_origimport, head, globals, locals, None, level)
-            if mod is self:
-                # In this case, _hgextimport() above should imply
-                # _demandimport(). Otherwise, _hgextimport() never
-                # returns _demandmod. This isn't intentional behavior,
-                # in fact. (see also issue5304 for detail)
-                #
-                # If self._module is already bound at this point, self
-                # should be already _load()-ed while _hgextimport().
-                # Otherwise, there is no way to import actual module
-                # as expected, because (re-)invoking _hgextimport()
-                # should cause same result.
-                # This is reason why _load() returns without any more
-                # setup but assumes self to be already bound.
-                mod = self._module
-                assert mod and mod is not self, "%s, %s" % (self, mod)
-                return
+            with tracing.log('demandimport %s', self._data[0]):
+                head, globals, locals, after, level, modrefs = self._data
+                mod = _hgextimport(
+                    _origimport, head, globals, locals, None, level)
+                if mod is self:
+                    # In this case, _hgextimport() above should imply
+                    # _demandimport(). Otherwise, _hgextimport() never
+                    # returns _demandmod. This isn't intentional behavior,
+                    # in fact (see also issue5304 for details).
+                    #
+                    # If self._module is already bound at this point, self
+                    # should already have been _load()-ed during
+                    # _hgextimport(). Otherwise, there is no way to import
+                    # the actual module as expected, because (re-)invoking
+                    # _hgextimport() would produce the same result. That is
+                    # why _load() returns without any further setup and
+                    # assumes self to be already bound.
+                    mod = self._module
+                    assert mod and mod is not self, "%s, %s" % (self, mod)
+                    return
 
-            # load submodules
-            def subload(mod, p):
-                h, t = p, None
-                if '.' in p:
-                    h, t = p.split('.', 1)
-                if getattr(mod, h, nothing) is nothing:
-                    setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__,
-                                               level=1))
-                elif t:
-                    subload(getattr(mod, h), t)
+                # load submodules
+                def subload(mod, p):
+                    h, t = p, None
+                    if '.' in p:
+                        h, t = p.split('.', 1)
+                    if getattr(mod, h, nothing) is nothing:
+                        setattr(mod, h, _demandmod(
+                            p, mod.__dict__, mod.__dict__, level=1))
+                    elif t:
+                        subload(getattr(mod, h), t)
 
-            for x in after:
-                subload(mod, x)
+                for x in after:
+                    subload(mod, x)
 
-            # Replace references to this proxy instance with the actual module.
-            if locals:
-                if locals.get(head) is self:
-                    locals[head] = mod
-                elif locals.get(head + r'mod') is self:
-                    locals[head + r'mod'] = mod
+                # Replace references to this proxy instance with the
+                # actual module.
+                if locals:
+                    if locals.get(head) is self:
+                        locals[head] = mod
+                    elif locals.get(head + r'mod') is self:
+                        locals[head + r'mod'] = mod
 
-            for modname in modrefs:
-                modref = sys.modules.get(modname, None)
-                if modref and getattr(modref, head, None) is self:
-                    setattr(modref, head, mod)
+                for modname in modrefs:
+                    modref = sys.modules.get(modname, None)
+                    if modref and getattr(modref, head, None) is self:
+                        setattr(modref, head, mod)
 
-            object.__setattr__(self, r"_module", mod)
+                object.__setattr__(self, r"_module", mod)
 
     def __repr__(self):
         if self._module:
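
For context, _demandmod is a proxy that records the import arguments and performs the real import only when _load() first runs; the hunk above wraps that load in a tracing span so each deferred import shows up individually in a trace. A compressed, hypothetical sketch of the deferred-import idea (not Mercurial's actual implementation):

    import importlib

    class lazymod(object):
        # stands in for a module until an attribute is first accessed
        def __init__(self, name):
            self._name = name
            self._mod = None

        def __getattr__(self, attr):
            if self._mod is None:
                self._mod = importlib.import_module(self._name)
            return getattr(self._mod, attr)

    json = lazymod('json')     # nothing imported yet
    print(json.dumps([1, 2]))  # the real import happens here
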
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgdemandimport/tracing.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,44 @@
+# Support code for event tracing in Mercurial. Lives in hgdemandimport
+# so that the demand importer itself can also use it.
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
+_pipe = None
+_session = 'none'
+_checked = False
+
+@contextlib.contextmanager
+def log(whencefmt, *whenceargs):
+    global _pipe, _session, _checked
+    if _pipe is None:
+        if _checked:
+            yield
+            return
+        _checked = True
+        if 'HGCATAPULTSERVERPIPE' not in os.environ:
+            yield
+            return
+        _pipe = open(os.environ['HGCATAPULTSERVERPIPE'], 'w', 1)
+        _session = os.environ.get('HGCATAPULTSESSION', 'none')
+    whence = whencefmt % whenceargs
+    try:
+        # Both writes to the pipe are wrapped in try/except to ignore
+        # errors, as we can see mysterious errors in here if the pager
+        # is active. Presumably other conditions could trigger
+        # problems too.
+        try:
+            _pipe.write('START %s %s\n' % (_session, whence))
+        except IOError:
+            pass
+        yield
+    finally:
+        try:
+            _pipe.write('END %s %s\n' % (_session, whence))
+        except IOError:
+            pass
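
On the other end of the pipe, a consumer pairs each END line with its START to compute span durations; the format written above is 'ACTION SESSION WHENCE'. A toy reader for that protocol, assuming a FIFO created beforehand (the path is illustrative; the intended consumer is a catapult trace server):

    import time

    def read_spans(pipe_path='/tmp/hgcatapult'):
        # pair START/END lines written by tracing.log and print durations
        starts = {}
        with open(pipe_path) as pipe:
            for line in pipe:
                action, session, whence = line.rstrip('\n').split(' ', 2)
                key = (session, whence)
                if action == 'START':
                    starts[key] = time.time()
                elif action == 'END' and key in starts:
                    duration = time.time() - starts.pop(key)
                    print('%s: %.6fs' % (whence, duration))
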
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/absorb.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1026 @@
+# absorb.py
+#
+# Copyright 2016 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""apply working directory changes to changesets (EXPERIMENTAL)
+
+The absorb extension provides a command to use annotate information to
+amend modified chunks into the corresponding non-public changesets.
+
+::
+
+    [absorb]
+    # only check 50 recent non-public changesets at most
+    max-stack-size = 50
+    # whether to add noise to new commits to avoid obsolescence cycle
+    add-noise = 1
+    # make `amend --correlated` a shortcut to the main command
+    amend-flag = correlated
+
+    [color]
+    absorb.description = yellow
+    absorb.node = blue bold
+    absorb.path = bold
+"""
+
+# TODO:
+#  * Rename config items to [commands] namespace
+#  * Converge getdraftstack() with other code in core
+#  * move many attributes on fixupstate to be private
+
+from __future__ import absolute_import
+
+import collections
+
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    commands,
+    context,
+    crecord,
+    error,
+    linelog,
+    mdiff,
+    node,
+    obsolete,
+    patch,
+    phases,
+    pycompat,
+    registrar,
+    repair,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('absorb', 'add-noise', default=True)
+configitem('absorb', 'amend-flag', default=None)
+configitem('absorb', 'max-stack-size', default=50)
+
+colortable = {
+    'absorb.description': 'yellow',
+    'absorb.node': 'blue bold',
+    'absorb.path': 'bold',
+}
+
+defaultdict = collections.defaultdict
+
+class nullui(object):
+    """blank ui object doing nothing"""
+    debugflag = False
+    verbose = False
+    quiet = True
+
+    def __getitem__(self, name):
+        def nullfunc(*args, **kwds):
+            return
+        return nullfunc
+
+class emptyfilecontext(object):
+    """minimal filecontext representing an empty file"""
+    def data(self):
+        return ''
+
+    def node(self):
+        return node.nullid
+
+def uniq(lst):
+    """list -> list. remove duplicated items without changing the order"""
+    seen = set()
+    result = []
+    for x in lst:
+        if x not in seen:
+            seen.add(x)
+            result.append(x)
+    return result
+
+def getdraftstack(headctx, limit=None):
+    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
+
+    changesets are sorted in topo order, oldest first.
+    return at most limit items, if limit is a positive number.
+
+    merges are considered non-draft as well, i.e. every commit
+    returned has exactly one parent.
+    """
+    ctx = headctx
+    result = []
+    while ctx.phase() != phases.public:
+        if limit and len(result) >= limit:
+            break
+        parents = ctx.parents()
+        if len(parents) != 1:
+            break
+        result.append(ctx)
+        ctx = parents[0]
+    result.reverse()
+    return result
+
+def getfilestack(stack, path, seenfctxs=None):
+    """([ctx], str, set) -> [fctx], {ctx: fctx}
+
+    stack is a list of contexts, from old to new. usually they are what
+    "getdraftstack" returns.
+
+    follows renames, but not copies.
+
+    seenfctxs is a set of filecontexts that will be considered "immutable".
+    they are usually what this function returned in earlier calls. this is
+    useful to avoid issues where a file was "moved" to multiple places and
+    then modified differently, e.g. "a" was copied to "b" and also to "c",
+    then "a" was deleted, so both "b" and "c" were "moved" from "a"; we
+    enforce that only one of them can affect "a"'s content.
+
+    return an empty list and an empty dict, if the specified path does not
+    exist in stack[-1] (the top of the stack).
+
+    otherwise, return a list of de-duplicated filecontexts, and the map to
+    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
+    of the list would be outside the stack and should be considered immutable.
+    the remaining items are within the stack.
+
+    for example, given the following changelog and corresponding filelog
+    revisions:
+
+      changelog: 3----4----5----6----7
+      filelog:   x    0----1----1----2 (x: no such file yet)
+
+    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
+    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
+      dummy empty filecontext.
+    - if stack = [2], returns ([], {})
+    - if stack = [7], returns ([1, 2], {7: 2})
+    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
+      removed, since 1 is immutable.
+    """
+    if seenfctxs is None:
+        seenfctxs = set()
+    assert stack
+
+    if path not in stack[-1]:
+        return [], {}
+
+    fctxs = []
+    fctxmap = {}
+
+    pctx = stack[0].p1() # the public (immutable) ctx we stop at
+    for ctx in reversed(stack):
+        if path not in ctx: # the file is added in the next commit
+            pctx = ctx
+            break
+        fctx = ctx[path]
+        fctxs.append(fctx)
+        if fctx in seenfctxs: # treat fctx as the immutable one
+            pctx = None # do not add another immutable fctx
+            break
+        fctxmap[ctx] = fctx # only for mutable fctxs
+        renamed = fctx.renamed()
+        if renamed:
+            path = renamed[0] # follow rename
+            if path in ctx: # but do not follow copy
+                pctx = ctx.p1()
+                break
+
+    if pctx is not None: # need an extra immutable fctx
+        if path in pctx:
+            fctxs.append(pctx[path])
+        else:
+            fctxs.append(emptyfilecontext())
+
+    fctxs.reverse()
+    # note: we rely on a property of hg: filerev is not reused for linear
+    # history. i.e. it's impossible to have:
+    #   changelog:  4----5----6 (linear, no merges)
+    #   filelog:    1----2----1
+    #                         ^ reuse filerev (impossible)
+    # because parents are part of the hash. if that's not true, we need to
+    # remove uniq and find a different way to identify fctxs.
+    return uniq(fctxs), fctxmap
+
+class overlaystore(patch.filestore):
+    """read-only, hybrid store based on a dict and ctx.
+    memworkingcopy: {path: content}, overrides file contents.
+    """
+    def __init__(self, basectx, memworkingcopy):
+        self.basectx = basectx
+        self.memworkingcopy = memworkingcopy
+
+    def getfile(self, path):
+        """comply with mercurial.patch.filestore.getfile"""
+        if path not in self.basectx:
+            return None, None, None
+        fctx = self.basectx[path]
+        if path in self.memworkingcopy:
+            content = self.memworkingcopy[path]
+        else:
+            content = fctx.data()
+        mode = (fctx.islink(), fctx.isexec())
+        renamed = fctx.renamed() # False or (path, node)
+        return content, mode, (renamed and renamed[0])
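+
+# a hedged usage sketch (names from this file; "basectx" is any changectx):
+#
+#     store = overlaystore(basectx, {'a.txt': 'new content\n'})
+#     content, mode, copysource = store.getfile('a.txt')
+#     # content comes from the dict override; mode and the rename source
+#     # still come from basectx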
+
+def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
+    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
+    memworkingcopy overrides file contents.
+    """
+    # parents must contain 2 items: (node1, node2)
+    if parents is None:
+        parents = ctx.repo().changelog.parents(ctx.node())
+    if extra is None:
+        extra = ctx.extra()
+    date = ctx.date()
+    desc = ctx.description()
+    user = ctx.user()
+    files = set(ctx.files()).union(memworkingcopy)
+    store = overlaystore(ctx, memworkingcopy)
+    return context.memctx(
+        repo=ctx.repo(), parents=parents, text=desc,
+        files=files, filectxfn=store, user=user, date=date,
+        branch=None, extra=extra)
+
+class filefixupstate(object):
+    """state needed to apply fixups to a single file
+
+    internally, it keeps file contents of several revisions and a linelog.
+
+    the linelog uses odd revision numbers for original contents (fctxs passed
+    to __init__), and even revision numbers for fixups, like:
+
+        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
+        linelog rev 2: fixups made to self.fctxs[0]
+        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
+        linelog rev 4: fixups made to self.fctxs[1]
+        ...
+
+    a typical use is like:
+
+        1. call diffwith, to calculate self.fixups
+        2. (optionally) present self.fixups to the user, or change it
+        3. call apply, to apply changes
+        4. read results from "finalcontents", or call getfinalcontent
+    """
+
+    def __init__(self, fctxs, path, ui=None, opts=None):
+        """([fctx], ui or None) -> None
+
+        fctxs should be linear, and sorted by topo order - oldest first.
+        fctxs[0] will be considered as "immutable" and will not be changed.
+        """
+        self.fctxs = fctxs
+        self.path = path
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+
+        # following fields are built from fctxs. they exist for perf reasons
+        self.contents = [f.data() for f in fctxs]
+        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
+        self.linelog = self._buildlinelog()
+        if self.ui.debugflag:
+            assert self._checkoutlinelog() == self.contents
+
+        # following fields will be filled later
+        self.chunkstats = [0, 0] # [adopted, total : int]
+        self.targetlines = [] # [str]
+        self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
+        self.finalcontents = [] # [str]
+        self.ctxaffected = set()
+
+    def diffwith(self, targetfctx, fm=None):
+        """calculate fixups needed by examining the differences between
+        self.fctxs[-1] and targetfctx, chunk by chunk.
+
+        targetfctx is the target state we move towards. we may or may not be
+        able to get there because not all modified chunks can be amended into
+        a non-public fctx unambiguously.
+
+        call this only once, before apply().
+
+        update self.fixups, self.chunkstats, and self.targetlines.
+        """
+        a = self.contents[-1]
+        alines = self.contentlines[-1]
+        b = targetfctx.data()
+        blines = mdiff.splitnewlines(b)
+        self.targetlines = blines
+
+        self.linelog.annotate(self.linelog.maxrev)
+        annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
+        assert len(annotated) == len(alines)
+        # add a dummy end line to make insertion at the end easier
+        if annotated:
+            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
+            annotated.append(dummyendline)
+
+        # analyse diff blocks
+        for chunk in self._alldiffchunks(a, b, alines, blines):
+            newfixups = self._analysediffchunk(chunk, annotated)
+            self.chunkstats[0] += bool(newfixups) # 1 or 0
+            self.chunkstats[1] += 1
+            self.fixups += newfixups
+            if fm is not None:
+                self._showchanges(fm, alines, blines, chunk, newfixups)
+
+    def apply(self):
+        """apply self.fixups. update self.linelog, self.finalcontents.
+
+        call this only once, before getfinalcontent(), after diffwith().
+        """
+        # the following is unnecessary, as it's done by "diffwith":
+        #   self.linelog.annotate(self.linelog.maxrev)
+        for rev, a1, a2, b1, b2 in reversed(self.fixups):
+            blines = self.targetlines[b1:b2]
+            if self.ui.debugflag:
+                idx = (max(rev - 1, 0)) // 2
+                self.ui.write(_('%s: chunk %d:%d -> %d lines\n')
+                              % (node.short(self.fctxs[idx].node()),
+                                 a1, a2, len(blines)))
+            self.linelog.replacelines(rev, a1, a2, b1, b2)
+        if self.opts.get('edit_lines', False):
+            self.finalcontents = self._checkoutlinelogwithedits()
+        else:
+            self.finalcontents = self._checkoutlinelog()
+
+    def getfinalcontent(self, fctx):
+        """(fctx) -> str. get modified file content for a given filecontext"""
+        idx = self.fctxs.index(fctx)
+        return self.finalcontents[idx]
+
+    def _analysediffchunk(self, chunk, annotated):
+        """analyse a different chunk and return new fixups found
+
+        return [] if no lines from the chunk can be safely applied.
+
+        the chunk (or lines) cannot be safely applied if, for example:
+          - the modified (deleted) lines belong to a public changeset
+            (self.fctxs[0])
+          - the chunk is a pure insertion and the adjacent lines (at most 2
+            lines) belong to different non-public changesets, or do not belong
+            to any non-public changesets.
+          - the chunk is modifying lines from different changesets.
+            in this case, if the number of lines deleted equals the number
+            of lines added, assume it's a simple 1:1 map (could be wrong).
+            otherwise, give up.
+          - the chunk is modifying lines from a single non-public changeset,
+            but other revisions touch the area as well. i.e. the lines are
+            not continuous as seen from the linelog.
+        """
+        a1, a2, b1, b2 = chunk
+        # find involved indexes from annotate result
+        involved = annotated[a1:a2]
+        if not involved and annotated: # a1 == a2 and a is not empty
+            # pure insertion, check nearby lines. ignore lines belonging
+            # to the public (first) changeset (i.e. annotated[i][0] == 1)
+            nearbylinenums = {a2, max(0, a1 - 1)}
+            involved = [annotated[i]
+                        for i in nearbylinenums if annotated[i][0] != 1]
+        involvedrevs = list(set(r for r, l in involved))
+        newfixups = []
+        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
+            # chunk belongs to a single revision
+            rev = involvedrevs[0]
+            if rev > 1:
+                fixuprev = rev + 1
+                newfixups.append((fixuprev, a1, a2, b1, b2))
+        elif a2 - a1 == b2 - b1 or b1 == b2:
+            # 1:1 line mapping, or chunk was deleted
+            for i in pycompat.xrange(a1, a2):
+                rev, linenum = annotated[i]
+                if rev > 1:
+                    if b1 == b2: # deletion, simply remove that single line
+                        nb1 = nb2 = 0
+                    else: # 1:1 line mapping, change the corresponding rev
+                        nb1 = b1 + i - a1
+                        nb2 = nb1 + 1
+                    fixuprev = rev + 1
+                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
+        return self._optimizefixups(newfixups)
+
+    @staticmethod
+    def _alldiffchunks(a, b, alines, blines):
+        """like mdiff.allblocks, but only care about differences"""
+        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
+        for chunk, btype in blocks:
+            if btype != '!':
+                continue
+            yield chunk
+
+    def _buildlinelog(self):
+        """calculate the initial linelog based on self.content{,line}s.
+        this is similar to running a partial "annotate".
+        """
+        llog = linelog.linelog()
+        a, alines = '', []
+        for i in pycompat.xrange(len(self.contents)):
+            b, blines = self.contents[i], self.contentlines[i]
+            llrev = i * 2 + 1
+            chunks = self._alldiffchunks(a, b, alines, blines)
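+            # replay chunks in reverse order, so a replacement near the end
+            # of the file does not shift the offsets of the chunks above it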
+            for a1, a2, b1, b2 in reversed(list(chunks)):
+                llog.replacelines(llrev, a1, a2, b1, b2)
+            a, alines = b, blines
+        return llog
+
+    def _checkoutlinelog(self):
+        """() -> [str]. check out file contents from linelog"""
+        contents = []
+        for i in pycompat.xrange(len(self.contents)):
+            rev = (i + 1) * 2
+            self.linelog.annotate(rev)
+            content = ''.join(map(self._getline, self.linelog.annotateresult))
+            contents.append(content)
+        return contents
+
+    def _checkoutlinelogwithedits(self):
+        """() -> [str]. prompt all lines for edit"""
+        alllines = self.linelog.getalllines()
+        # header
+        editortext = (_('HG: editing %s\nHG: "y" means the line to the right '
+                        'exists in the changeset to the top\nHG:\n')
+                      % self.fctxs[-1].path())
+        # [(idx, fctx)]. hide the dummy emptyfilecontext
+        visiblefctxs = [(i, f)
+                        for i, f in enumerate(self.fctxs)
+                        if not isinstance(f, emptyfilecontext)]
+        for i, (j, f) in enumerate(visiblefctxs):
+            editortext += (_('HG: %s/%s %s %s\n') %
+                           ('|' * i, '-' * (len(visiblefctxs) - i + 1),
+                            node.short(f.node()),
+                            f.description().split('\n', 1)[0]))
+        editortext += _('HG: %s\n') % ('|' * len(visiblefctxs))
+        # figure out the lifetime of a line; this is relatively inefficient,
+        # but probably fine
+        lineset = defaultdict(lambda: set()) # {(llrev, linenum): {fctx idx}}
+        for i, f in visiblefctxs:
+            self.linelog.annotate((i + 1) * 2)
+            for l in self.linelog.annotateresult:
+                lineset[l].add(i)
+        # append lines
+        for l in alllines:
+            editortext += ('    %s : %s' %
+                           (''.join([('y' if i in lineset[l] else ' ')
+                                     for i, _f in visiblefctxs]),
+                            self._getline(l)))
+        # run editor
+        editedtext = self.ui.edit(editortext, '', action='absorb')
+        if not editedtext:
+            raise error.Abort(_('empty editor text'))
+        # parse edited result
+        contents = ['' for i in self.fctxs]
+        leftpadpos = 4
+        colonpos = leftpadpos + len(visiblefctxs) + 1
+        for l in mdiff.splitnewlines(editedtext):
+            if l.startswith('HG:'):
+                continue
+            if l[colonpos - 1:colonpos + 2] != ' : ':
+                raise error.Abort(_('malformed line: %s') % l)
+            linecontent = l[colonpos + 2:]
+            for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
+                if ch == 'y':
+                    contents[visiblefctxs[i][0]] += linecontent
+        # chunkstats is hard to calculate if anything changes, therefore
+        # set it to just a simple value (1, 1).
+        if editedtext != editortext:
+            self.chunkstats = [1, 1]
+        return contents
+
+    def _getline(self, lineinfo):
+        """((rev, linenum)) -> str. convert rev+line number to line content"""
+        rev, linenum = lineinfo
+        if rev & 1: # odd: original line taken from fctxs
+            return self.contentlines[rev // 2][linenum]
+        else: # even: fixup line from targetfctx
+            return self.targetlines[linenum]
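+    # e.g. _getline((5, 3)) is line 3 of fctxs[2] (odd rev, 5 // 2 == 2),
+    # while _getline((4, 3)) is targetlines[3] (even rev, i.e. a fixup line)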
+
+    def _iscontinuous(self, a1, a2, closedinterval=False):
+        """(a1, a2 : int) -> bool
+
+        check if these lines are continuous. i.e. no other insertions or
+        deletions (from other revisions) among these lines.
+
+        closedinterval decides whether a2 should be included or not. i.e. is
+        it [a1, a2), or [a1, a2] ?
+        """
+        if a1 >= a2:
+            return True
+        llog = self.linelog
+        offset1 = llog.getoffset(a1)
+        offset2 = llog.getoffset(a2) + int(closedinterval)
+        linesinbetween = llog.getalllines(offset1, offset2)
+        return len(linesinbetween) == a2 - a1 + int(closedinterval)
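+    # e.g. a line inserted by another revision between a1 and a2 adds an
+    # extra entry in that offset range, so the count check above fails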
+
+    def _optimizefixups(self, fixups):
+        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
+        merge adjacent fixups to make them less fragmented.
+        """
+        result = []
+        pcurrentchunk = [[-1, -1, -1, -1, -1]]
+
+        def pushchunk():
+            if pcurrentchunk[0][0] != -1:
+                result.append(tuple(pcurrentchunk[0]))
+
+        for i, chunk in enumerate(fixups):
+            rev, a1, a2, b1, b2 = chunk
+            lastrev = pcurrentchunk[0][0]
+            lasta2 = pcurrentchunk[0][2]
+            lastb2 = pcurrentchunk[0][4]
+            if (a1 == lasta2 and b1 == lastb2 and rev == lastrev and
+                    self._iscontinuous(max(a1 - 1, 0), a1)):
+                # merge into currentchunk
+                pcurrentchunk[0][2] = a2
+                pcurrentchunk[0][4] = b2
+            else:
+                pushchunk()
+                pcurrentchunk[0] = list(chunk)
+        pushchunk()
+        return result
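+    # e.g. [(3, 0, 1, 0, 1), (3, 1, 2, 1, 2)] merges into [(3, 0, 2, 0, 2)],
+    # provided no other revision touches the boundary between the two chunks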
+
+    def _showchanges(self, fm, alines, blines, chunk, fixups):
+
+        def trim(line):
+            if line.endswith('\n'):
+                line = line[:-1]
+            return line
+
+        # this is not optimized for perf but _showchanges only gets executed
+        # with an extra command-line flag.
+        a1, a2, b1, b2 = chunk
+        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
+        for idx, fa1, fa2, fb1, fb2 in fixups:
+            for i in pycompat.xrange(fa1, fa2):
+                aidxs[i - a1] = (max(idx, 1) - 1) // 2
+            for i in pycompat.xrange(fb1, fb2):
+                bidxs[i - b1] = (max(idx, 1) - 1) // 2
+
+        fm.startitem()
+        fm.write('hunk', '        %s\n',
+                 '@@ -%d,%d +%d,%d @@'
+                 % (a1, a2 - a1, b1, b2 - b1), label='diff.hunk')
+        fm.data(path=self.path, linetype='hunk')
+
+        def writeline(idx, diffchar, line, linetype, linelabel):
+            fm.startitem()
+            node = ''
+            if idx:
+                ctx = self.fctxs[idx]
+                fm.context(fctx=ctx)
+                node = ctx.hex()
+                self.ctxaffected.add(ctx.changectx())
+            fm.write('node', '%-7.7s ', node, label='absorb.node')
+            fm.write('diffchar ' + linetype, '%s%s\n', diffchar, line,
+                     label=linelabel)
+            fm.data(path=self.path, linetype=linetype)
+
+        for i in pycompat.xrange(a1, a2):
+            writeline(aidxs[i - a1], '-', trim(alines[i]), 'deleted',
+                      'diff.deleted')
+        for i in pycompat.xrange(b1, b2):
+            writeline(bidxs[i - b1], '+', trim(blines[i]), 'inserted',
+                      'diff.inserted')
+
+class fixupstate(object):
+    """state needed to run absorb
+
+    internally, it keeps paths and filefixupstates.
+
+    a typical use is similar to filefixupstate's:
+
+        1. call diffwith, to calculate fixups
+        2. (optionally) present fixups to the user, or edit fixups
+        3. call apply, to apply changes to memory
+        4. call commit, to commit changes to hg database
+    """
+
+    def __init__(self, stack, ui=None, opts=None):
+        """([ctx], ui or None) -> None
+
+        stack: should be linear, and sorted by topo order - oldest first.
+        all commits in stack are considered mutable.
+        """
+        assert stack
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+        self.stack = stack
+        self.repo = stack[-1].repo().unfiltered()
+
+        # following fields will be filled later
+        self.paths = [] # [str]
+        self.status = None # ctx.status output
+        self.fctxmap = {} # {path: {ctx: fctx}}
+        self.fixupmap = {} # {path: filefixupstate}
+        self.replacemap = {} # {oldnode: newnode or None}
+        self.finalnode = None # head after all fixups
+        self.ctxaffected = set() # ctx that will be absorbed into
+
+    def diffwith(self, targetctx, match=None, fm=None):
+        """diff and prepare fixups. update self.fixupmap, self.paths"""
+        # only care about modified files
+        self.status = self.stack[-1].status(targetctx, match)
+        self.paths = []
+        # but if --edit-lines is used, the user may want to edit files
+        # even if they are not modified
+        editopt = self.opts.get('edit_lines')
+        if not self.status.modified and editopt and match:
+            interestingpaths = match.files()
+        else:
+            interestingpaths = self.status.modified
+        # prepare the filefixupstate
+        seenfctxs = set()
+        # sorting is necessary to eliminate ambiguity for the "double move"
+        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
+        for path in sorted(interestingpaths):
+            self.ui.debug('calculating fixups for %s\n' % path)
+            targetfctx = targetctx[path]
+            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
+            # ignore symbolic links, binary files, and unchanged files
+            if any(f.islink() or stringutil.binary(f.data())
+                   for f in [targetfctx] + fctxs
+                   if not isinstance(f, emptyfilecontext)):
+                continue
+            if targetfctx.data() == fctxs[-1].data() and not editopt:
+                continue
+            seenfctxs.update(fctxs[1:])
+            self.fctxmap[path] = ctx2fctx
+            fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
+            if fm is not None:
+                fm.startitem()
+                fm.plain('showing changes for ')
+                fm.write('path', '%s\n', path, label='absorb.path')
+                fm.data(linetype='path')
+            fstate.diffwith(targetfctx, fm)
+            self.fixupmap[path] = fstate
+            self.paths.append(path)
+            self.ctxaffected.update(fstate.ctxaffected)
+
+    def apply(self):
+        """apply fixups to individual filefixupstates"""
+        for path, state in self.fixupmap.iteritems():
+            if self.ui.debugflag:
+                self.ui.write(_('applying fixups to %s\n') % path)
+            state.apply()
+
+    @property
+    def chunkstats(self):
+        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
+        return dict((path, state.chunkstats)
+                    for path, state in self.fixupmap.iteritems())
+
+    def commit(self):
+        """commit changes. update self.finalnode, self.replacemap"""
+        with self.repo.wlock(), self.repo.lock():
+            with self.repo.transaction('absorb') as tr:
+                self._commitstack()
+                self._movebookmarks(tr)
+                if self.repo['.'].node() in self.replacemap:
+                    self._moveworkingdirectoryparent()
+                if self._useobsolete:
+                    self._obsoleteoldcommits()
+            if not self._useobsolete: # strip must be outside transactions
+                self._stripoldcommits()
+        return self.finalnode
+
+    def printchunkstats(self):
+        """print things like '1 of 2 chunk(s) applied'"""
+        ui = self.ui
+        chunkstats = self.chunkstats
+        if ui.verbose:
+            # chunkstats for each file
+            for path, stat in chunkstats.iteritems():
+                if stat[0]:
+                    ui.write(_('%s: %d of %d chunk(s) applied\n')
+                             % (path, stat[0], stat[1]))
+        elif not ui.quiet:
+            # a summary for all files
+            stats = chunkstats.values()
+            applied, total = (sum(s[i] for s in stats) for i in (0, 1))
+            ui.write(_('%d of %d chunk(s) applied\n') % (applied, total))
+
+    def _commitstack(self):
+        """make new commits. update self.finalnode, self.replacemap.
+        it is split out from "commit" to avoid too much indentation.
+        """
+        # the changectx of the last commit made by us, if any
+        lastcommitted = None
+        # p1 which overrides the parent of the next commit, "None" means use
+        # the original parent unchanged
+        nextp1 = None
+        for ctx in self.stack:
+            memworkingcopy = self._getnewfilecontents(ctx)
+            if not memworkingcopy and not lastcommitted:
+                # nothing changed, nothing committed
+                nextp1 = ctx
+                continue
+            msg = ''
+            if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+                # changeset is no longer necessary
+                self.replacemap[ctx.node()] = None
+                msg = _('became empty and was dropped')
+            else:
+                # changeset needs re-commit
+                nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
+                lastcommitted = self.repo[nodestr]
+                nextp1 = lastcommitted
+                self.replacemap[ctx.node()] = lastcommitted.node()
+                if memworkingcopy:
+                    msg = _('%d file(s) changed, became %s') % (
+                        len(memworkingcopy), self._ctx2str(lastcommitted))
+                else:
+                    msg = _('became %s') % self._ctx2str(lastcommitted)
+            if self.ui.verbose and msg:
+                self.ui.write(_('%s: %s\n') % (self._ctx2str(ctx), msg))
+        self.finalnode = lastcommitted and lastcommitted.node()
+
+    def _ctx2str(self, ctx):
+        if self.ui.debugflag:
+            return '%d:%s' % (ctx.rev(), ctx.hex())
+        else:
+            return '%d:%s' % (ctx.rev(), node.short(ctx.node()))
+
+    def _getnewfilecontents(self, ctx):
+        """(ctx) -> {path: str}
+
+        fetch file contents from filefixupstates.
+        return the working copy overrides - files different from ctx.
+        """
+        result = {}
+        for path in self.paths:
+            ctx2fctx = self.fctxmap[path] # {ctx: fctx}
+            if ctx not in ctx2fctx:
+                continue
+            fctx = ctx2fctx[ctx]
+            content = fctx.data()
+            newcontent = self.fixupmap[path].getfinalcontent(fctx)
+            if content != newcontent:
+                result[fctx.path()] = newcontent
+        return result
+
+    def _movebookmarks(self, tr):
+        repo = self.repo
+        needupdate = [(name, self.replacemap[hsh])
+                      for name, hsh in repo._bookmarks.iteritems()
+                      if hsh in self.replacemap]
+        changes = []
+        for name, hsh in needupdate:
+            if hsh:
+                changes.append((name, hsh))
+                if self.ui.verbose:
+                    self.ui.write(_('moving bookmark %s to %s\n')
+                                  % (name, node.hex(hsh)))
+            else:
+                changes.append((name, None))
+                if self.ui.verbose:
+                    self.ui.write(_('deleting bookmark %s\n') % name)
+        repo._bookmarks.applychanges(repo, tr, changes)
+
+    def _moveworkingdirectoryparent(self):
+        if not self.finalnode:
+            # Find the latest not-{obsoleted,stripped} parent.
+            revs = self.repo.revs('max(::. - %ln)', self.replacemap.keys())
+            ctx = self.repo[revs.first()]
+            self.finalnode = ctx.node()
+        else:
+            ctx = self.repo[self.finalnode]
+
+        dirstate = self.repo.dirstate
+        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
+        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
+        noop = lambda: 0
+        restore = noop
+        if util.safehasattr(dirstate, '_fsmonitorstate'):
+            bak = dirstate._fsmonitorstate.invalidate
+            def restore():
+                dirstate._fsmonitorstate.invalidate = bak
+            dirstate._fsmonitorstate.invalidate = noop
+        try:
+            with dirstate.parentchange():
+                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
+        finally:
+            restore()
+
+    @staticmethod
+    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
+        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop
+
+        if it will become an empty commit (does not change anything, after the
+        memworkingcopy overrides), return True. otherwise return False.
+        """
+        if not pctx:
+            parents = ctx.parents()
+            if len(parents) != 1:
+                return False
+            pctx = parents[0]
+        # ctx changes more files (not a subset of memworkingcopy)
+        if not set(ctx.files()).issubset(set(memworkingcopy)):
+            return False
+        for path, content in memworkingcopy.iteritems():
+            if path not in pctx or path not in ctx:
+                return False
+            fctx = ctx[path]
+            pfctx = pctx[path]
+            if pfctx.flags() != fctx.flags():
+                return False
+            if pfctx.data() != content:
+                return False
+        return True
+
+    def _commitsingle(self, memworkingcopy, ctx, p1=None):
+        """(ctx, {path: content}, node) -> node. make a single commit
+
+        the commit is a clone from ctx, with a (optionally) different p1, and
+        different file contents replaced by memworkingcopy.
+        """
+        parents = p1 and (p1, node.nullid)
+        extra = ctx.extra()
+        if self._useobsolete and self.ui.configbool('absorb', 'add-noise'):
+            extra['absorb_source'] = ctx.hex()
+        mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
+        # preserve phase
+        with mctx.repo().ui.configoverride({
+            ('phases', 'new-commit'): ctx.phase()}):
+            return mctx.commit()
+
+    @util.propertycache
+    def _useobsolete(self):
+        """() -> bool"""
+        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
+
+    def _obsoleteoldcommits(self):
+        relations = [(self.repo[k], v and (self.repo[v],) or ())
+                     for k, v in self.replacemap.iteritems()]
+        if relations:
+            obsolete.createmarkers(self.repo, relations)
+
+    def _stripoldcommits(self):
+        nodelist = self.replacemap.keys()
+        # make sure we don't strip innocent children
+        revs = self.repo.revs('%ln - (::(heads(%ln::)-%ln))', nodelist,
+                              nodelist, nodelist)
+        tonode = self.repo.changelog.node
+        nodelist = [tonode(r) for r in revs]
+        if nodelist:
+            repair.strip(self.repo.ui, self.repo, nodelist)
+
+def _parsechunk(hunk):
+    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
+    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
+        return None, None
+    path = hunk.header.filename()
+    a1 = hunk.fromline + len(hunk.before) - 1
+    # remove before and after context
+    hunk.before = hunk.after = []
+    buf = util.stringio()
+    hunk.write(buf)
+    patchlines = mdiff.splitnewlines(buf.getvalue())
+    # hunk.prettystr() will update hunk.removed
+    a2 = a1 + hunk.removed
+    blines = [l[1:] for l in patchlines[1:] if l[0] != '-']
+    return path, (a1, a2, blines)
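+
+# arithmetic sketch: for a hunk "@@ -3,4 +3,5 @@" with one leading context
+# line, a1 == 3 + 1 - 1 == 3 (the 0-based index of the first removed line)
+# and a2 == a1 + hunk.removed once the context lines are stripped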
+
+def overlaydiffcontext(ctx, chunks):
+    """(ctx, [crecord.uihunk]) -> memctx
+
+    return a memctx with some [1] patches (chunks) applied to ctx.
+    [1]: modifications are handled. renames, mode changes, etc. are ignored.
+    """
+    # sadly the applying-patch logic is hardly reusable, and messy:
+    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
+    #    needs a file stream of a patch and will re-parse it, while we have
+    #    structured hunk objects at hand.
+    # 2. a lot of different implementations about "chunk" (patch.hunk,
+    #    patch.recordhunk, crecord.uihunk)
+    # as we only care about applying changes to modified files, no mode
+    # change, no binary diff, and no renames, it's probably okay to
+    # re-invent the logic using much simpler code here.
+    memworkingcopy = {} # {path: content}
+    patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
+    for path, info in map(_parsechunk, chunks):
+        if not path or not info:
+            continue
+        patchmap[path].append(info)
+    for path, patches in patchmap.iteritems():
+        if path not in ctx or not patches:
+            continue
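+        # splice hunks bottom-up, so replacing a later span does not shift
+        # the (a1, a2) offsets of the spans above it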
+        patches.sort(reverse=True)
+        lines = mdiff.splitnewlines(ctx[path].data())
+        for a1, a2, blines in patches:
+            lines[a1:a2] = blines
+        memworkingcopy[path] = ''.join(lines)
+    return overlaycontext(memworkingcopy, ctx)
+
+def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
+    """pick fixup chunks from targetctx, apply them to stack.
+
+    if targetctx is None, the working copy context will be used.
+    if stack is None, the current draft stack will be used.
+    return fixupstate.
+    """
+    if stack is None:
+        limit = ui.configint('absorb', 'max-stack-size')
+        stack = getdraftstack(repo['.'], limit)
+        if limit and len(stack) >= limit:
+            ui.warn(_('absorb: only the most recent %d changesets will '
+                      'be analysed\n')
+                    % limit)
+    if not stack:
+        raise error.Abort(_('no mutable changeset to change'))
+    if targetctx is None: # default to working copy
+        targetctx = repo[None]
+    if pats is None:
+        pats = ()
+    if opts is None:
+        opts = {}
+    state = fixupstate(stack, ui=ui, opts=opts)
+    matcher = scmutil.match(targetctx, pats, opts)
+    if opts.get('interactive'):
+        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
+        origchunks = patch.parsepatch(diff)
+        chunks = cmdutil.recordfilter(ui, origchunks)[0]
+        targetctx = overlaydiffcontext(stack[-1], chunks)
+    fm = None
+    if opts.get('print_changes') or not opts.get('apply_changes'):
+        fm = ui.formatter('absorb', opts)
+    state.diffwith(targetctx, matcher, fm)
+    if fm is not None:
+        fm.startitem()
+        fm.write("count", "\n%d changesets affected\n", len(state.ctxaffected))
+        fm.data(linetype='summary')
+        for ctx in reversed(stack):
+            if ctx not in state.ctxaffected:
+                continue
+            fm.startitem()
+            fm.context(ctx=ctx)
+            fm.data(linetype='changeset')
+            fm.write('node', '%-7.7s ', ctx.hex(), label='absorb.node')
+            descfirstline = ctx.description().splitlines()[0]
+            fm.write('descfirstline', '%s\n', descfirstline,
+                     label='absorb.description')
+        fm.end()
+    if not opts.get('dry_run'):
+        if not opts.get('apply_changes'):
+            if ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1):
+                raise error.Abort(_('absorb cancelled'))
+
+        state.apply()
+        if state.commit():
+            state.printchunkstats()
+        elif not ui.quiet:
+            ui.write(_('nothing applied\n'))
+    return state
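+
+# a hedged programmatic sketch (ui and repo come from hg internals or an
+# extension hook; the opts keys mirror the command flags defined below):
+#
+#     state = absorb(ui, repo, opts={'apply_changes': True})
+#     # state.chunkstats maps path -> [applied, total]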
+
+@command('absorb',
+         [('a', 'apply-changes', None,
+           _('apply changes without prompting for confirmation')),
+          ('p', 'print-changes', None,
+           _('always print which changesets are modified by which changes')),
+          ('i', 'interactive', None,
+           _('interactively select which chunks to apply (EXPERIMENTAL)')),
+          ('e', 'edit-lines', None,
+           _('edit what lines belong to which changesets before commit '
+             '(EXPERIMENTAL)')),
+         ] + commands.dryrunopts + commands.templateopts + commands.walkopts,
+         _('hg absorb [OPTION] [FILE]...'),
+         helpcategory=command.CATEGORY_COMMITTING,
+         helpbasic=True)
+def absorbcmd(ui, repo, *pats, **opts):
+    """incorporate corrections into the stack of draft changesets
+
+    absorb analyzes each change in your working directory and attempts to
+    amend the changed lines into the changesets in your stack that first
+    introduced those lines.
+
+    If absorb cannot find an unambiguous changeset to amend for a change,
+    that change will be left in the working directory, untouched. Such
+    changes can be observed by :hg:`status` or :hg:`diff` afterwards. In
+    other words, absorb does not write to the working directory.
+
+    Changesets outside the revset `::. and not public() and not merge()` will
+    not be changed.
+
+    Changesets that become empty after applying the changes will be deleted.
+
+    By default, absorb will show what it plans to do and prompt for
+    confirmation.  If you are confident that the changes will be absorbed
+    to the correct place, run :hg:`absorb -a` to apply the changes
+    immediately.
+
+    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
+    """
+    opts = pycompat.byteskwargs(opts)
+    state = absorb(ui, repo, pats=pats, opts=opts)
+    if sum(s[0] for s in state.chunkstats.values()) == 0:
+        return 1
--- a/hgext/acl.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/acl.py	Mon Oct 22 14:46:06 2018 -0400
@@ -220,6 +220,7 @@
     error,
     extensions,
     match,
+    pycompat,
     registrar,
     util,
 )
@@ -374,9 +375,9 @@
         _txnhook(ui, repo, hooktype, node, source, user, **kwargs)
 
 def _pkhook(ui, repo, hooktype, node, source, user, **kwargs):
-    if kwargs['namespace'] == 'bookmarks':
-        bookmark = kwargs['key']
-        ctx = kwargs['new']
+    if kwargs[r'namespace'] == 'bookmarks':
+        bookmark = kwargs[r'key']
+        ctx = kwargs[r'new']
         allowbookmarks = buildmatch(ui, None, user, 'acl.allow.bookmarks')
         denybookmarks = buildmatch(ui, None, user, 'acl.deny.bookmarks')
 
@@ -403,7 +404,7 @@
     allow = buildmatch(ui, repo, user, 'acl.allow')
     deny = buildmatch(ui, repo, user, 'acl.deny')
 
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         ctx = repo[rev]
         branch = ctx.branch()
         if denybranches and denybranches(branch):
--- a/hgext/amend.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/amend.py	Mon Oct 22 14:46:06 2018 -0400
@@ -38,6 +38,7 @@
      ('n', 'note', '', _('store a note on the amend')),
     ] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2,
     _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
     inferrepo=True)
 def amend(ui, repo, *pats, **opts):
     """amend the working copy parent with all or specified outstanding changes
--- a/hgext/beautifygraph.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/beautifygraph.py	Mon Oct 22 14:46:06 2018 -0400
@@ -18,6 +18,7 @@
     encoding,
     extensions,
     graphmod,
+    pycompat,
     templatekw,
 )
 
@@ -53,8 +54,10 @@
 def convertedges(line):
     line = ' %s ' % line
     pretty = []
-    for idx in xrange(len(line) - 2):
-        pretty.append(prettyedge(line[idx], line[idx + 1], line[idx + 2]))
+    for idx in pycompat.xrange(len(line) - 2):
+        pretty.append(prettyedge(line[idx:idx + 1],
+                                 line[idx + 1:idx + 2],
+                                 line[idx + 2:idx + 3]))
     return ''.join(pretty)
 
 def getprettygraphnode(orig, *args, **kwargs):
@@ -84,7 +87,7 @@
         ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
-    if 'A' in encoding._wide:
+    if r'A' in encoding._wide:
         ui.warn(_('beautifygraph: unsupported terminal settings, '
                   'monospace narrow text required\n'))
         return
--- a/hgext/blackbox.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/blackbox.py	Mon Oct 22 14:46:06 2018 -0400
@@ -45,6 +45,7 @@
 
 from mercurial import (
     encoding,
+    pycompat,
     registrar,
     ui as uimod,
     util,
@@ -111,7 +112,7 @@
             if st.st_size >= maxsize:
                 path = vfs.join(name)
                 maxfiles = ui.configint('blackbox', 'maxfiles')
-                for i in xrange(maxfiles - 1, 1, -1):
+                for i in pycompat.xrange(maxfiles - 1, 1, -1):
                     rotate(oldpath='%s.%d' % (path, i - 1),
                            newpath='%s.%d' % (path, i))
                 rotate(oldpath=path,
@@ -225,10 +226,12 @@
 
     repo._wlockfreeprefix.add('blackbox.log')
 
-@command('^blackbox',
+@command('blackbox',
     [('l', 'limit', 10, _('the number of events to show')),
     ],
-    _('hg blackbox [OPTION]...'))
+    _('hg blackbox [OPTION]...'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
+    helpbasic=True)
 def blackbox(ui, repo, *revs, **opts):
     '''view the recent repository events
     '''
--- a/hgext/censor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/censor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -33,9 +33,7 @@
 from mercurial import (
     error,
     registrar,
-    revlog,
     scmutil,
-    util,
 )
 
 cmdtable = {}
@@ -49,7 +47,8 @@
 @command('censor',
     [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
      ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
-    _('-r REV [-t TEXT] [FILE]'))
+    _('-r REV [-t TEXT] [FILE]'),
+    helpcategory=command.CATEGORY_MAINTENANCE)
 def censor(ui, repo, path, rev='', tombstone='', **opts):
     with repo.wlock(), repo.lock():
         return _docensor(ui, repo, path, rev, tombstone, **opts)
@@ -82,8 +81,11 @@
         raise error.Abort(_('file does not exist at revision %s') % rev)
 
     fnode = fctx.filenode()
-    headctxs = [repo[c] for c in repo.heads()]
-    heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
+    heads = []
+    for headnode in repo.heads():
+        hc = repo[headnode]
+        if path in hc and hc.filenode(path) == fnode:
+            heads.append(hc)
     if heads:
         headlist = ', '.join([short(c.node()) for c in heads])
         raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
@@ -94,90 +96,5 @@
         raise error.Abort(_('cannot censor working directory'),
             hint=_('clean/delete/update first'))
 
-    flogv = flog.version & 0xFFFF
-    if flogv != revlog.REVLOGV1:
-        raise error.Abort(
-            _('censor does not support revlog version %d') % (flogv,))
-
-    tombstone = revlog.packmeta({"censored": tombstone}, "")
-
-    crev = fctx.filerev()
-
-    if len(tombstone) > flog.rawsize(crev):
-        raise error.Abort(_(
-            'censor tombstone must be no longer than censored data'))
-
-    # Using two files instead of one makes it easy to rewrite entry-by-entry
-    idxread = repo.svfs(flog.indexfile, 'r')
-    idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
-    if flog.version & revlog.FLAG_INLINE_DATA:
-        dataread, datawrite = idxread, idxwrite
-    else:
-        dataread = repo.svfs(flog.datafile, 'r')
-        datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)
-
-    # Copy all revlog data up to the entry to be censored.
-    rio = revlog.revlogio()
-    offset = flog.start(crev)
-
-    for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
-        idxwrite.write(chunk)
-    for chunk in util.filechunkiter(dataread, limit=offset):
-        datawrite.write(chunk)
-
-    def rewriteindex(r, newoffs, newdata=None):
-        """Rewrite the index entry with a new data offset and optional new data.
-
-        The newdata argument, if given, is a tuple of three positive integers:
-        (new compressed, new uncompressed, added flag bits).
-        """
-        offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
-        flags = revlog.gettype(offlags)
-        if newdata:
-            comp, uncomp, nflags = newdata
-            flags |= nflags
-        offlags = revlog.offset_type(newoffs, flags)
-        e = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
-        idxwrite.write(rio.packentry(e, None, flog.version, r))
-        idxread.seek(rio.size, 1)
-
-    def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
-        """Write the given full text to the filelog with the given data offset.
-
-        Returns:
-            The integer number of data bytes written, for tracking data offsets.
-        """
-        flag, compdata = flog.compress(data)
-        newcomp = len(flag) + len(compdata)
-        rewriteindex(r, offs, (newcomp, len(data), nflags))
-        datawrite.write(flag)
-        datawrite.write(compdata)
-        dataread.seek(flog.length(r), 1)
-        return newcomp
-
-    # Rewrite censored revlog entry with (padded) tombstone data.
-    pad = ' ' * (flog.rawsize(crev) - len(tombstone))
-    offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
-
-    # Rewrite all following filelog revisions fixing up offsets and deltas.
-    for srev in xrange(crev + 1, len(flog)):
-        if crev in flog.parentrevs(srev):
-            # Immediate children of censored node must be re-added as fulltext.
-            try:
-                revdata = flog.revision(srev)
-            except error.CensoredNodeError as e:
-                revdata = e.tombstone
-            dlen = rewrite(srev, offset, revdata)
-        else:
-            # Copy any other revision data verbatim after fixing up the offset.
-            rewriteindex(srev, offset)
-            dlen = flog.length(srev)
-            for chunk in util.filechunkiter(dataread, limit=dlen):
-                datawrite.write(chunk)
-        offset += dlen
-
-    idxread.close()
-    idxwrite.close()
-    if dataread is not idxread:
-        dataread.close()
-        datawrite.close()
+    with repo.transaction(b'censor') as tr:
+        flog.censorrevision(tr, fnode, tombstone=tombstone)
--- a/hgext/children.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/children.py	Mon Oct 22 14:46:06 2018 -0400
@@ -40,6 +40,7 @@
      _('show children of the specified revision'), _('REV')),
     ] + templateopts,
     _('hg children [-r REV] [FILE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     inferrepo=True)
 def children(ui, repo, file_=None, **opts):
     """show the children of the given or working directory revision
--- a/hgext/churn.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/churn.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,7 +8,7 @@
 
 '''command to display statistics about repository history'''
 
-from __future__ import absolute_import
+from __future__ import absolute_import, division
 
 import datetime
 import os
@@ -52,7 +52,8 @@
         def getkey(ctx):
             t, tz = ctx.date()
             date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
-            return date.strftime(encoding.strfromlocal(opts['dateformat']))
+            return encoding.strtolocal(
+                date.strftime(encoding.strfromlocal(opts['dateformat'])))
     else:
         tmpl = opts.get('oldtemplate') or opts.get('template')
         tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
@@ -115,6 +116,7 @@
     ('', 'aliases', '', _('file with email aliases'), _('FILE')),
     ] + cmdutil.walkopts,
     _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
+    helpcategory=command.CATEGORY_MAINTENANCE,
     inferrepo=True)
 def churn(ui, repo, *pats, **opts):
     '''histogram of changes to the repository
@@ -204,7 +206,7 @@
                                     '*' * charnum(sum(count)))
 
     def charnum(count):
-        return int(round(count * width / maxcount))
+        return int(count * width // maxcount)
 
     for name, count in rate:
         ui.write(format(name, count))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/closehead.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,84 @@
+# closehead.py - Close arbitrary heads without checking them out first
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''close arbitrary heads without checking them out first'''
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    bookmarks,
+    cmdutil,
+    context,
+    error,
+    pycompat,
+    registrar,
+    scmutil,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+commitopts = cmdutil.commitopts
+commitopts2 = cmdutil.commitopts2
+commitopts3 = [('r', 'rev', [],
+               _('revision to close'), _('REV'))]
+
+@command('close-head|close-heads', commitopts + commitopts2 + commitopts3,
+    _('[OPTION]... [REV]...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+    inferrepo=True)
+def close_branch(ui, repo, *revs, **opts):
+    """close the given head revisions
+
+    This is equivalent to checking out each revision in a clean tree and running
+    ``hg commit --close-branch``, except that it doesn't change the working
+    directory.
+
+    The commit message must be specified with -l or -m.
+    """
+    def docommit(rev):
+        cctx = context.memctx(repo, parents=[rev, None], text=message,
+                              files=[], filectxfn=None, user=opts.get('user'),
+                              date=opts.get('date'), extra=extra)
+        tr = repo.transaction('commit')
+        ret = repo.commitctx(cctx, True)
+        bookmarks.update(repo, [rev, None], ret)
+        cctx.markcommitted(ret)
+        tr.close()
+
+    opts = pycompat.byteskwargs(opts)
+
+    revs += tuple(opts.get('rev', []))
+    revs = scmutil.revrange(repo, revs)
+
+    if not revs:
+        raise error.Abort(_('no revisions specified'))
+
+    heads = []
+    for branch in repo.branchmap():
+        heads.extend(repo.branchheads(branch))
+    heads = set(repo[h].rev() for h in heads)
+    for rev in revs:
+        if rev not in heads:
+            raise error.Abort(_('revision is not an open head: %d') % rev)
+
+    message = cmdutil.logmessage(ui, opts)
+    if not message:
+        raise error.Abort(_("no commit message specified with -l or -m"))
+    extra = {'close': '1'}
+
+    with repo.wlock(), repo.lock():
+        for rev in revs:
+            r = repo[rev]
+            branch = r.branch()
+            extra['branch'] = branch
+            docommit(r)
+    return 0
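+
+# example invocation (illustrative): close two open branch heads at once:
+#
+#   $ hg close-head -m 'closing stale heads' -r 12 -r 15
+#
+# every revision given must be an open branch head, otherwise this aborts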
--- a/hgext/commitextras.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/commitextras.py	Mon Oct 22 14:46:06 2018 -0400
@@ -17,6 +17,7 @@
     error,
     extensions,
     registrar,
+    util,
 )
 
 cmdtable = {}
@@ -43,34 +44,29 @@
         _('set a changeset\'s extra values'), _("KEY=VALUE")))
 
 def _commit(orig, ui, repo, *pats, **opts):
-    origcommit = repo.commit
-    try:
-        def _wrappedcommit(*innerpats, **inneropts):
+    if util.safehasattr(repo, 'unfiltered'):
+        repo = repo.unfiltered()
+    class repoextra(repo.__class__):
+        def commit(self, *innerpats, **inneropts):
             extras = opts.get(r'extra')
-            if extras:
-                for raw in extras:
-                    if '=' not in raw:
-                        msg = _("unable to parse '%s', should follow "
-                                "KEY=VALUE format")
-                        raise error.Abort(msg % raw)
-                    k, v = raw.split('=', 1)
-                    if not k:
-                        msg = _("unable to parse '%s', keys can't be empty")
-                        raise error.Abort(msg % raw)
-                    if re.search('[^\w-]', k):
-                        msg = _("keys can only contain ascii letters, digits,"
-                                " '_' and '-'")
-                        raise error.Abort(msg)
-                    if k in usedinternally:
-                        msg = _("key '%s' is used internally, can't be set "
-                                "manually")
-                        raise error.Abort(msg % k)
-                    inneropts[r'extra'][k] = v
-            return origcommit(*innerpats, **inneropts)
-
-        # This __dict__ logic is needed because the normal
-        # extension.wrapfunction doesn't seem to work.
-        repo.__dict__[r'commit'] = _wrappedcommit
-        return orig(ui, repo, *pats, **opts)
-    finally:
-        del repo.__dict__[r'commit']
+            for raw in extras:
+                if '=' not in raw:
+                    msg = _("unable to parse '%s', should follow "
+                            "KEY=VALUE format")
+                    raise error.Abort(msg % raw)
+                k, v = raw.split('=', 1)
+                if not k:
+                    msg = _("unable to parse '%s', keys can't be empty")
+                    raise error.Abort(msg % raw)
+                if re.search('[^\w-]', k):
+                    msg = _("keys can only contain ascii letters, digits,"
+                            " '_' and '-'")
+                    raise error.Abort(msg)
+                if k in usedinternally:
+                    msg = _("key '%s' is used internally, can't be set "
+                            "manually")
+                    raise error.Abort(msg % k)
+                inneropts[r'extra'][k] = v
+            return super(repoextra, self).commit(*innerpats, **inneropts)
+    repo.__class__ = repoextra
+    return orig(ui, repo, *pats, **opts)
--- a/hgext/convert/common.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/common.py	Mon Oct 22 14:46:06 2018 -0400
@@ -402,7 +402,8 @@
 
     def _run(self, cmd, *args, **kwargs):
         def popen(cmdline):
-            p = subprocess.Popen(cmdline, shell=True, bufsize=-1,
+            p = subprocess.Popen(procutil.tonativestr(cmdline),
+                                 shell=True, bufsize=-1,
                                  close_fds=procutil.closefds,
                                  stdout=subprocess.PIPE)
             return p
@@ -459,7 +460,7 @@
         # POSIX requires at least 4096 bytes for ARG_MAX
         argmax = 4096
         try:
-            argmax = os.sysconf("SC_ARG_MAX")
+            argmax = os.sysconf(r"SC_ARG_MAX")
         except (AttributeError, ValueError):
             pass
 
--- a/hgext/convert/cvs.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/cvs.py	Mon Oct 22 14:46:06 2018 -0400
@@ -15,7 +15,6 @@
 from mercurial import (
     encoding,
     error,
-    pycompat,
     util,
 )
 from mercurial.utils import (
@@ -74,7 +73,7 @@
                 raise error.Abort(_('revision %s is not a patchset number')
                                  % self.revs[0])
 
-        d = pycompat.getcwd()
+        d = encoding.getcwd()
         try:
             os.chdir(self.path)
             id = None
--- a/hgext/convert/cvsps.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/cvsps.py	Mon Oct 22 14:46:06 2018 -0400
@@ -763,7 +763,7 @@
             # branchpoints such that it is the latest possible
             # commit without any intervening, unrelated commits.
 
-            for candidate in xrange(i):
+            for candidate in pycompat.xrange(i):
                 if c.branch not in changesets[candidate].branchpoints:
                     if p is not None:
                         break
--- a/hgext/convert/gnuarch.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/gnuarch.py	Mon Oct 22 14:46:06 2018 -0400
@@ -17,6 +17,8 @@
 from mercurial import (
     encoding,
     error,
+    pycompat,
+    util,
 )
 from mercurial.utils import (
     dateutil,
@@ -201,7 +203,7 @@
         cmdline += ['>', os.devnull, '2>', os.devnull]
         cmdline = procutil.quotecommand(' '.join(cmdline))
         self.ui.debug(cmdline, '\n')
-        return os.system(cmdline)
+        return os.system(pycompat.rapply(procutil.tonativestr, cmdline))
 
     def _update(self, rev):
         self.ui.debug('applying revision %s...\n' % rev)
@@ -221,13 +223,13 @@
     def _getfile(self, name, rev):
         mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
         if stat.S_ISLNK(mode):
-            data = os.readlink(os.path.join(self.tmppath, name))
+            data = util.readlink(os.path.join(self.tmppath, name))
             if mode:
                 mode = 'l'
             else:
                 mode = ''
         else:
-            data = open(os.path.join(self.tmppath, name), 'rb').read()
+            data = util.readfile(os.path.join(self.tmppath, name))
             mode = (mode & 0o111) and 'x' or ''
         return data, mode
 
--- a/hgext/convert/hg.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/hg.py	Mon Oct 22 14:46:06 2018 -0400
@@ -358,7 +358,7 @@
             p2 = node
 
         if self.filemapmode and nparents == 1:
-            man = self.repo.manifestlog._revlog
+            man = self.repo.manifestlog.getstorage(b'')
             mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
             closed = 'close' in commit.extra
             if not closed and not man.cmp(m1node, man.revision(mnode)):
--- a/hgext/convert/monotone.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/monotone.py	Mon Oct 22 14:46:06 2018 -0400
@@ -102,7 +102,7 @@
 
         command.append('l')
         for arg in args:
-            command += "%s:%s" % (len(arg), arg)
+            command += "%d:%s" % (len(arg), arg)
         command.append('e')
         command = ''.join(command)
 
@@ -345,7 +345,7 @@
 
         if version >= 12.0:
             self.automatestdio = True
-            self.ui.debug("mtn automate version %s - using automate stdio\n" %
+            self.ui.debug("mtn automate version %f - using automate stdio\n" %
                 version)
 
             # launch the long-running automate stdio process
--- a/hgext/convert/subversion.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/convert/subversion.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1127,7 +1127,7 @@
         self.delexec = []
         self.copies = []
         self.wc = None
-        self.cwd = pycompat.getcwd()
+        self.cwd = encoding.getcwd()
 
         created = False
         if os.path.isfile(os.path.join(path, '.svn', 'entries')):
@@ -1138,7 +1138,7 @@
                 path = os.path.realpath(path)
                 if os.path.isdir(os.path.dirname(path)):
                     if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
-                        ui.status(_('initializing svn repository %r\n') %
+                        ui.status(_("initializing svn repository '%s'\n") %
                                   os.path.basename(path))
                         commandline(ui, 'svnadmin').run0('create', path)
                         created = path
@@ -1147,9 +1147,9 @@
                         path = '/' + path
                     path = 'file://' + path
 
-            wcpath = os.path.join(pycompat.getcwd(), os.path.basename(path) +
+            wcpath = os.path.join(encoding.getcwd(), os.path.basename(path) +
                                 '-wc')
-            ui.status(_('initializing svn working copy %r\n')
+            ui.status(_("initializing svn working copy '%s'\n")
                       % os.path.basename(wcpath))
             self.run0('checkout', path, wcpath)
 
@@ -1270,7 +1270,7 @@
         self.childmap[parent] = child
 
     def revid(self, rev):
-        return u"svn:%s@%s" % (self.uuid, rev)
+        return "svn:%s@%s" % (self.uuid, rev)
 
     def putcommit(self, files, copies, parents, commit, source, revmap, full,
                   cleanp2):
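The %r-to-'%s' message changes above keep Python 3 bytes reprs out of
user-facing output. A quick illustration in plain Python 3:

    path = b'project-wc'
    print('initializing svn working copy %r' % path)
    # -> initializing svn working copy b'project-wc'
    print("initializing svn working copy '%s'" % path.decode())
    # -> initializing svn working copy 'project-wc'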
--- a/hgext/eol.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/eol.py	Mon Oct 22 14:46:06 2018 -0400
@@ -266,7 +266,7 @@
     ensureenabled(ui)
     files = set()
     revs = set()
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         revs.add(rev)
         if headsonly:
             ctx = repo[rev]
--- a/hgext/extdiff.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/extdiff.py	Mon Oct 22 14:46:06 2018 -0400
@@ -338,6 +338,7 @@
     [('p', 'program', '', _('comparison program to run'), _('CMD')),
      ] + extdiffopts,
     _('hg extdiff [OPT]... [FILE]...'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True)
 def extdiff(ui, repo, *pats, **opts):
     '''use external program to diff repository (or selected files)
--- a/hgext/factotum.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/factotum.py	Mon Oct 22 14:46:06 2018 -0400
@@ -49,6 +49,9 @@
 
 import os
 from mercurial.i18n import _
+from mercurial.utils import (
+    procutil,
+)
 from mercurial import (
     error,
     httpconnection,
@@ -83,7 +86,7 @@
     if 'user=' not in params:
         params = '%s user?' % params
     params = '%s !password?' % params
-    os.system("%s -g '%s'" % (_executable, params))
+    os.system(procutil.tonativestr("%s -g '%s'" % (_executable, params)))
 
 def auth_getuserpasswd(self, getkey, params):
     params = 'proto=pass %s' % params
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,193 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# fastannotate: faster annotate implementation using linelog
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""yet another annotate implementation that might be faster (EXPERIMENTAL)
+
+The fastannotate extension provides a 'fastannotate' command that makes
+use of the linelog data structure as a cache layer and is expected to
+be faster than the vanilla 'annotate' if the cache is present.
+
+To be most efficient, fastannotate requires a setup in which mainbranch is a
+pointer that always moves forward.
+
+Using fastannotate together with linkrevcache would speed up building the
+annotate cache greatly. Run "debugbuildlinkrevcache" before
+"debugbuildannotatecache".
+
+::
+
+    [fastannotate]
+    # specify the main branch head. the internal linelog will only contain
+    # the linear (ignoring p2) "mainbranch". since linelog cannot move
+    # backwards without a rebuild, this should be something that always moves
+    # forward, usually it is "master" or "@".
+    mainbranch = master
+
+    # fastannotate supports different modes to expose its feature.
+    # a list of combinations:
+    # - fastannotate: expose the feature via the "fastannotate" command which
+    #   deals with everything in the most efficient way, and provides extra
+    #   features like --deleted etc.
+    # - fctx: replace fctx.annotate implementation. note:
+    #     a. it is less efficient than the "fastannotate" command
+    #     b. it will make it practically impossible to access the old (disk
+    #        side-effect free) annotate implementation
+    #     c. it implies "hgweb".
+    # - hgweb: replace hgweb's annotate implementation. conflicts with "fctx".
+    # (default: fastannotate)
+    modes = fastannotate
+
+    # default format when no format flags are used (default: number)
+    defaultformat = changeset, user, date
+
+    # serve the annotate cache via wire protocol (default: False)
+    # tip: the .hg/fastannotate directory is portable - can be rsynced
+    server = True
+
+    # build annotate cache on demand for every client request (default: True)
+    # disabling it could make server response faster, useful when there is a
+    # cronjob building the cache.
+    serverbuildondemand = True
+
+    # update local annotate cache from remote on demand
+    client = False
+
+    # path to use when connecting to the remote server (default: default)
+    remotepath = default
+
+    # minimum length of the history of a file required to fetch linelog from
+    # the server. (default: 10)
+    clientfetchthreshold = 10
+
+    # use flock instead of the file existence lock
+    # flock may not work well on some network filesystems, but it avoids
+    # creating and deleting files frequently, which is faster when updating
+    # the annotate cache in batch. if you have issues with this option, set it
+    # to False. (default: True if flock is supported, False otherwise)
+    useflock = True
+
+    # for "fctx" mode, always follow renames regardless of command line option.
+    # this is a behavior change from the original command, but it reduces the
+    # space needed for the annotate cache, and is useful for a client-server
+    # setup since the server will only provide annotate cache with default
+    # options (i.e. with follow). does not affect "fastannotate" mode.
+    # (default: True)
+    forcefollow = True
+
+    # for "fctx" mode, always treat files as text, to skip the "isbinary"
+    # check. this is consistent with the "fastannotate" command and could help
+    # to avoid a file fetch if remotefilelog is used. (default: True)
+    forcetext = True
+
+    # use unfiltered repo for better performance.
+    unfilteredrepo = True
+
+    # sacrifice correctness in some corner cases for performance. it does not
+    # affect the correctness of the annotate cache being built. the option
+    # is experimental and may disappear in the future (default: False)
+    perfhack = True
+"""
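+
+# example: a minimal setup sketch (values are illustrative) - enable the
+# extension and point mainbranch at a head that only moves forward:
+#
+#   [extensions]
+#   fastannotate =
+#
+#   [fastannotate]
+#   mainbranch = master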
+
+# TODO items carried over from the import of this extension:
+# * `branch` is probably the wrong term, throughout the code.
+#
+# * replace the fastannotate `modes` configuration with a collection
+#   of booleans.
+#
+# * Use the templater instead of bespoke formatting
+#
+# * rename the config knob for updating the local cache from a remote server
+#
+# * move `flock` based locking to a common area
+#
+# * revise wireprotocol for sharing annotate files
+#
+# * figure out a sensible default for `mainbranch` (with the caveat
+#   that we probably also want to figure out a better term than
+#   `branch`, see above)
+#
+# * format changes to the revmap file (maybe use length-encoding
+#   instead of null-terminated file paths at least?)
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    configitems,
+    error as hgerror,
+    localrepo,
+    registrar,
+)
+
+from . import (
+    commands,
+    context,
+    protocol,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = commands.cmdtable
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('fastannotate', 'modes', default=['fastannotate'])
+configitem('fastannotate', 'server', default=False)
+configitem('fastannotate', 'useflock', default=configitems.dynamicdefault)
+configitem('fastannotate', 'client', default=False)
+configitem('fastannotate', 'unfilteredrepo', default=True)
+configitem('fastannotate', 'defaultformat', default=['number'])
+configitem('fastannotate', 'perfhack', default=False)
+configitem('fastannotate', 'mainbranch')
+configitem('fastannotate', 'forcetext', default=True)
+configitem('fastannotate', 'forcefollow', default=True)
+configitem('fastannotate', 'clientfetchthreshold', default=10)
+configitem('fastannotate', 'serverbuildondemand', default=True)
+configitem('fastannotate', 'remotepath', default='default')
+
+def _flockavailable():
+    try:
+        import fcntl
+        fcntl.flock
+    except (AttributeError, ImportError):
+        return False
+    else:
+        return True
+
+def uisetup(ui):
+    modes = set(ui.configlist('fastannotate', 'modes'))
+    if 'fctx' in modes:
+        modes.discard('hgweb')
+    for name in modes:
+        if name == 'fastannotate':
+            commands.registercommand()
+        elif name == 'hgweb':
+            from . import support
+            support.replacehgwebannotate()
+        elif name == 'fctx':
+            from . import support
+            support.replacefctxannotate()
+            commands.wrapdefault()
+        else:
+            raise hgerror.Abort(_('fastannotate: invalid mode: %s') % name)
+
+    if ui.configbool('fastannotate', 'server'):
+        protocol.serveruisetup(ui)
+
+    if ui.configbool('fastannotate', 'useflock', _flockavailable()):
+        context.pathhelper.lock = context.pathhelper._lockflock
+
+def extsetup(ui):
+    # fastannotate has its own locking, without depending on repo lock
+    # TODO: avoid mutating this unless the specific repo has it enabled
+    localrepo.localrepository._wlockfreeprefix.add('fastannotate/')
+
+def reposetup(ui, repo):
+    if ui.configbool('fastannotate', 'client'):
+        protocol.clientreposetup(ui, repo)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/commands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,285 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# commands: fastannotate commands
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    commands,
+    encoding,
+    error,
+    extensions,
+    patch,
+    pycompat,
+    registrar,
+    scmutil,
+    util,
+)
+
+from . import (
+    context as facontext,
+    error as faerror,
+    formatter as faformatter,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
+    """generate paths matching given patterns"""
+    perfhack = repo.ui.configbool('fastannotate', 'perfhack')
+
+    # disable perfhack if:
+    # a) any walkopt is used
+    # b) if we treat pats as plain file names, some of them do not have
+    #    corresponding linelog files
+    if perfhack:
+        # cwd related to reporoot
+        reporoot = os.path.dirname(repo.path)
+        reldir = os.path.relpath(encoding.getcwd(), reporoot)
+        if reldir == '.':
+            reldir = ''
+        if any(opts.get(o[1]) for o in commands.walkopts): # a)
+            perfhack = False
+        else: # b)
+            relpats = [os.path.relpath(p, reporoot) if os.path.isabs(p) else p
+                       for p in pats]
+            # disable perfhack on '..' since it allows escaping from the repo
+            if any(('..' in f or
+                    not os.path.isfile(
+                        facontext.pathhelper(repo, f, aopts).linelogpath))
+                   for f in relpats):
+                perfhack = False
+
+    # perfhack: emit paths directly without checking against the manifest.
+    # this can be incorrect if the rev does not have the file.
+    if perfhack:
+        for p in relpats:
+            yield os.path.join(reldir, p)
+    else:
+        def bad(x, y):
+            raise error.Abort("%s: %s" % (x, y))
+        ctx = scmutil.revsingle(repo, rev)
+        m = scmutil.match(ctx, pats, opts, badfn=bad)
+        for p in ctx.walk(m):
+            yield p
+
+fastannotatecommandargs = {
+    r'options': [
+        ('r', 'rev', '.', _('annotate the specified revision'), _('REV')),
+        ('u', 'user', None, _('list the author (long with -v)')),
+        ('f', 'file', None, _('list the filename')),
+        ('d', 'date', None, _('list the date (short with -q)')),
+        ('n', 'number', None, _('list the revision number (default)')),
+        ('c', 'changeset', None, _('list the changeset')),
+        ('l', 'line-number', None, _('show line number at the first '
+                                     'appearance')),
+        ('e', 'deleted', None, _('show deleted lines (slow) (EXPERIMENTAL)')),
+        ('', 'no-content', None, _('do not show file content (EXPERIMENTAL)')),
+        ('', 'no-follow', None, _("don't follow copies and renames")),
+        ('', 'linear', None, _('enforce linear history, ignore second parent '
+                               'of merges (EXPERIMENTAL)')),
+        ('', 'long-hash', None, _('show long changeset hash (EXPERIMENTAL)')),
+        ('', 'rebuild', None, _('rebuild cache even if it exists '
+                                '(EXPERIMENTAL)')),
+    ] + commands.diffwsopts + commands.walkopts + commands.formatteropts,
+    r'synopsis': _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
+    r'inferrepo': True,
+}
+
+def fastannotate(ui, repo, *pats, **opts):
+    """show changeset information by line for each file
+
+    List changes in files, showing the revision id responsible for each line.
+
+    This command is useful for discovering when a change was made and by whom.
+
+    By default this command prints revision numbers. If you include --file,
+    --user, or --date, the revision number is suppressed unless you also
+    include --number. The default format can also be customized by setting
+    fastannotate.defaultformat.
+
+    Returns 0 on success.
+
+    .. container:: verbose
+
+        This command uses an implementation different from the vanilla annotate
+        command, which may produce slightly different (while still reasonable)
+        outputs for some cases.
+
+        Unlike the vanilla annotate, fastannotate follows renames regardless
+        of whether --file is given.
+
+        For the best performance when running on a full repo, use -c, -l,
+        avoid -u, -d, -n. Use --linear and --no-content to make it even faster.
+
+        For the best performance when running on a shallow (remotefilelog)
+        repo, avoid --linear, --no-follow, and any diff options, as the server
+        cannot populate the annotate cache when non-default options that
+        affect results are used.
+    """
+    if not pats:
+        raise error.Abort(_('at least one filename or pattern is required'))
+
+    # performance hack: filtered repo can be slow. unfilter by default.
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    opts = pycompat.byteskwargs(opts)
+
+    rev = opts.get('rev', '.')
+    rebuild = opts.get('rebuild', False)
+
+    diffopts = patch.difffeatureopts(ui, opts, section='annotate',
+                                     whitespace=True)
+    aopts = facontext.annotateopts(
+        diffopts=diffopts,
+        followmerge=not opts.get('linear', False),
+        followrename=not opts.get('no_follow', False))
+
+    if not any(opts.get(s)
+               for s in ['user', 'date', 'file', 'number', 'changeset']):
+        # default 'number' for compatibility. but fastannotate is more
+        # efficient with "changeset", "line-number" and "no-content".
+        for name in ui.configlist('fastannotate', 'defaultformat', ['number']):
+            opts[name] = True
+
+    ui.pager('fastannotate')
+    template = opts.get('template')
+    if template == 'json':
+        formatter = faformatter.jsonformatter(ui, repo, opts)
+    else:
+        formatter = faformatter.defaultformatter(ui, repo, opts)
+    showdeleted = opts.get('deleted', False)
+    showlines = not bool(opts.get('no_content'))
+    showpath = opts.get('file', False)
+
+    # find the head of the main (master) branch
+    master = ui.config('fastannotate', 'mainbranch') or rev
+
+    # paths will be used for prefetching and the real annotating
+    paths = list(_matchpaths(repo, rev, pats, opts, aopts))
+
+    # for client, prefetch from the server
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        repo.prefetchfastannotate(paths)
+
+    for path in paths:
+        result = lines = existinglines = None
+        while True:
+            try:
+                with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
+                    result = a.annotate(rev, master=master, showpath=showpath,
+                                        showlines=(showlines and
+                                                   not showdeleted))
+                    if showdeleted:
+                        existinglines = set((l[0], l[1]) for l in result)
+                        result = a.annotatealllines(
+                            rev, showpath=showpath, showlines=showlines)
+                break
+            except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                # happens if master moves backwards, or the file was deleted
+                # and readded, or renamed to an existing name, or corrupted.
+                if rebuild: # give up since we have tried rebuild already
+                    raise
+                else: # try a second time rebuilding the cache (slow)
+                    rebuild = True
+                    continue
+
+        if showlines:
+            result, lines = result
+
+        formatter.write(result, lines, existinglines=existinglines)
+    formatter.end()
+
+_newopts = set([])
+_knownopts = set([opt[1].replace('-', '_') for opt in
+                  (fastannotatecommandargs[r'options'] + commands.globalopts)])
+
+def _annotatewrapper(orig, ui, repo, *pats, **opts):
+    """used by wrapdefault"""
+    # we need this hack until the obsstore has 0.0 seconds perf impact
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+
+    # treat the file as text (skip the isbinary check)
+    if ui.configbool('fastannotate', 'forcetext'):
+        opts[r'text'] = True
+
+    # check if we need to do prefetch (client-side)
+    rev = opts.get(r'rev')
+    if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
+        paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
+        repo.prefetchfastannotate(paths)
+
+    return orig(ui, repo, *pats, **opts)
+
+def registercommand():
+    """register the fastannotate command"""
+    name = 'fastannotate|fastblame|fa'
+    command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate)
+
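+# usage sketch (assuming the extension is enabled and the cache is built):
+#
+#   $ hg fastannotate -c -l --no-content mercurial/commands.py
+#
+# per the command help above, -c and -l with --no-content is among the
+# fastest combinations on a full repo.
+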
+def wrapdefault():
+    """wrap the default annotate command, to be aware of the protocol"""
+    extensions.wrapcommand(commands.table, 'annotate', _annotatewrapper)
+
+@command('debugbuildannotatecache',
+         [('r', 'rev', '', _('build up to the specific revision'), _('REV'))
+         ] + commands.walkopts,
+         _('[-r REV] FILE...'))
+def debugbuildannotatecache(ui, repo, *pats, **opts):
+    """incrementally build fastannotate cache up to REV for specified files
+
+    If REV is not specified, use the config 'fastannotate.mainbranch'.
+
+    If fastannotate.client is True, download the annotate cache from the
+    server. Otherwise, build the annotate cache locally.
+
+    The annotate cache will be built using the default diff and follow
+    options and lives in '.hg/fastannotate/default'.
+    """
+    opts = pycompat.byteskwargs(opts)
+    rev = opts.get('rev') or ui.config('fastannotate', 'mainbranch')
+    if not rev:
+        raise error.Abort(_('you need to provide a revision'),
+                          hint=_('set fastannotate.mainbranch or use --rev'))
+    if ui.configbool('fastannotate', 'unfilteredrepo'):
+        repo = repo.unfiltered()
+    ctx = scmutil.revsingle(repo, rev)
+    m = scmutil.match(ctx, pats, opts)
+    paths = list(ctx.walk(m))
+    if util.safehasattr(repo, 'prefetchfastannotate'):
+        # client
+        if opts.get('rev'):
+            raise error.Abort(_('--rev cannot be used for client'))
+        repo.prefetchfastannotate(paths)
+    else:
+        # server, or full repo
+        for i, path in enumerate(paths):
+            ui.progress(_('building'), i, total=len(paths))
+            with facontext.annotatecontext(repo, path) as actx:
+                try:
+                    if actx.isuptodate(rev):
+                        continue
+                    actx.annotate(rev, rev)
+                except (faerror.CannotReuseError, faerror.CorruptedFileError):
+                    # the cache is broken (could happen with renaming so the
+                    # file history gets invalidated). rebuild and try again.
+                    ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                             % path)
+                    actx.rebuild()
+                    try:
+                        actx.annotate(rev, rev)
+                    except Exception as ex:
+                        # possibly a bug, but should not stop us from building
+                        # cache for other files.
+                        ui.warn(_('fastannotate: %s: failed to '
+                                  'build cache: %r\n') % (path, ex))
+        # clear the progress bar
+        ui.write()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/context.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,828 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# context: context needed to annotate a file
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import contextlib
+import hashlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    linelog as linelogmod,
+    lock as lockmod,
+    mdiff,
+    node,
+    pycompat,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+from . import (
+    error as faerror,
+    revmap as revmapmod,
+)
+
+# given path, get filelog, cached
+@util.lrucachefunc
+def _getflog(repo, path):
+    return repo.file(path)
+
+# extracted from mercurial.context.basefilectx.annotate
+def _parents(f, follow=True):
+    # Cut _descendantrev here to mitigate the penalty of lazy linkrev
+    # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
+    # from the topmost introrev (= srcrev) down to p.linkrev() if it
+    # isn't an ancestor of the srcrev.
+    f._changeid
+    pl = f.parents()
+
+    # Don't return renamed parents if we aren't following.
+    if not follow:
+        pl = [p for p in pl if p.path() == f.path()]
+
+    # renamed filectx won't have a filelog yet, so set it
+    # from the cache to save time
+    for p in pl:
+        if '_filelog' not in p.__dict__:
+            p._filelog = _getflog(f._repo, p.path())
+
+    return pl
+
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes a fctx instead of a pair of text and fctx.
+def _decorate(fctx):
+    text = fctx.data()
+    linecount = text.count('\n')
+    if text and not text.endswith('\n'):
+        linecount += 1
+    return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
+
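+# e.g. for a 3-line file, _decorate(fctx) returns
+# ([(fctx, 0), (fctx, 1), (fctx, 2)], text) - the initial per-line
+# annotation that _pair() below refines.
+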
+# extracted from mercurial.context.basefilectx.annotate. slightly modified
+# so it takes an extra "blocks" parameter calculated elsewhere, instead of
+# calculating diff here.
+def _pair(parent, child, blocks):
+    for (a1, a2, b1, b2), t in blocks:
+        # Changed blocks ('!') or blocks made only of blank lines ('~')
+        # belong to the child.
+        if t == '=':
+            child[0][b1:b2] = parent[0][a1:a2]
+    return child
+
+# like scmutil.revsingle, but with an lru cache, so the resolved contexts
+# (and their states, like manifests) can be reused
+_revsingle = util.lrucachefunc(scmutil.revsingle)
+
+def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
+    """(repo, str, str) -> fctx
+
+    get the filectx object from repo, rev, path, in an efficient way.
+
+    if resolverev is True, "rev" is a revision specified by the revset
+    language, otherwise "rev" is a nodeid, or a revision number that can
+    be consumed by repo.__getitem__.
+
+    if adjustctx is not None, the returned fctx will point to a changeset
+    that introduces the change (last modified the file). if adjustctx
+    is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
+    faster for big repos but is incorrect for some cases.
+    """
+    if resolverev and not isinstance(rev, int) and rev is not None:
+        ctx = _revsingle(repo, rev)
+    else:
+        ctx = repo[rev]
+
+    # If we don't need to adjust the linkrev, create the filectx using the
+    # changectx instead of using ctx[path]. This means it already has the
+    # changectx information, so blame -u will be able to look directly at the
+    # commitctx object instead of having to resolve it by going through the
+    # manifest. In a lazy-manifest world this can prevent us from downloading a
+    # lot of data.
+    if adjustctx is None:
+        # ctx.rev() is None means it's the working copy, which is a special
+        # case.
+        if ctx.rev() is None:
+            fctx = ctx[path]
+        else:
+            fctx = repo.filectx(path, changeid=ctx.rev())
+    else:
+        fctx = ctx[path]
+        if adjustctx == 'linkrev':
+            introrev = fctx.linkrev()
+        else:
+            introrev = fctx.introrev()
+        if introrev != ctx.rev():
+            fctx._changeid = introrev
+            fctx._changectx = repo[introrev]
+    return fctx
+
+# like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
+def encodedir(path):
+    return (path
+            .replace('.hg/', '.hg.hg/')
+            .replace('.l/', '.l.hg/')
+            .replace('.m/', '.m.hg/')
+            .replace('.lock/', '.lock.hg/'))
+
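+# e.g. encodedir('a.l/b') == 'a.l.hg/b', so a tracked directory named 'a.l'
+# cannot collide with the '.l' (linelog) and '.m' (revmap) cache files.
+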
+def hashdiffopts(diffopts):
+    diffoptstr = stringutil.pprint(sorted(
+        (k, getattr(diffopts, k))
+        for k in mdiff.diffopts.defaults
+    ))
+    return hashlib.sha1(diffoptstr).hexdigest()[:6]
+
+_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
+
+class annotateopts(object):
+    """like mercurial.mdiff.diffopts, but is for annotate
+
+    followrename: follow renames, like "hg annotate -f"
+    followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
+    """
+
+    defaults = {
+        'diffopts': None,
+        'followrename': True,
+        'followmerge': True,
+    }
+
+    def __init__(self, **opts):
+        for k, v in self.defaults.iteritems():
+            setattr(self, k, opts.get(k, v))
+
+    @util.propertycache
+    def shortstr(self):
+        """represent opts in a short string, suitable for a directory name"""
+        result = ''
+        if not self.followrename:
+            result += 'r0'
+        if not self.followmerge:
+            result += 'm0'
+        if self.diffopts is not None:
+            assert isinstance(self.diffopts, mdiff.diffopts)
+            diffopthash = hashdiffopts(self.diffopts)
+            if diffopthash != _defaultdiffopthash:
+                result += 'i' + diffopthash
+        return result or 'default'
+
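+# e.g. annotateopts(followrename=False).shortstr == 'r0', while the default
+# options map to 'default' - these strings become per-option cache directory
+# names under .hg/fastannotate/.
+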
+defaultopts = annotateopts()
+
+class _annotatecontext(object):
+    """do not use this class directly as it does not take a lock to protect
+    writes. use "with annotatecontext(...)" instead.
+    """
+
+    def __init__(self, repo, path, linelogpath, revmappath, opts):
+        self.repo = repo
+        self.ui = repo.ui
+        self.path = path
+        self.opts = opts
+        self.linelogpath = linelogpath
+        self.revmappath = revmappath
+        self._linelog = None
+        self._revmap = None
+        self._node2path = {} # {str: str}
+
+    @property
+    def linelog(self):
+        if self._linelog is None:
+            if os.path.exists(self.linelogpath):
+                with open(self.linelogpath, 'rb') as f:
+                    try:
+                        self._linelog = linelogmod.linelog.fromdata(f.read())
+                    except linelogmod.LineLogError:
+                        self._linelog = linelogmod.linelog()
+            else:
+                self._linelog = linelogmod.linelog()
+        return self._linelog
+
+    @property
+    def revmap(self):
+        if self._revmap is None:
+            self._revmap = revmapmod.revmap(self.revmappath)
+        return self._revmap
+
+    def close(self):
+        if self._revmap is not None:
+            self._revmap.flush()
+            self._revmap = None
+        if self._linelog is not None:
+            with open(self.linelogpath, 'wb') as f:
+                f.write(self._linelog.encode())
+            self._linelog = None
+
+    __del__ = close
+
+    def rebuild(self):
+        """delete linelog and revmap, useful for rebuilding"""
+        self.close()
+        self._node2path.clear()
+        _unlinkpaths([self.revmappath, self.linelogpath])
+
+    @property
+    def lastnode(self):
+        """return last node in revmap, or None if revmap is empty"""
+        if self._revmap is None:
+            # fast path, read revmap without loading its full content
+            return revmapmod.getlastnode(self.revmappath)
+        else:
+            return self._revmap.rev2hsh(self._revmap.maxrev)
+
+    def isuptodate(self, master, strict=True):
+        """return True if the revmap / linelog is up-to-date, or the file
+        does not exist in the master revision. False otherwise.
+
+        it tries to be fast and could return false negatives, because of the
+        use of linkrev instead of introrev.
+
+        useful for both server and client to decide whether to update
+        fastannotate cache or not.
+
+        if strict is True, even if fctx exists in the revmap but is not the
+        last node, isuptodate will return False. this is good for performance -
+        no expensive check is done.
+
+        if strict is False, if fctx exists in the revmap, this function may
+        return True. this is useful for the client to skip downloading the
+        cache if the client's master is behind the server's.
+        """
+        lastnode = self.lastnode
+        try:
+            f = self._resolvefctx(master, resolverev=True)
+            # choose linkrev instead of introrev as the check is meant to be
+            # *fast*.
+            linknode = self.repo.changelog.node(f.linkrev())
+            if not strict and lastnode and linknode != lastnode:
+                # check if f.node() is in the revmap. note: this loads the
+                # revmap and can be slow.
+                return self.revmap.hsh2rev(linknode) is not None
+            # to be fast, avoid resolving the old manifest and the slow
+            # adjustlinkrev; false negatives are acceptable in this case.
+            return linknode == lastnode
+        except LookupError:
+            # master does not have the file, or the revmap is ahead
+            return True
+
+    def annotate(self, rev, master=None, showpath=False, showlines=False):
+        """incrementally update the cache so it includes revisions in the main
+        branch up to 'master', then run annotate on 'rev', which may or may
+        not be included in the main branch.
+
+        if master is None, do not update linelog.
+
+        the first value returned is the annotate result, it is [(node, linenum)]
+        by default. [(node, linenum, path)] if showpath is True.
+
+        if showlines is True, a second value will be returned, it is a list of
+        corresponding line contents.
+        """
+
+        # the fast path test requires a commit hash; convert a rev number to
+        # a hash so it may hit the fast path. note: in the "fctx" mode, the
+        # "annotate" command could give us a revision number even if the user
+        # passes a commit hash.
+        if isinstance(rev, int):
+            rev = node.hex(self.repo.changelog.node(rev))
+
+        # fast path: if rev is in the main branch already
+        directly, revfctx = self.canannotatedirectly(rev)
+        if directly:
+            if self.ui.debugflag:
+                self.ui.debug('fastannotate: %s: using fast path '
+                              '(resolved fctx: %s)\n'
+                              % (self.path,
+                                 stringutil.pprint(util.safehasattr(revfctx,
+                                                                    'node'))))
+            return self.annotatedirectly(revfctx, showpath, showlines)
+
+        # resolve master
+        masterfctx = None
+        if master:
+            try:
+                masterfctx = self._resolvefctx(master, resolverev=True,
+                                               adjustctx=True)
+            except LookupError: # master does not have the file
+                pass
+            else:
+                if masterfctx in self.revmap: # no need to update linelog
+                    masterfctx = None
+
+        #                  ... - @ <- rev (can be an arbitrary changeset,
+        #                 /                not necessarily a descendant
+        #      master -> o                 of master)
+        #                |
+        #     a merge -> o         'o': new changesets in the main branch
+        #                |\        '#': revisions in the main branch that
+        #                o *            exist in linelog / revmap
+        #                | .       '*': changesets in side branches, or
+        # last master -> # .            descendants of master
+        #                | .
+        #                # *       joint: '#', and is a parent of a '*'
+        #                |/
+        #     a joint -> # ^^^^ --- side branches
+        #                |
+        #                ^ --- main branch (in linelog)
+
+        # these DFSes are similar to the traditional annotate algorithm.
+        # we cannot really reuse the code for perf reason.
+
+        # 1st DFS calculates merges, joint points, and needed.
+        # "needed" is a simple reference counting dict to free items in
+        # "hist", whose memory usage could otherwise be huge.
+        initvisit = [revfctx]
+        if masterfctx:
+            if masterfctx.rev() is None:
+                raise error.Abort(_('cannot update linelog to wdir()'),
+                                  hint=_('set fastannotate.mainbranch'))
+            initvisit.append(masterfctx)
+        visit = initvisit[:]
+        pcache = {}
+        needed = {revfctx: 1}
+        hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
+        while visit:
+            f = visit.pop()
+            if f in pcache or f in hist:
+                continue
+            if f in self.revmap: # in the old main branch, it's a joint
+                llrev = self.revmap.hsh2rev(f.node())
+                self.linelog.annotate(llrev)
+                result = self.linelog.annotateresult
+                hist[f] = (result, f.data())
+                continue
+            pl = self._parentfunc(f)
+            pcache[f] = pl
+            for p in pl:
+                needed[p] = needed.get(p, 0) + 1
+                if p not in pcache:
+                    visit.append(p)
+
+        # 2nd (simple) DFS calculates new changesets in the main branch
+        # ('o' nodes in the above graph), so we know when to update linelog.
+        newmainbranch = set()
+        f = masterfctx
+        while f and f not in self.revmap:
+            newmainbranch.add(f)
+            pl = pcache[f]
+            if pl:
+                f = pl[0]
+            else:
+                f = None
+                break
+
+        # f, if present, is the position where the last build stopped, and
+        # should have been "master" last time. check to see if we can continue
+        # building the linelog incrementally. (we cannot if diverged)
+        if masterfctx is not None:
+            self._checklastmasterhead(f)
+
+        if self.ui.debugflag:
+            if newmainbranch:
+                self.ui.debug('fastannotate: %s: %d new changesets in the main'
+                              ' branch\n' % (self.path, len(newmainbranch)))
+            elif not hist: # no joints, no updates
+                self.ui.debug('fastannotate: %s: linelog cannot help in '
+                              'annotating this revision\n' % self.path)
+
+        # prepare annotateresult so we can update linelog incrementally
+        self.linelog.annotate(self.linelog.maxrev)
+
+        # 3rd DFS does the actual annotate
+        visit = initvisit[:]
+        progress = 0
+        while visit:
+            f = visit[-1]
+            if f in hist:
+                visit.pop()
+                continue
+
+            ready = True
+            pl = pcache[f]
+            for p in pl:
+                if p not in hist:
+                    ready = False
+                    visit.append(p)
+            if not ready:
+                continue
+
+            visit.pop()
+            blocks = None # mdiff blocks, used for appending linelog
+            ismainbranch = (f in newmainbranch)
+            # curr is the same as in the traditional annotate algorithm;
+            # if we only care about linear history (do not follow merge),
+            # then curr is not actually used.
+            assert f not in hist
+            curr = _decorate(f)
+            for i, p in enumerate(pl):
+                bs = list(self._diffblocks(hist[p][1], curr[1]))
+                if i == 0 and ismainbranch:
+                    blocks = bs
+                curr = _pair(hist[p], curr, bs)
+                if needed[p] == 1:
+                    del hist[p]
+                    del needed[p]
+                else:
+                    needed[p] -= 1
+
+            hist[f] = curr
+            del pcache[f]
+
+            if ismainbranch: # need to write to linelog
+                if not self.ui.quiet:
+                    progress += 1
+                    self.ui.progress(_('building cache'), progress,
+                                     total=len(newmainbranch))
+                bannotated = None
+                if len(pl) == 2 and self.opts.followmerge: # merge
+                    bannotated = curr[0]
+                if blocks is None: # no parents, add an empty one
+                    blocks = list(self._diffblocks('', curr[1]))
+                self._appendrev(f, blocks, bannotated)
+            elif showpath: # not append linelog, but we need to record path
+                self._node2path[f.node()] = f.path()
+
+        if progress: # clean progress bar
+            self.ui.write()
+
+        result = [
+            ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
+            for fr, l in hist[revfctx][0]] # [(node, linenumber)]
+        return self._refineannotateresult(result, revfctx, showpath, showlines)
+
+    def canannotatedirectly(self, rev):
+        """(str) -> bool, fctx or node.
+        return (True, f) if we can annotate without updating the linelog, pass
+        f to annotatedirectly.
+        return (False, f) if we need extra calculation. f is the fctx resolved
+        from rev.
+        """
+        result = True
+        f = None
+        if not isinstance(rev, int) and rev is not None:
+            hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
+            if hsh is not None and (hsh, self.path) in self.revmap:
+                f = hsh
+        if f is None:
+            adjustctx = 'linkrev' if self._perfhack else True
+            f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
+            result = f in self.revmap
+            if not result and self._perfhack:
+                # redo the resolution without perfhack - as we are going to
+                # do write operations, we need a correct fctx.
+                f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
+        return result, f
+
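+# e.g. when the user passes a 40-char hex node already covered by the revmap,
+# canannotatedirectly returns (True, <20-byte node>) and annotatedirectly()
+# can answer without constructing any filectx.
+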
+    def annotatealllines(self, rev, showpath=False, showlines=False):
+        """(rev : str) -> [(node : str, linenum : int, path : str)]
+
+        the result has the same format as annotate, but includes all lines
+        (including deleted ones) up to rev. call this after calling
+        annotate(rev, ...) for better performance and accuracy.
+        """
+        revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
+
+        # find a chain from rev to anything in the mainbranch
+        if revfctx not in self.revmap:
+            chain = [revfctx]
+            a = ''
+            while True:
+                f = chain[-1]
+                pl = self._parentfunc(f)
+                if not pl:
+                    break
+                if pl[0] in self.revmap:
+                    a = pl[0].data()
+                    break
+                chain.append(pl[0])
+
+            # both self.linelog and self.revmap are backed by the filesystem.
+            # now we want to modify them without writing the changes back to
+            # disk, so we create in-memory copies to work on. it's like a
+            # "fork".
+            linelog = linelogmod.linelog()
+            linelog.copyfrom(self.linelog)
+            linelog.annotate(linelog.maxrev)
+            revmap = revmapmod.revmap()
+            revmap.copyfrom(self.revmap)
+
+            for f in reversed(chain):
+                b = f.data()
+                blocks = list(self._diffblocks(a, b))
+                self._doappendrev(linelog, revmap, f, blocks)
+                a = b
+        else:
+            # fastpath: use existing linelog, revmap as we don't write to them
+            linelog = self.linelog
+            revmap = self.revmap
+
+        lines = linelog.getalllines()
+        hsh = revfctx.node()
+        llrev = revmap.hsh2rev(hsh)
+        result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
+        # cannot use _refineannotateresult since we need custom logic for
+        # resolving line contents
+        if showpath:
+            result = self._addpathtoresult(result, revmap)
+        if showlines:
+            linecontents = self._resolvelines(result, revmap, linelog)
+            result = (result, linecontents)
+        return result
+
+    def _resolvelines(self, annotateresult, revmap, linelog):
+        """(annotateresult) -> [line]. designed for annotatealllines.
+        this is probably the most inefficient code in the whole fastannotate
+        directory. but we have made a decision that the linelog does not
+        store line contents, so getting them requires random accesses to
+        the revlog data; since there can be many accesses, it can be very slow.
+        """
+        # [llrev]
+        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
+        result = [None] * len(annotateresult)
+        # {(rev, linenum): [lineindex]}
+        key2idxs = collections.defaultdict(list)
+        for i in pycompat.xrange(len(result)):
+            key2idxs[(revs[i], annotateresult[i][1])].append(i)
+        while key2idxs:
+            # find an unresolved line and its linelog rev to annotate
+            hsh = None
+            try:
+                for (rev, _linenum), idxs in key2idxs.iteritems():
+                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
+                        continue
+                    hsh = annotateresult[idxs[0]][0]
+                    break
+            except StopIteration: # no more unresolved lines
+                return result
+            if hsh is None:
+                # the remaining key2idxs are not in main branch, resolving them
+                # using the hard way...
+                revlines = {}
+                for (rev, linenum), idxs in key2idxs.iteritems():
+                    if rev not in revlines:
+                        hsh = annotateresult[idxs[0]][0]
+                        if self.ui.debugflag:
+                            self.ui.debug('fastannotate: reading %s line #%d '
+                                          'to resolve lines %r\n'
+                                          % (node.short(hsh), linenum, idxs))
+                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+                        lines = mdiff.splitnewlines(fctx.data())
+                        revlines[rev] = lines
+                    for idx in idxs:
+                        result[idx] = revlines[rev][linenum]
+                assert all(x is not None for x in result)
+                return result
+
+            # run the annotate and the lines should match to the file content
+            self.ui.debug('fastannotate: annotate %s to resolve lines\n'
+                          % node.short(hsh))
+            linelog.annotate(rev)
+            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
+            annotated = linelog.annotateresult
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(annotated):
+                raise faerror.CorruptedFileError('unexpected annotated lines')
+            # resolve lines from the annotate result
+            for i, line in enumerate(lines):
+                k = annotated[i]
+                if k in key2idxs:
+                    for idx in key2idxs[k]:
+                        result[idx] = line
+                    del key2idxs[k]
+        return result
+
+    def annotatedirectly(self, f, showpath, showlines):
+        """like annotate, but when we know that f is in linelog.
+        f can be either a 20-char str (node) or a fctx. this is for perf - in
+        the best case, the user provides a node and we don't need to read the
+        filelog or construct any filecontext.
+        """
+        if isinstance(f, str):
+            hsh = f
+        else:
+            hsh = f.node()
+        llrev = self.revmap.hsh2rev(hsh)
+        if not llrev:
+            raise faerror.CorruptedFileError('%s is not in revmap'
+                                             % node.hex(hsh))
+        if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
+            raise faerror.CorruptedFileError('%s is not in revmap mainbranch'
+                                             % node.hex(hsh))
+        self.linelog.annotate(llrev)
+        result = [(self.revmap.rev2hsh(r), l)
+                  for r, l in self.linelog.annotateresult]
+        return self._refineannotateresult(result, f, showpath, showlines)
+
+    def _refineannotateresult(self, result, f, showpath, showlines):
+        """add the missing path or line contents, they can be expensive.
+        f could be either node or fctx.
+        """
+        if showpath:
+            result = self._addpathtoresult(result)
+        if showlines:
+            if isinstance(f, str): # f: node or fctx
+                llrev = self.revmap.hsh2rev(f)
+                fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
+            else:
+                fctx = f
+            lines = mdiff.splitnewlines(fctx.data())
+            if len(lines) != len(result): # linelog is probably corrupted
+                raise faerror.CorruptedFileError()
+            result = (result, lines)
+        return result
+
+    def _appendrev(self, fctx, blocks, bannotated=None):
+        self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
+
+    def _diffblocks(self, a, b):
+        return mdiff.allblocks(a, b, self.opts.diffopts)
+
+    @staticmethod
+    def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
+        """append a revision to linelog and revmap"""
+
+        def getllrev(f):
+            """(fctx) -> int"""
+            # f should not be a linelog revision
+            if isinstance(f, int):
+                raise error.ProgrammingError('f should not be an int')
+            # f is a fctx, allocate linelog rev on demand
+            hsh = f.node()
+            rev = revmap.hsh2rev(hsh)
+            if rev is None:
+                rev = revmap.append(hsh, sidebranch=True, path=f.path())
+            return rev
+
+        # append sidebranch revisions to revmap
+        siderevs = []
+        siderevmap = {} # node: int
+        if bannotated is not None:
+            for (a1, a2, b1, b2), op in blocks:
+                if op != '=':
+                    # f could be either a linelog rev, or a fctx.
+                    siderevs += [f for f, l in bannotated[b1:b2]
+                                 if not isinstance(f, int)]
+        siderevs = set(siderevs)
+        if fctx in siderevs: # mainnode must be appended separately
+            siderevs.remove(fctx)
+        for f in siderevs:
+            siderevmap[f] = getllrev(f)
+
+        # the changeset in the main branch, could be a merge
+        llrev = revmap.append(fctx.node(), path=fctx.path())
+        siderevmap[fctx] = llrev
+
+        for (a1, a2, b1, b2), op in reversed(blocks):
+            if op == '=':
+                continue
+            if bannotated is None:
+                linelog.replacelines(llrev, a1, a2, b1, b2)
+            else:
+                blines = [((r if isinstance(r, int) else siderevmap[r]), l)
+                          for r, l in bannotated[b1:b2]]
+                linelog.replacelines_vec(llrev, a1, a2, blines)
+
+    def _addpathtoresult(self, annotateresult, revmap=None):
+        """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
+        if revmap is None:
+            revmap = self.revmap
+
+        def _getpath(nodeid):
+            path = self._node2path.get(nodeid)
+            if path is None:
+                path = revmap.rev2path(revmap.hsh2rev(nodeid))
+                self._node2path[nodeid] = path
+            return path
+
+        return [(n, l, _getpath(n)) for n, l in annotateresult]
+
+    def _checklastmasterhead(self, fctx):
+        """check if fctx is the master's head last time, raise if not"""
+        if fctx is None:
+            llrev = 0
+        else:
+            llrev = self.revmap.hsh2rev(fctx.node())
+            if not llrev:
+                raise faerror.CannotReuseError()
+        if self.linelog.maxrev != llrev:
+            raise faerror.CannotReuseError()
+
+    @util.propertycache
+    def _parentfunc(self):
+        """-> (fctx) -> [fctx]"""
+        followrename = self.opts.followrename
+        followmerge = self.opts.followmerge
+        def parents(f):
+            pl = _parents(f, follow=followrename)
+            if not followmerge:
+                pl = pl[:1]
+            return pl
+        return parents
+
+    @util.propertycache
+    def _perfhack(self):
+        return self.ui.configbool('fastannotate', 'perfhack')
+
+    def _resolvefctx(self, rev, path=None, **kwds):
+        return resolvefctx(self.repo, rev, (path or self.path), **kwds)
+
+def _unlinkpaths(paths):
+    """silent, best-effort unlink"""
+    for path in paths:
+        try:
+            util.unlink(path)
+        except OSError:
+            pass
+
+class pathhelper(object):
+    """helper for getting paths for lockfile, linelog and revmap"""
+
+    def __init__(self, repo, path, opts=defaultopts):
+        # different options use different directories
+        self._vfspath = os.path.join('fastannotate',
+                                     opts.shortstr, encodedir(path))
+        self._repo = repo
+
+    @property
+    def dirname(self):
+        return os.path.dirname(self._repo.vfs.join(self._vfspath))
+
+    @property
+    def linelogpath(self):
+        return self._repo.vfs.join(self._vfspath + '.l')
+
+    def lock(self):
+        return lockmod.lock(self._repo.vfs, self._vfspath + '.lock')
+
+    @contextlib.contextmanager
+    def _lockflock(self):
+        """the same as 'lock' but uses flock instead of lockmod.lock, to avoid
+        creating temporary symlinks."""
+        import fcntl
+        lockpath = self.linelogpath
+        util.makedirs(os.path.dirname(lockpath))
+        lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
+        fcntl.flock(lockfd, fcntl.LOCK_EX)
+        try:
+            yield
+        finally:
+            fcntl.flock(lockfd, fcntl.LOCK_UN)
+            os.close(lockfd)
+
+    @property
+    def revmappath(self):
+        return self._repo.vfs.join(self._vfspath + '.m')
+
+@contextlib.contextmanager
+def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
+    """context needed to perform (fast) annotate on a file
+
+    an annotatecontext of a single file consists of two structures: the
+    linelog and the revmap. this function takes care of locking. only 1
+    process is allowed to write that file's linelog and revmap at a time.
+
+    when something goes wrong, this function will assume the linelog and the
+    revmap are in a bad state, and remove them from disk.
+
+    use this function in the following way:
+
+        with annotatecontext(...) as actx:
+            actx. ....
+    """
+    helper = pathhelper(repo, path, opts)
+    util.makedirs(helper.dirname)
+    revmappath = helper.revmappath
+    linelogpath = helper.linelogpath
+    actx = None
+    try:
+        with helper.lock():
+            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
+            if rebuild:
+                actx.rebuild()
+            yield actx
+    except Exception:
+        if actx is not None:
+            actx.rebuild()
+        repo.ui.debug('fastannotate: %s: cache broken and deleted\n' % path)
+        raise
+    finally:
+        if actx is not None:
+            actx.close()
+
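+# usage sketch (assuming 'repo' is a localrepository, 'a.c' is tracked, and
+# a revision 'master' exists):
+#
+#   with annotatecontext(repo, 'a.c') as actx:
+#       result, lines = actx.annotate('.', master='master', showlines=True)
+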
+def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
+    """like annotatecontext but get the context from a fctx. convenient when
+    used in fctx.annotate
+    """
+    repo = fctx._repo
+    path = fctx._path
+    if repo.ui.configbool('fastannotate', 'forcefollow', True):
+        follow = True
+    aopts = annotateopts(diffopts=diffopts, followrename=follow)
+    return annotatecontext(repo, path, aopts, rebuild)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/error.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,13 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# error: errors used in fastannotate
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+class CorruptedFileError(Exception):
+    pass
+
+class CannotReuseError(Exception):
+    """cannot reuse or update the cache incrementally"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/formatter.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,161 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# format: defines the format used to output annotate result
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from mercurial import (
+    encoding,
+    node,
+    pycompat,
+    templatefilters,
+    util,
+)
+from mercurial.utils import (
+        dateutil,
+)
+
+# imitating mercurial.commands.annotate, not using the vanilla formatter since
+# the data structures are a bit different, and we have some fast paths.
+class defaultformatter(object):
+    """the default formatter that does leftpad and supports some common flags"""
+
+    def __init__(self, ui, repo, opts):
+        self.ui = ui
+        self.opts = opts
+
+        if ui.quiet:
+            datefunc = dateutil.shortdate
+        else:
+            datefunc = dateutil.datestr
+        datefunc = util.cachefunc(datefunc)
+        getctx = util.cachefunc(lambda x: repo[x[0]])
+        hexfunc = self._hexfunc
+
+        # special handling of working copy "changeset" and "rev" functions
+        if self.opts.get('rev') == 'wdir()':
+            orig = hexfunc
+            hexfunc = lambda x: None if x is None else orig(x)
+            wnode = hexfunc(repo[None].p1().node()) + '+'
+            wrev = str(repo[None].p1().rev())
+            wrevpad = ''
+            if not opts.get('changeset'): # only show + if changeset is hidden
+                wrev += '+'
+                wrevpad = ' '
+            revenc = lambda x: wrev if x is None else str(x) + wrevpad
+            csetenc = lambda x: wnode if x is None else str(x) + ' '
+        else:
+            revenc = csetenc = str
+
+        # opt name, separator, raw value (for json/plain), encoder (for plain)
+        opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
+                 ('number', ' ', lambda x: getctx(x).rev(), revenc),
+                 ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
+                 ('date', ' ', lambda x: getctx(x).date(), datefunc),
+                 ('file', ' ', lambda x: x[2], str),
+                 ('line_number', ':', lambda x: x[1] + 1, str)]
+        fieldnamemap = {'number': 'rev', 'changeset': 'node'}
+        funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
+                   for op, sep, get, enc in opmap
+                   if opts.get(op)]
+        # no separator for first column
+        funcmap[0] = list(funcmap[0])
+        funcmap[0][1] = ''
+        self.funcmap = funcmap
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        """(annotateresult, [str], set([rev, linenum])) -> None. write output.
+        annotateresult can be [(node, linenum, path)], or [(node, linenum)]
+        """
+        pieces = [] # [[str]]
+        maxwidths = [] # [int]
+
+        # calculate padding
+        for f, sep, name, enc in self.funcmap:
+            l = [enc(f(x)) for x in annotatedresult]
+            pieces.append(l)
+            if name in ['node', 'date']: # node and date have fixed sizes
+                l = l[:1]
+            widths = pycompat.maplist(encoding.colwidth, set(l))
+            maxwidth = (max(widths) if widths else 0)
+            maxwidths.append(maxwidth)
+
+        # buffered output
+        result = ''
+        for i in pycompat.xrange(len(annotatedresult)):
+            for j, p in enumerate(pieces):
+                sep = self.funcmap[j][1]
+                padding = ' ' * (maxwidths[j] - len(p[i]))
+                result += sep + padding + p[i]
+            if lines:
+                if existinglines is None:
+                    result += ': ' + lines[i]
+                else: # extra formatting showing whether a line exists
+                    key = (annotatedresult[i][0], annotatedresult[i][1])
+                    if key in existinglines:
+                        result += ':  ' + lines[i]
+                    else:
+                        result += ': ' + self.ui.label('-' + lines[i],
+                                                       'diff.deleted')
+
+            if result[-1] != '\n':
+                result += '\n'
+
+        self.ui.write(result)
+
+    @util.propertycache
+    def _hexfunc(self):
+        if self.ui.debugflag or self.opts.get('long_hash'):
+            return node.hex
+        else:
+            return node.short
+
+    def end(self):
+        pass
+
+class jsonformatter(defaultformatter):
+    def __init__(self, ui, repo, opts):
+        super(jsonformatter, self).__init__(ui, repo, opts)
+        self.ui.write('[')
+        self.needcomma = False
+
+    def write(self, annotatedresult, lines=None, existinglines=None):
+        if annotatedresult:
+            self._writecomma()
+
+        pieces = [(name, pycompat.maplist(f, annotatedresult))
+                  for f, sep, name, enc in self.funcmap]
+        if lines is not None:
+            pieces.append(('line', lines))
+        pieces.sort()
+
+        seps = [','] * len(pieces[:-1]) + ['']
+
+        result = ''
+        lasti = len(annotatedresult) - 1
+        for i in pycompat.xrange(len(annotatedresult)):
+            result += '\n {\n'
+            for j, p in enumerate(pieces):
+                k, vs = p
+                result += ('  "%s": %s%s\n'
+                           % (k, templatefilters.json(vs[i], paranoid=False),
+                              seps[j]))
+            result += ' }%s' % ('' if i == lasti else ',')
+        if lasti >= 0:
+            self.needcomma = True
+
+        self.ui.write(result)
+
+    def _writecomma(self):
+        if self.needcomma:
+            self.ui.write(',')
+            self.needcomma = False
+
+    @util.propertycache
+    def _hexfunc(self):
+        return node.hex
+
+    def end(self):
+        self.ui.write('\n]\n')
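+
+# with only the 'number' column enabled, the stream written above looks
+# roughly like (an illustrative sketch):
+#   [
+#    {
+#     "line": "def foo():\n",
+#     "rev": 42
+#    }
+#   ]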
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/protocol.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,228 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# protocol: logic for a server providing fastannotate support
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import os
+
+from mercurial.i18n import _
+from mercurial import (
+    error,
+    extensions,
+    hg,
+    util,
+    wireprotov1peer,
+    wireprotov1server,
+)
+from . import context
+
+# common
+
+def _getmaster(ui):
+    """get the mainbranch, and enforce it is set"""
+    master = ui.config('fastannotate', 'mainbranch')
+    if not master:
+        raise error.Abort(_('fastannotate.mainbranch is required '
+                            'for both the client and the server'))
+    return master
+
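+# a minimal configuration sketch (illustrative values; all of these knobs
+# are read by this extension):
+#   [fastannotate]
+#   mainbranch = default        # required on both client and server
+#   remotepath = default        # client: peer to fetch caches from
+#   clientfetchthreshold = 10   # client: skip fetching for short histories
+#   serverbuildondemand = True  # server: build caches when requested
+#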
+# server-side
+
+def _capabilities(orig, repo, proto):
+    result = orig(repo, proto)
+    result.append('getannotate')
+    return result
+
+def _getannotate(repo, proto, path, lastnode):
+    # output:
+    #   FILE := vfspath + '\0' + str(size) + '\0' + content
+    #   OUTPUT := '' | FILE + OUTPUT
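+    # e.g. a response carrying one 3-byte file looks like (an illustrative
+    # sketch; the exact vfs layout is an implementation detail):
+    #   'fastannotate/<...>\x003\x00' + <3 raw bytes>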
+    result = ''
+    buildondemand = repo.ui.configbool('fastannotate', 'serverbuildondemand',
+                                       True)
+    with context.annotatecontext(repo, path) as actx:
+        if buildondemand:
+            # update before responding to the client
+            master = _getmaster(repo.ui)
+            try:
+                if not actx.isuptodate(master):
+                    actx.annotate(master, master)
+            except Exception:
+                # non-fast-forward move or corrupted cache. rebuild
+                # automatically.
+                actx.rebuild()
+                try:
+                    actx.annotate(master, master)
+                except Exception:
+                    actx.rebuild() # delete files
+            finally:
+                # although the "with" context will also do a close/flush, we
+                # need to do it early so we can send the correct response to
+                # the client.
+                actx.close()
+        # send back the full content of the revmap and linelog. in the future
+        # we may want to do rsync-like incremental updating.
+        # the lastnode check is not necessary if the client and the server
+        # agree where the main branch is.
+        if actx.lastnode != lastnode:
+            for p in [actx.revmappath, actx.linelogpath]:
+                if not os.path.exists(p):
+                    continue
+                content = ''
+                with open(p, 'rb') as f:
+                    content = f.read()
+                vfsbaselen = len(repo.vfs.base + '/')
+                relpath = p[vfsbaselen:]
+                result += '%s\0%d\0%s' % (relpath, len(content), content)
+    return result
+
+def _registerwireprotocommand():
+    if 'getannotate' in wireprotov1server.commands:
+        return
+    wireprotov1server.wireprotocommand(
+        'getannotate', 'path lastnode')(_getannotate)
+
+def serveruisetup(ui):
+    _registerwireprotocommand()
+    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+
+# client-side
+
+def _parseresponse(payload):
+    result = {}
+    i = 0
+    l = len(payload) - 1
+    state = 0 # 0: vfspath, 1: size
+    vfspath = size = ''
+    while i < l:
+        ch = payload[i]
+        if ch == '\0':
+            if state == 1:
+                result[vfspath] = buffer(payload, i + 1, int(size))
+                i += int(size)
+                state = 0
+                vfspath = size = ''
+            elif state == 0:
+                state = 1
+        else:
+            if state == 1:
+                size += ch
+            elif state == 0:
+                vfspath += ch
+        i += 1
+    return result
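+# e.g. _parseresponse('a\x002\x00xy') maps 'a' to a buffer viewing 'xy'
+# (a sketch; values are py2 buffer objects into the payload, not new strings)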
+
+def peersetup(ui, peer):
+    class fastannotatepeer(peer.__class__):
+        @wireprotov1peer.batchable
+        def getannotate(self, path, lastnode=None):
+            if not self.capable('getannotate'):
+                ui.warn(_('remote peer cannot provide annotate cache\n'))
+                yield None, None
+            else:
+                args = {'path': path, 'lastnode': lastnode or ''}
+                f = wireprotov1peer.future()
+                yield args, f
+                yield _parseresponse(f.value)
+    peer.__class__ = fastannotatepeer
+
+@contextlib.contextmanager
+def annotatepeer(repo):
+    ui = repo.ui
+
+    remotepath = ui.expandpath(
+        ui.config('fastannotate', 'remotepath', 'default'))
+    peer = hg.peer(ui, {}, remotepath)
+
+    try:
+        yield peer
+    finally:
+        peer.close()
+
+def clientfetch(repo, paths, lastnodemap=None, peer=None):
+    """download annotate cache from the server for paths"""
+    if not paths:
+        return
+
+    if peer is None:
+        with annotatepeer(repo) as peer:
+            return clientfetch(repo, paths, lastnodemap, peer)
+
+    if lastnodemap is None:
+        lastnodemap = {}
+
+    ui = repo.ui
+    results = []
+    with peer.commandexecutor() as batcher:
+        ui.debug('fastannotate: requesting %d files\n' % len(paths))
+        for p in paths:
+            results.append(batcher.callcommand(
+                'getannotate',
+                {'path': p, 'lastnode': lastnodemap.get(p)}))
+
+        for result in results:
+            r = result.result()
+            # TODO: pconvert these paths on the server?
+            r = {util.pconvert(p): v for p, v in r.iteritems()}
+            for path in sorted(r):
+                # ignore malicious paths
+                if (not path.startswith('fastannotate/')
+                    or '/../' in (path + '/')):
+                    ui.debug('fastannotate: ignored malicious path %s\n' % path)
+                    continue
+                content = r[path]
+                if ui.debugflag:
+                    ui.debug('fastannotate: writing %d bytes to %s\n'
+                             % (len(content), path))
+                repo.vfs.makedirs(os.path.dirname(path))
+                with repo.vfs(path, 'wb') as f:
+                    f.write(content)
+
+def _filterfetchpaths(repo, paths):
+    """return a subset of paths whose history is long and need to fetch linelog
+    from the server. works with remotefilelog and non-remotefilelog repos.
+    """
+    threshold = repo.ui.configint('fastannotate', 'clientfetchthreshold', 10)
+    if threshold <= 0:
+        return paths
+
+    result = []
+    for path in paths:
+        try:
+            if len(repo.file(path)) >= threshold:
+                result.append(path)
+        except Exception: # file not found etc.
+            result.append(path)
+
+    return result
+
+def localreposetup(ui, repo):
+    class fastannotaterepo(repo.__class__):
+        def prefetchfastannotate(self, paths, peer=None):
+            master = _getmaster(self.ui)
+            needupdatepaths = []
+            lastnodemap = {}
+            try:
+                for path in _filterfetchpaths(self, paths):
+                    with context.annotatecontext(self, path) as actx:
+                        if not actx.isuptodate(master, strict=False):
+                            needupdatepaths.append(path)
+                            lastnodemap[path] = actx.lastnode
+                if needupdatepaths:
+                    clientfetch(self, needupdatepaths, lastnodemap, peer)
+            except Exception as ex:
+                # could be directory not writable or so, not fatal
+                self.ui.debug('fastannotate: prefetch failed: %r\n' % ex)
+    repo.__class__ = fastannotaterepo
+
+def clientreposetup(ui, repo):
+    _registerwireprotocommand()
+    if repo.local():
+        localreposetup(ui, repo)
+    # TODO: this mutates global state, but only if at least one repo
+    # has the extension enabled. This is probably bad for hgweb.
+    if peersetup not in hg.wirepeersetupfuncs:
+        hg.wirepeersetupfuncs.append(peersetup)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/revmap.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,254 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# revmap: trivial hg hash - linelog rev bidirectional map
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import bisect
+import os
+import struct
+
+from mercurial.node import hex
+from mercurial import (
+    error as hgerror,
+    pycompat,
+)
+from . import error
+
+# the revmap file format is straightforward:
+#
+#    8 bytes: header
+#    1 byte : flag for linelog revision 1
+#    ? bytes: (optional) '\0'-terminated path string
+#             only exists if (flag & renameflag) != 0
+#   20 bytes: hg hash for linelog revision 1
+#    1 byte : flag for linelog revision 2
+#    ? bytes: (optional) '\0'-terminated path string
+#   20 bytes: hg hash for linelog revision 2
+#   ....
+#
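+# for example, an ordinary revision is stored as a single flag byte '\x00'
+# followed by its 20-byte hash; a rename (flag & renameflag) additionally
+# carries the new path first, e.g. (an illustrative sketch):
+#   '\x02' + 'new/path' + '\0' + <20-byte hash>
+#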
+# the implementation is deliberately simple: __init__ loads the whole revmap
+# eagerly, with no laziness. a benchmark shows loading 10000 revisions takes
+# about 0.015 seconds, which is fast enough for our use-case. if this
+# implementation becomes a bottleneck, we can change it to lazily read the
+# file from the end.
+
+# whether the changeset is in a side branch, i.e. not in the linear main
+# branch, but only referenced by lines in merge changesets.
+sidebranchflag = 1
+
+# whether the changeset changes the file path (ie. is a rename)
+renameflag = 2
+
+# len(mercurial.node.nullid)
+_hshlen = 20
+
+class revmap(object):
+    """trivial hg bin hash - linelog rev bidirectional map
+
+    also stores a flag (uint8) for each revision, and tracks renames.
+    """
+
+    HEADER = b'REVMAP1\0'
+
+    def __init__(self, path=None):
+        """create or load the revmap, optionally associate to a file
+
+        if path is None, the revmap is entirely in-memory. the caller is
+        responsible for locking; concurrent writes to the same file are
+        unsafe. the caller needs to make sure one file is associated with at
+        most one revmap object at a time."""
+        self.path = path
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        # since rename does not happen frequently, do not store path for every
+        # revision. self._renamerevs can be used for bisecting.
+        self._renamerevs = [0]
+        self._renamepaths = ['']
+        self._lastmaxrev = -1
+        if path:
+            if os.path.exists(path):
+                self._load()
+            else:
+                # write the header so "append" can do incremental updates
+                self.flush()
+
+    def copyfrom(self, rhs):
+        """copy the map data from another revmap. do not affect self.path"""
+        self._rev2hsh = rhs._rev2hsh[:]
+        self._rev2flag = rhs._rev2flag[:]
+        self._hsh2rev = rhs._hsh2rev.copy()
+        self._renamerevs = rhs._renamerevs[:]
+        self._renamepaths = rhs._renamepaths[:]
+        self._lastmaxrev = -1
+
+    @property
+    def maxrev(self):
+        """return max linelog revision number"""
+        return len(self._rev2hsh) - 1
+
+    def append(self, hsh, sidebranch=False, path=None, flush=False):
+        """add a binary hg hash and return the mapped linelog revision.
+        if flush is True, incrementally update the file.
+        """
+        if hsh in self._hsh2rev:
+            raise error.CorruptedFileError('%r is in revmap already' % hex(hsh))
+        if len(hsh) != _hshlen:
+            raise hgerror.ProgrammingError('hsh must be %d-char long' % _hshlen)
+        idx = len(self._rev2hsh)
+        flag = 0
+        if sidebranch:
+            flag |= sidebranchflag
+        if path is not None and path != self._renamepaths[-1]:
+            flag |= renameflag
+            self._renamerevs.append(idx)
+            self._renamepaths.append(path)
+        self._rev2hsh.append(hsh)
+        self._rev2flag.append(flag)
+        self._hsh2rev[hsh] = idx
+        if flush:
+            self.flush()
+        return idx
+
+    def rev2hsh(self, rev):
+        """convert linelog revision to hg hash. return None if not found."""
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2hsh[rev]
+
+    def rev2flag(self, rev):
+        """get the flag (uint8) for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        return self._rev2flag[rev]
+
+    def rev2path(self, rev):
+        """get the path for a given linelog revision.
+        return None if revision does not exist.
+        """
+        if rev > self.maxrev or rev < 0:
+            return None
+        idx = bisect.bisect_right(self._renamerevs, rev) - 1
+        return self._renamepaths[idx]
+
+    def hsh2rev(self, hsh):
+        """convert hg hash to linelog revision. return None if not found."""
+        return self._hsh2rev.get(hsh)
+
+    def clear(self, flush=False):
+        """make the map empty. if flush is True, write to disk"""
+        # rev 0 is reserved, real rev starts from 1
+        self._rev2hsh = [None]
+        self._rev2flag = [None]
+        self._hsh2rev = {}
+        self._renamerevs = [0]
+        self._renamepaths = ['']
+        self._lastmaxrev = -1
+        if flush:
+            self.flush()
+
+    def flush(self):
+        """write the state down to the file"""
+        if not self.path:
+            return
+        if self._lastmaxrev == -1: # write the entire file
+            with open(self.path, 'wb') as f:
+                f.write(self.HEADER)
+                for i in pycompat.xrange(1, len(self._rev2hsh)):
+                    self._writerev(i, f)
+        else: # append incrementally
+            with open(self.path, 'ab') as f:
+                for i in pycompat.xrange(self._lastmaxrev + 1,
+                                         len(self._rev2hsh)):
+                    self._writerev(i, f)
+        self._lastmaxrev = self.maxrev
+
+    def _load(self):
+        """load state from file"""
+        if not self.path:
+            return
+        # use local variables in a loop. CPython uses LOAD_FAST for them,
+        # which is faster than both LOAD_CONST and LOAD_GLOBAL.
+        flaglen = 1
+        hshlen = _hshlen
+        with open(self.path, 'rb') as f:
+            if f.read(len(self.HEADER)) != self.HEADER:
+                raise error.CorruptedFileError()
+            self.clear(flush=False)
+            while True:
+                buf = f.read(flaglen)
+                if not buf:
+                    break
+                flag = ord(buf)
+                rev = len(self._rev2hsh)
+                if flag & renameflag:
+                    path = self._readcstr(f)
+                    self._renamerevs.append(rev)
+                    self._renamepaths.append(path)
+                hsh = f.read(hshlen)
+                if len(hsh) != hshlen:
+                    raise error.CorruptedFileError()
+                self._hsh2rev[hsh] = rev
+                self._rev2flag.append(flag)
+                self._rev2hsh.append(hsh)
+        self._lastmaxrev = self.maxrev
+
+    def _writerev(self, rev, f):
+        """append a revision data to file"""
+        flag = self._rev2flag[rev]
+        hsh = self._rev2hsh[rev]
+        f.write(struct.pack('B', flag))
+        if flag & renameflag:
+            path = self.rev2path(rev)
+            if path is None:
+                raise error.CorruptedFileError('cannot find path for %s' % rev)
+            f.write(path + '\0')
+        f.write(hsh)
+
+    @staticmethod
+    def _readcstr(f):
+        """read a C-language-like '\0'-terminated string"""
+        buf = ''
+        while True:
+            ch = f.read(1)
+            if not ch: # unexpected eof
+                raise error.CorruptedFileError()
+            if ch == '\0':
+                break
+            buf += ch
+        return buf
+
+    def __contains__(self, f):
+        """(fctx or (node, path)) -> bool.
+        test if (node, path) is in the map, and is not in a side branch.
+        f can be either a tuple of (node, path), or a fctx.
+        """
+        if isinstance(f, tuple): # f: (node, path)
+            hsh, path = f
+        else: # f: fctx
+            hsh, path = f.node(), f.path()
+        rev = self.hsh2rev(hsh)
+        if rev is None:
+            return False
+        if path is not None and path != self.rev2path(rev):
+            return False
+        return (self.rev2flag(rev) & sidebranchflag) == 0
+
+def getlastnode(path):
+    """return the last hash in a revmap, without loading its full content.
+    this is equivalent to `m = revmap(path); m.rev2hsh(m.maxrev)`, but faster.
+    """
+    hsh = None
+    try:
+        with open(path, 'rb') as f:
+            f.seek(-_hshlen, 2)
+            if f.tell() > len(revmap.HEADER):
+                hsh = f.read(_hshlen)
+    except IOError:
+        pass
+    return hsh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/fastannotate/support.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,122 @@
+# Copyright 2016-present Facebook. All Rights Reserved.
+#
+# support: fastannotate support for hgweb, and filectx
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+    context as hgcontext,
+    dagop,
+    extensions,
+    hgweb,
+    patch,
+    util,
+)
+
+from . import (
+    context,
+    revmap,
+)
+
+class _lazyfctx(object):
+    """delegates to fctx but do not construct fctx when unnecessary"""
+
+    def __init__(self, repo, node, path):
+        self._node = node
+        self._path = path
+        self._repo = repo
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+    @util.propertycache
+    def _fctx(self):
+        return context.resolvefctx(self._repo, self._node, self._path)
+
+    def __getattr__(self, name):
+        return getattr(self._fctx, name)
+
+def _convertoutputs(repo, annotated, contents):
+    """convert fastannotate outputs to vanilla annotate format"""
+    # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
+    # convert to what fctx.annotate returns: [annotateline]
+    results = []
+    fctxmap = {}
+    annotateline = dagop.annotateline
+    for i, (hsh, linenum, path) in enumerate(annotated):
+        if (hsh, path) not in fctxmap:
+            fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
+        # linenum: the user wants 1-based, we have 0-based.
+        lineno = linenum + 1
+        fctx = fctxmap[(hsh, path)]
+        line = contents[i]
+        results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
+    return results
+
+def _getmaster(fctx):
+    """(fctx) -> str"""
+    return fctx._repo.ui.config('fastannotate', 'mainbranch') or 'default'
+
+def _doannotate(fctx, follow=True, diffopts=None):
+    """like the vanilla fctx.annotate, but do it via fastannotate, and make
+    the output format compatible with the vanilla fctx.annotate.
+    may raise Exception, and always return line numbers.
+    """
+    master = _getmaster(fctx)
+    annotated = contents = None
+
+    with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
+        try:
+            annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                              showpath=True, showlines=True)
+        except Exception:
+            ac.rebuild() # try rebuild once
+            fctx._repo.ui.debug('fastannotate: %s: rebuilding broken cache\n'
+                                % fctx._path)
+            annotated, contents = ac.annotate(fctx.rev(), master=master,
+                                              showpath=True, showlines=True)
+
+    assert annotated and contents
+    return _convertoutputs(fctx._repo, annotated, contents)
+
+def _hgwebannotate(orig, fctx, ui):
+    diffopts = patch.difffeatureopts(ui, untrusted=True,
+                                     section='annotate', whitespace=True)
+    return _doannotate(fctx, diffopts=diffopts)
+
+def _fctxannotate(orig, self, follow=False, linenumber=False, skiprevs=None,
+                  diffopts=None):
+    if skiprevs:
+        # skiprevs is not supported yet
+        return orig(self, follow, linenumber, skiprevs=skiprevs,
+                    diffopts=diffopts)
+    try:
+        return _doannotate(self, follow, diffopts)
+    except Exception as ex:
+        self._repo.ui.debug('fastannotate: falling back to the vanilla '
+                            'annotate: %r\n' % ex)
+        return orig(self, follow=follow, skiprevs=skiprevs,
+                    diffopts=diffopts)
+
+def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
+    # skipset: a set-like used to test if a fctx needs to be downloaded
+    skipset = None
+    with context.fctxannotatecontext(self, follow, diffopts) as ac:
+        skipset = revmap.revmap(ac.revmappath)
+    return orig(self, follow, skiprevs=skiprevs, diffopts=diffopts,
+                prefetchskip=skipset)
+
+def replacehgwebannotate():
+    extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
+
+def replacefctxannotate():
+    extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
--- a/hgext/fetch.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/fetch.py	Mon Oct 22 14:46:06 2018 -0400
@@ -41,7 +41,8 @@
     ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
     ('', 'switch-parent', None, _('switch parents when merging')),
     ] + cmdutil.commitopts + cmdutil.commitopts2 + cmdutil.remoteopts,
-    _('hg fetch [SOURCE]'))
+    _('hg fetch [SOURCE]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
 def fetch(ui, repo, source='default', **opts):
     '''pull changes from a remote repository, merge new changes if needed.
 
--- a/hgext/fix.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/fix.py	Mon Oct 22 14:46:06 2018 -0400
@@ -58,6 +58,10 @@
 from mercurial.node import nullrev
 from mercurial.node import wdirrev
 
+from mercurial.utils import (
+    procutil,
+)
+
 from mercurial import (
     cmdutil,
     context,
@@ -96,15 +100,17 @@
 # user.
 configitem('fix', 'maxfilesize', default='2MB')
 
-@command('fix',
-    [('', 'all', False, _('fix all non-public non-obsolete revisions')),
-     ('', 'base', [], _('revisions to diff against (overrides automatic '
-                        'selection, and applies to every revision being '
-                        'fixed)'), _('REV')),
-     ('r', 'rev', [], _('revisions to fix'), _('REV')),
-     ('w', 'working-dir', False, _('fix the working directory')),
-     ('', 'whole', False, _('always fix every line of a file'))],
-    _('[OPTION]... [FILE]...'))
+allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
+baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
+                             'selection, and applies to every revision being '
+                             'fixed)'), _('REV'))
+revopt = ('r', 'rev', [], _('revisions to fix'), _('REV'))
+wdiropt = ('w', 'working-dir', False, _('fix the working directory'))
+wholeopt = ('', 'whole', False, _('always fix every line of a file'))
+usage = _('[OPTION]... [FILE]...')
+
+@command('fix', [allopt, baseopt, revopt, wdiropt, wholeopt], usage,
+        helpcategory=command.CATEGORY_FILE_CONTENTS)
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory
 
@@ -161,6 +167,7 @@
         # it makes the results more easily reproducible.
         filedata = collections.defaultdict(dict)
         replacements = {}
+        wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
         with ui.makeprogress(topic=_('fixing'), unit=_('files'),
                              total=sum(numitems.values())) as progress:
@@ -178,12 +185,28 @@
                     ctx = repo[rev]
                     if rev == wdirrev:
                         writeworkingdir(repo, ctx, filedata[rev], replacements)
+                        wdirwritten = bool(filedata[rev])
                     else:
                         replacerev(ui, repo, ctx, filedata[rev], replacements)
                     del filedata[rev]
 
-        replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
-        scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
+        cleanup(repo, replacements, wdirwritten)
+
+def cleanup(repo, replacements, wdirwritten):
+    """Calls scmutil.cleanupnodes() with the given replacements.
+
+    "replacements" is a dict from nodeid to nodeid, with one key and one value
+    for every revision that was affected by fixing. This is slightly different
+    from cleanupnodes().
+
+    "wdirwritten" is a bool which tells whether the working copy was affected by
+    fixing, since it has no entry in "replacements".
+
+    Useful as a hook point for extending "hg fix" with output summarizing the
+    effects of the command, though we choose not to output anything here.
+    """
+    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
+    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
 
 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
     """"Constructs the list of files to be fixed at specific revisions
@@ -267,8 +290,8 @@
     """
     files = set()
     for basectx in basectxs:
-        stat = repo.status(
-            basectx, fixctx, match=match, clean=bool(pats), unknown=bool(pats))
+        stat = basectx.status(fixctx, match=match, listclean=bool(pats),
+                              listunknown=bool(pats))
         files.update(
             set(itertools.chain(stat.added, stat.modified, stat.clean,
                                 stat.unknown)))
@@ -417,27 +440,33 @@
     starting with the file's content in the fixctx. Fixers that support line
     ranges will affect lines that have changed relative to any of the basectxs
     (i.e. they will only avoid lines that are common to all basectxs).
+
+    A fixer tool's stdout will become the file's new content if and only if it
+    exits with code zero.
     """
     newdata = fixctx[path].data()
     for fixername, fixer in fixers.iteritems():
         if fixer.affects(opts, fixctx, path):
-            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
-            command = fixer.command(ui, path, ranges)
+            rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata)
+            command = fixer.command(ui, path, rangesfn)
             if command is None:
                 continue
             ui.debug('subprocess: %s\n' % (command,))
             proc = subprocess.Popen(
-                command,
+                procutil.tonativestr(command),
                 shell=True,
-                cwd='/',
+                cwd=procutil.tonativestr(b'/'),
                 stdin=subprocess.PIPE,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE)
             newerdata, stderr = proc.communicate(newdata)
             if stderr:
                 showstderr(ui, fixctx.rev(), fixername, stderr)
-            else:
+            if proc.returncode == 0:
                 newdata = newerdata
+            elif not stderr:
+                showstderr(ui, fixctx.rev(), fixername,
+                           _('exited with status %d\n') % (proc.returncode,))
     return newdata
 
 def showstderr(ui, rev, fixername, stderr):
@@ -567,7 +596,7 @@
         """Should this fixer run on the file at the given path and context?"""
         return scmutil.match(fixctx, [self._fileset], opts)(path)
 
-    def command(self, ui, path, ranges):
+    def command(self, ui, path, rangesfn):
         """A shell command to use to invoke this fixer on the given file/lines
 
         May return None if there is no appropriate command to run for the given
@@ -577,6 +606,7 @@
         parts = [expand(ui, self._command,
                         {'rootpath': path, 'basename': os.path.basename(path)})]
         if self._linerange:
+            ranges = rangesfn()
             if not ranges:
                 # No line ranges to fix, so don't run the fixer.
                 return None
--- a/hgext/fsmonitor/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/fsmonitor/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -460,7 +460,7 @@
                 f = open(fn, 'wb')
             else:
                 fn = 'fsmonitorfail.log'
-                f = self.opener(fn, 'wb')
+                f = self.vfs.open(fn, 'wb')
         except (IOError, OSError):
             self.ui.warn(_('warning: unable to write to %s\n') % fn)
             return
@@ -564,8 +564,10 @@
             self.ui.fout, self.ui.ferr = fout, ferr
 
         # clean isn't tested since it's set to True above
-        _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
-                 rv2)
+        with self.wlock():
+            _cmpsets(
+                [modified, added, removed, deleted, unknown, ignored, clean],
+                rv2)
         modified, added, removed, deleted, unknown, ignored, clean = rv2
 
     return scmutil.status(
--- a/hgext/fsmonitor/pywatchman/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/fsmonitor/pywatchman/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -48,6 +48,14 @@
 except ImportError:
     from . import pybser as bser
 
+from mercurial.utils import (
+    procutil,
+)
+
+from mercurial import (
+    pycompat,
+)
+
 from . import (
     capabilities,
     compat,
@@ -580,7 +588,8 @@
             '--no-pretty',
             '-j',
         ]
-        self.proc = subprocess.Popen(args,
+        self.proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr,
+                                                     args),
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE)
         return self.proc
@@ -822,7 +831,8 @@
                 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                 args['startupinfo'] = startupinfo
 
-            p = subprocess.Popen(cmd, **args)
+            p = subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmd),
+                                 **args)
 
         except OSError as e:
             raise WatchmanError('"watchman" executable not in PATH (%s)' % e)
--- a/hgext/githelp.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/githelp.py	Mon Oct 22 14:46:06 2018 -0400
@@ -50,8 +50,9 @@
     s = re.sub('~$', '~1', s)
     return s
 
-@command('^githelp|git', [
-    ], _('hg githelp'))
+@command('githelp|git', [
+    ], _('hg githelp'),
+    helpcategory=command.CATEGORY_HELP, helpbasic=True)
 def githelp(ui, repo, *args, **kwargs):
     '''suggests the Mercurial equivalent of the given git command
 
--- a/hgext/gpg.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/gpg.py	Mon Oct 22 14:46:06 2018 -0400
@@ -14,6 +14,7 @@
 from mercurial import (
     cmdutil,
     error,
+    help,
     match,
     node as hgnode,
     pycompat,
@@ -46,6 +47,9 @@
     generic=True,
 )
 
+# Custom help category
+_HELP_CATEGORY = 'gpg'
+
 class gpg(object):
     def __init__(self, path, key=None):
         self.path = path
@@ -169,7 +173,7 @@
         validkeys.append((key[1], key[2], key[3]))
     return validkeys
 
-@command("sigs", [], _('hg sigs'))
+@command("sigs", [], _('hg sigs'), helpcategory=_HELP_CATEGORY)
 def sigs(ui, repo):
     """list signed changesets"""
     mygpg = newgpg(ui)
@@ -194,7 +198,7 @@
             r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
             ui.write("%-30s %s\n" % (keystr(ui, k), r))
 
-@command("sigcheck", [], _('hg sigcheck REV'))
+@command("sigcheck", [], _('hg sigcheck REV'), helpcategory=_HELP_CATEGORY)
 def sigcheck(ui, repo, rev):
     """verify all the signatures there may be for a particular revision"""
     mygpg = newgpg(ui)
@@ -237,7 +241,8 @@
            _('use text as commit message'), _('TEXT')),
           ('e', 'edit', False, _('invoke editor on commit messages')),
          ] + cmdutil.commitopts2,
-         _('hg sign [OPTION]... [REV]...'))
+         _('hg sign [OPTION]... [REV]...'),
+         helpcategory=_HELP_CATEGORY)
 def sign(ui, repo, *revs, **opts):
     """add a signature for the current or given revision
 
@@ -327,3 +332,10 @@
         return "%s\n" % hgnode.hex(node)
     else:
         raise error.Abort(_("unknown signature version"))
+
+def extsetup(ui):
+    # Add our category before "Repository maintenance".
+    help.CATEGORY_ORDER.insert(
+        help.CATEGORY_ORDER.index(command.CATEGORY_MAINTENANCE),
+        _HELP_CATEGORY)
+    help.CATEGORY_NAMES[_HELP_CATEGORY] = 'GPG signing'
--- a/hgext/graphlog.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/graphlog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -54,6 +54,7 @@
      _('do not display revision or any of its ancestors'), _('REV')),
     ] + cmdutil.logopts + cmdutil.walkopts,
     _('[OPTION]... [FILE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     inferrepo=True)
 def glog(ui, repo, *pats, **opts):
     """show revision history alongside an ASCII revision graph
--- a/hgext/hgk.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/hgk.py	Mon Oct 22 14:46:06 2018 -0400
@@ -227,7 +227,7 @@
             else:
                 i -= chunk
 
-            for x in xrange(chunk):
+            for x in pycompat.xrange(chunk):
                 if i + x >= count:
                     l[chunk - x:] = [0] * (chunk - x)
                     break
@@ -238,7 +238,7 @@
                 else:
                     if (i + x) in repo:
                         l[x] = 1
-            for x in xrange(chunk - 1, -1, -1):
+            for x in pycompat.xrange(chunk - 1, -1, -1):
                 if l[x] != 0:
                     yield (i + x, full is not None and l[x] or None)
             if i == 0:
@@ -249,7 +249,7 @@
         if len(ar) == 0:
             return 1
         mask = 0
-        for i in xrange(len(ar)):
+        for i in pycompat.xrange(len(ar)):
             if sha in reachable[i]:
                 mask |= 1 << i
 
@@ -345,7 +345,8 @@
 @command('view',
     [('l', 'limit', '',
      _('limit number of changes displayed'), _('NUM'))],
-    _('[-l LIMIT] [REVRANGE]'))
+    _('[-l LIMIT] [REVRANGE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
 def view(ui, repo, *etc, **opts):
     "start interactive history viewer"
     opts = pycompat.byteskwargs(opts)
--- a/hgext/histedit.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/histedit.py	Mon Oct 22 14:46:06 2018 -0400
@@ -386,7 +386,7 @@
         rules = []
         rulelen = int(lines[index])
         index += 1
-        for i in xrange(rulelen):
+        for i in pycompat.xrange(rulelen):
             ruleaction = lines[index]
             index += 1
             rule = lines[index]
@@ -397,7 +397,7 @@
         replacements = []
         replacementlen = int(lines[index])
         index += 1
-        for i in xrange(replacementlen):
+        for i in pycompat.xrange(replacementlen):
             replacement = lines[index]
             original = node.bin(replacement[:40])
             succ = [node.bin(replacement[i:i + 40]) for i in
@@ -830,8 +830,7 @@
 
     def run(self):
         if self.repo['.'].node() != self.node:
-            mergemod.update(self.repo, self.node, False, True)
-            #                                     branchmerge, force)
+            mergemod.update(self.repo, self.node, branchmerge=False, force=True)
         return self.continueclean()
 
     def continuedirty(self):
@@ -910,7 +909,7 @@
     if not outgoing.missing:
         raise error.Abort(_('no outgoing ancestors'))
     roots = list(repo.revs("roots(%ln)", outgoing.missing))
-    if 1 < len(roots):
+    if len(roots) > 1:
         msg = _('there are ambiguous outgoing revisions')
         hint = _("see 'hg help histedit' for more detail")
         raise error.Abort(msg, hint=hint)
@@ -929,7 +928,8 @@
       _('force outgoing even for unrelated repositories')),
      ('r', 'rev', [], _('first revision to be edited'), _('REV'))] +
     cmdutil.formatteropts,
-     _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"))
+     _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def histedit(ui, repo, *freeargs, **opts):
     """interactively edit changeset history
 
@@ -1084,7 +1084,7 @@
             raise error.Abort(_('only --commands argument allowed with '
                                '--edit-plan'))
     else:
-        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+        if state.inprogress():
             raise error.Abort(_('history edit already in progress, try '
                                '--continue or --abort'))
         if outg:
@@ -1202,7 +1202,8 @@
         mapping = {}
 
     for n in tmpnodes:
-        mapping[n] = ()
+        if n in repo:
+            mapping[n] = ()
 
     # remove entries about unknown nodes
     nodemap = repo.unfiltered().changelog.nodemap
@@ -1624,8 +1625,8 @@
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
-    if os.path.exists(os.path.join(repo.path, 'histedit-state')):
-        state = histeditstate(repo)
+    state = histeditstate(repo)
+    if state.inprogress():
         state.read()
         histedit_nodes = {action.node for action
                           in state.actions if action.node}
@@ -1638,9 +1639,9 @@
 extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.vfs.join('histedit-state')):
+    state = histeditstate(repo)
+    if not state.inprogress():
         return
-    state = histeditstate(repo)
     state.read()
     if state.actions:
         # i18n: column positioning for "hg summary"
--- a/hgext/infinitepush/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/infinitepush/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -357,8 +357,7 @@
     if not self.capable('pushkey'):
         yield {}, None
     f = wireprotov1peer.future()
-    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
-                  (namespace, patterns))
+    self.ui.debug('preparing listkeys for "%s"\n' % namespace)
     yield {
         'namespace': encoding.fromlocal(namespace),
         'patterns': wireprototypes.encodelist(patterns)
@@ -696,8 +695,8 @@
     return common, True, remoteheads
 
 def _push(orig, ui, repo, dest=None, *args, **opts):
-
-    bookmark = opts.get(r'bookmark')
+    opts = pycompat.byteskwargs(opts)
+    bookmark = opts.get('bookmark')
     # we only support pushing one infinitepush bookmark at once
     if len(bookmark) == 1:
         bookmark = bookmark[0]
@@ -718,7 +717,7 @@
         if scratchpush:
             # this is an infinitepush, we don't want the bookmark to be applied
             # rather that should be stored in the bundlestore
-            opts[r'bookmark'] = []
+            opts['bookmark'] = []
             ui.setconfig(experimental, configscratchpush, True)
             oldphasemove = extensions.wrapfunction(exchange,
                                                    '_localphasemove',
@@ -732,7 +731,7 @@
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save it before push and restore after
         remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
-        result = orig(ui, repo, dest, *args, **opts)
+        result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
         if common.isremotebooksenabled(ui):
             if bookmark and scratchpush:
                 other = hg.peer(repo, opts, destpath)
@@ -899,7 +898,7 @@
                 if part.type in ('pushkey', 'changegroup'):
                     if op.reply is not None:
                         rpart = op.reply.newpart('reply:%s' % part.type)
-                        rpart.addparam('in-reply-to', str(part.id),
+                        rpart.addparam('in-reply-to', b'%d' % part.id,
                                        mandatory=False)
                         rpart.addparam('return', '1', mandatory=False)
 
@@ -1182,5 +1181,6 @@
         cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
                    '-R', root] + nodesargs
         # Process will run in background. We don't care about the return code
-        subprocess.Popen(cmdline, close_fds=True, shell=False,
+        subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmdline),
+                         close_fds=True, shell=False,
                          stdin=devnull, stdout=devnull, stderr=devnull)
--- a/hgext/infinitepush/common.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/infinitepush/common.py	Mon Oct 22 14:46:06 2018 -0400
@@ -33,7 +33,7 @@
     fd, bundlefile = pycompat.mkstemp()
     try:  # guards bundlefile
         try:  # guards fp
-            fp = os.fdopen(fd, 'wb')
+            fp = os.fdopen(fd, r'wb')
             fp.write(data)
         finally:
             fp.close()
--- a/hgext/infinitepush/store.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/infinitepush/store.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,14 @@
 import subprocess
 import tempfile
 
+from mercurial import (
+    node,
+    pycompat,
+)
+from mercurial.utils import (
+    procutil,
+)
+
 NamedTemporaryFile = tempfile.NamedTemporaryFile
 
 class BundleWriteException(Exception):
@@ -73,7 +81,7 @@
         return os.path.join(self._dirpath(filename), filename)
 
     def write(self, data):
-        filename = hashlib.sha1(data).hexdigest()
+        filename = node.hex(hashlib.sha1(data).digest())
         dirpath = self._dirpath(filename)
 
         if not os.path.exists(dirpath):
@@ -111,7 +119,8 @@
 
     def _call_binary(self, args):
         p = subprocess.Popen(
-            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            pycompat.rapply(procutil.tonativestr, args),
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
             close_fds=True)
         stdout, stderr = p.communicate()
         returncode = p.returncode
--- a/hgext/journal.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/journal.py	Mon Oct 22 14:46:06 2018 -0400
@@ -440,7 +440,8 @@
         ('', 'all', None, 'show history for all names'),
         ('c', 'commits', None, 'show commit metadata'),
     ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
-    '[OPTION]... [BOOKMARKNAME]')
+    '[OPTION]... [BOOKMARKNAME]',
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def journal(ui, repo, *args, **opts):
     """show the previous position of bookmarks and the working copy
 
@@ -477,6 +478,8 @@
         name = args[0]
 
     fm = ui.formatter('journal', opts)
+    def formatnodes(nodes):
+        return fm.formatlist(map(fm.hexfunc, nodes), name='node', sep=',')
 
     if opts.get("template") != "json":
         if name is None:
@@ -491,31 +494,32 @@
     for count, entry in enumerate(repo.journal.filtered(name=name)):
         if count == limit:
             break
-        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
-                                     name='node', sep=',')
-        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
-                                     name='node', sep=',')
 
         fm.startitem()
-        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
-        fm.write('newhashes', '%s', newhashesstr)
+        fm.condwrite(ui.verbose, 'oldnodes', '%s -> ',
+                     formatnodes(entry.oldhashes))
+        fm.write('newnodes', '%s', formatnodes(entry.newhashes))
         fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
         fm.condwrite(
             opts.get('all') or name.startswith('re:'),
             'name', '  %-8s', entry.name)
 
-        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
-        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
+        fm.condwrite(ui.verbose, 'date', ' %s',
+                     fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2'))
         fm.write('command', '  %s\n', entry.command)
 
         if opts.get("commits"):
-            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+            if fm.isplain():
+                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+            else:
+                displayer = logcmdutil.changesetformatter(
+                    ui, repo, fm.nested('changesets'), diffopts=opts)
             for hash in entry.newhashes:
                 try:
                     ctx = repo[hash]
                     displayer.show(ctx)
                 except error.RepoLookupError as e:
-                    fm.write('repolookuperror', "%s\n\n", pycompat.bytestr(e))
+                    fm.plain("%s\n\n" % pycompat.bytestr(e))
             displayer.close()
 
     fm.end()
--- a/hgext/keyword.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/keyword.py	Mon Oct 22 14:46:06 2018 -0400
@@ -208,7 +208,7 @@
 def _shrinktext(text, subfunc):
     '''Helper for keyword expansion removal in text.
     Depending on subfunc also returns number of substitutions.'''
-    return subfunc(r'$\1$', text)
+    return subfunc(br'$\1$', text)
 
 def _preselect(wstatus, changed):
     '''Retrieves modified and added files from a working directory state
@@ -250,12 +250,12 @@
     @util.propertycache
     def rekw(self):
         '''Returns regex for unexpanded keywords.'''
-        return re.compile(r'\$(%s)\$' % self.escape)
+        return re.compile(br'\$(%s)\$' % self.escape)
 
     @util.propertycache
     def rekwexp(self):
         '''Returns regex for expanded keywords.'''
-        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
+        return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
 
     def substitute(self, data, path, ctx, subfunc):
         '''Replaces keywords in data with expanded template.'''
@@ -430,6 +430,8 @@
     def demoitems(section, items):
         ui.write('[%s]\n' % section)
         for k, v in sorted(items):
+            if isinstance(v, bool):
+                v = stringutil.pprint(v)
             ui.write('%s = %s\n' % (k, v))
 
     fn = 'demo.txt'
@@ -439,7 +441,7 @@
         baseui = ui
     else:
         baseui = repo.baseui
-    repo = localrepo.localrepository(baseui, tmpdir, True)
+    repo = localrepo.instance(baseui, tmpdir, create=True)
     ui.setconfig('keyword', fn, '', 'keyword')
     svn = ui.configbool('keywordset', 'svn')
     # explicitly set keywordset for demo output
@@ -567,7 +569,7 @@
         showfiles += ([f for f in files if f not in kwfiles],
                       [f for f in status.unknown if f not in kwunknown])
     kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
-    kwstates = zip(kwlabels, 'K!kIi', showfiles)
+    kwstates = zip(kwlabels, pycompat.bytestr('K!kIi'), showfiles)
     fm = ui.formatter('kwfiles', opts)
     fmt = '%.0s%s\n'
     if opts.get('all') or ui.verbose:
@@ -576,8 +578,8 @@
         label = 'kwfiles.' + kwstate
         for f in filenames:
             fm.startitem()
-            fm.write('kwstatus path', fmt, char,
-                     repo.pathto(f, cwd), label=label)
+            fm.data(kwstatus=char, path=f)
+            fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
     fm.end()
 
 @command('kwshrink',
--- a/hgext/largefiles/basestore.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/largefiles/basestore.py	Mon Oct 22 14:46:06 2018 -0400
@@ -62,25 +62,24 @@
 
         at = 0
         available = self.exists(set(hash for (_filename, hash) in files))
-        progress = ui.makeprogress(_('getting largefiles'), unit=_('files'),
-                                   total=len(files))
-        for filename, hash in files:
-            progress.update(at)
-            at += 1
-            ui.note(_('getting %s:%s\n') % (filename, hash))
+        with ui.makeprogress(_('getting largefiles'), unit=_('files'),
+                             total=len(files)) as progress:
+            for filename, hash in files:
+                progress.update(at)
+                at += 1
+                ui.note(_('getting %s:%s\n') % (filename, hash))
 
-            if not available.get(hash):
-                ui.warn(_('%s: largefile %s not available from %s\n')
-                        % (filename, hash, util.hidepassword(self.url)))
-                missing.append(filename)
-                continue
+                if not available.get(hash):
+                    ui.warn(_('%s: largefile %s not available from %s\n')
+                            % (filename, hash, util.hidepassword(self.url)))
+                    missing.append(filename)
+                    continue
 
-            if self._gethash(filename, hash):
-                success.append((filename, hash))
-            else:
-                missing.append(filename)
+                if self._gethash(filename, hash):
+                    success.append((filename, hash))
+                else:
+                    missing.append(filename)
 
-        progress.complete()
         return (success, missing)
 
     def _gethash(self, filename, hash):
--- a/hgext/largefiles/lfcommands.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/largefiles/lfcommands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -118,14 +118,13 @@
                 matcher = None
 
             lfiletohash = {}
-            progress = ui.makeprogress(_('converting revisions'),
-                                       unit=_('revisions'),
-                                       total=rsrc['tip'].rev())
-            for ctx in ctxs:
-                progress.update(ctx.rev())
-                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
-                    lfiles, normalfiles, matcher, size, lfiletohash)
-            progress.complete()
+            with ui.makeprogress(_('converting revisions'),
+                                 unit=_('revisions'),
+                                 total=rsrc['tip'].rev()) as progress:
+                for ctx in ctxs:
+                    progress.update(ctx.rev())
+                    _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
+                        lfiles, normalfiles, matcher, size, lfiletohash)
 
             if rdst.wvfs.exists(lfutil.shortname):
                 rdst.wvfs.rmtree(lfutil.shortname)
@@ -210,6 +209,10 @@
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 renamed = fctx.renamed()
+                if renamed is None:
+                    # the code below assumes renamed to be a boolean or a list
+                    # and won't quite work with the value None
+                    renamed = False
                 renamedlfile = renamed and renamed[0] in lfiles
                 islfile |= renamedlfile
                 if 'l' in fctx.flags():
@@ -370,18 +373,17 @@
     files = [h for h in files if not retval[h]]
     ui.debug("%d largefiles need to be uploaded\n" % len(files))
 
-    progress = ui.makeprogress(_('uploading largefiles'), unit=_('files'),
-                               total=len(files))
-    for hash in files:
-        progress.update(at)
-        source = lfutil.findfile(rsrc, hash)
-        if not source:
-            raise error.Abort(_('largefile %s missing from store'
-                               ' (needs to be uploaded)') % hash)
-        # XXX check for errors here
-        store.put(source, hash)
-        at += 1
-    progress.complete()
+    with ui.makeprogress(_('uploading largefiles'), unit=_('files'),
+                         total=len(files)) as progress:
+        for hash in files:
+            progress.update(at)
+            source = lfutil.findfile(rsrc, hash)
+            if not source:
+                raise error.Abort(_('largefile %s missing from store'
+                                   ' (needs to be uploaded)') % hash)
+            # XXX check for errors here
+            store.put(source, hash)
+            at += 1
 
 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every largefile revision in the current changeset
--- a/hgext/largefiles/lfutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/largefiles/lfutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -501,37 +501,37 @@
     return filelist
 
 def getlfilestoupload(repo, missing, addfunc):
-    progress = repo.ui.makeprogress(_('finding outgoing largefiles'),
-                                    unit=_('revisions'), total=len(missing))
-    for i, n in enumerate(missing):
-        progress.update(i)
-        parents = [p for p in repo[n].parents() if p != node.nullid]
+    makeprogress = repo.ui.makeprogress
+    with makeprogress(_('finding outgoing largefiles'),
+                      unit=_('revisions'), total=len(missing)) as progress:
+        for i, n in enumerate(missing):
+            progress.update(i)
+            parents = [p for p in repo[n].parents() if p != node.nullid]
 
-        oldlfstatus = repo.lfstatus
-        repo.lfstatus = False
-        try:
-            ctx = repo[n]
-        finally:
-            repo.lfstatus = oldlfstatus
+            oldlfstatus = repo.lfstatus
+            repo.lfstatus = False
+            try:
+                ctx = repo[n]
+            finally:
+                repo.lfstatus = oldlfstatus
 
-        files = set(ctx.files())
-        if len(parents) == 2:
-            mc = ctx.manifest()
-            mp1 = ctx.parents()[0].manifest()
-            mp2 = ctx.parents()[1].manifest()
-            for f in mp1:
-                if f not in mc:
-                    files.add(f)
-            for f in mp2:
-                if f not in mc:
-                    files.add(f)
-            for f in mc:
-                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
-                    files.add(f)
-        for fn in files:
-            if isstandin(fn) and fn in ctx:
-                addfunc(fn, readasstandin(ctx[fn]))
-    progress.complete()
+            files = set(ctx.files())
+            if len(parents) == 2:
+                mc = ctx.manifest()
+                mp1 = ctx.parents()[0].manifest()
+                mp2 = ctx.parents()[1].manifest()
+                for f in mp1:
+                    if f not in mc:
+                        files.add(f)
+                for f in mp2:
+                    if f not in mc:
+                        files.add(f)
+                for f in mc:
+                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                        files.add(f)
+            for fn in files:
+                if isstandin(fn) and fn in ctx:
+                    addfunc(fn, readasstandin(ctx[fn]))
 
 def updatestandinsbymatch(repo, match):
     '''Update standins in the working directory according to specified match
--- a/hgext/largefiles/overrides.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/largefiles/overrides.py	Mon Oct 22 14:46:06 2018 -0400
@@ -889,11 +889,6 @@
         if not repo:
             return result
 
-        # If largefiles is required for this repo, permanently enable it locally
-        if 'largefiles' in repo.requirements:
-            repo.vfs.append('hgrc',
-                            util.tonativeeol('\n[extensions]\nlargefiles=\n'))
-
         # Caching is implicitly limited to 'rev' option, since the dest repo was
         # truncated at that point.  The user may expect a download count with
         # this option, so attempt whether or not this is a largefile repo.
@@ -905,14 +900,6 @@
 
     return result
 
-def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
-    orig(sourcerepo, destrepo, bookmarks, defaultpath)
-
-    # If largefiles is required for this repo, permanently enable it locally
-    if 'largefiles' in destrepo.requirements:
-        destrepo.vfs.append('hgrc',
-                            util.tonativeeol('\n[extensions]\nlargefiles=\n'))
-
 def overriderebase(orig, ui, repo, **opts):
     if not util.safehasattr(repo, '_largefilesenabled'):
         return orig(ui, repo, **opts)
--- a/hgext/largefiles/uisetup.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/largefiles/uisetup.py	Mon Oct 22 14:46:06 2018 -0400
@@ -126,7 +126,6 @@
                  _('download all versions of all largefiles'))]
     entry[1].extend(cloneopt)
     extensions.wrapfunction(hg, 'clone', overrides.hgclone)
-    extensions.wrapfunction(hg, 'postshare', overrides.hgpostshare)
 
     entry = extensions.wrapcommand(commands.table, 'cat',
                                    overrides.overridecat)
--- a/hgext/lfs/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/lfs/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -124,6 +124,8 @@
 
 from __future__ import absolute_import
 
+import sys
+
 from mercurial.i18n import _
 
 from mercurial import (
@@ -136,13 +138,13 @@
     exchange,
     extensions,
     filelog,
-    fileset,
-    hg,
+    filesetlang,
     localrepo,
     minifileset,
     node,
     pycompat,
     registrar,
+    repository,
     revlog,
     scmutil,
     templateutil,
@@ -204,6 +206,12 @@
 templatekeyword = registrar.templatekeyword()
 filesetpredicate = registrar.filesetpredicate()
 
+lfsprocessor = (
+    wrapper.readfromstore,
+    wrapper.writetostore,
+    wrapper.bypasscheckhash,
+)
+
 def featuresetup(ui, supported):
     # don't die on seeing a repo with the lfs requirement
     supported |= {'lfs'}
@@ -244,6 +252,7 @@
                 if any(ctx[f].islfs() for f in ctx.files()
                        if f in ctx and match(f)):
                     repo.requirements.add('lfs')
+                    repo.features.add(repository.REPO_FEATURE_LFS)
                     repo._writerequirements()
                     repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
                     break
@@ -263,7 +272,7 @@
         # deprecated config: lfs.threshold
         threshold = repo.ui.configbytes('lfs', 'threshold')
         if threshold:
-            fileset.parse(trackspec)  # make sure syntax errors are confined
+            filesetlang.parse(trackspec)  # make sure syntax errors are confined
             trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
 
         return minifileset.compile(trackspec)
@@ -303,11 +312,29 @@
     wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
     wrapfunction(filelog, 'size', wrapper.filelogsize)
 
+def _resolverevlogstorevfsoptions(orig, ui, requirements, features):
+    opts = orig(ui, requirements, features)
+    for name, module in extensions.extensions(ui):
+        if module is sys.modules[__name__]:
+            if revlog.REVIDX_EXTSTORED in opts[b'flagprocessors']:
+                msg = (_(b"cannot register multiple processors on flag '%#x'.")
+                       % revlog.REVIDX_EXTSTORED)
+                raise error.Abort(msg)
+
+            opts[b'flagprocessors'][revlog.REVIDX_EXTSTORED] = lfsprocessor
+            break
+
+    return opts
+
 def extsetup(ui):
     wrapfilelog(filelog.filelog)
 
     wrapfunction = extensions.wrapfunction
 
+    wrapfunction(localrepo, 'makefilestorage', wrapper.localrepomakefilestorage)
+    wrapfunction(localrepo, 'resolverevlogstorevfsoptions',
+                 _resolverevlogstorevfsoptions)
+
     wrapfunction(cmdutil, '_updatecatformatter', wrapper._updatecatformatter)
     wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
 
@@ -333,18 +360,6 @@
     wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
     context.basefilectx.islfs = wrapper.filectxislfs
 
-    revlog.addflagprocessor(
-        revlog.REVIDX_EXTSTORED,
-        (
-            wrapper.readfromstore,
-            wrapper.writetostore,
-            wrapper.bypasscheckhash,
-        ),
-    )
-
-    wrapfunction(hg, 'clone', wrapper.hgclone)
-    wrapfunction(hg, 'postshare', wrapper.hgpostshare)
-
     scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles)
 
     # Make bundle choose changegroup3 instead of changegroup2. This affects
@@ -359,11 +374,11 @@
     # when writing a bundle via "hg bundle" command, upload related LFS blobs
     wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
 
-@filesetpredicate('lfs()', callstatus=True)
+@filesetpredicate('lfs()')
 def lfsfileset(mctx, x):
     """File that uses LFS storage."""
     # i18n: "lfs" is a keyword
-    fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+    filesetlang.getargs(x, 0, 0, _("lfs takes no arguments"))
     ctx = mctx.ctx
     def lfsfilep(f):
         return wrapper.pointerfromctx(ctx, f, removed=True) is not None
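
The new _resolverevlogstorevfsoptions wrapper above registers the
REVIDX_EXTSTORED flag processor per repository (through the revlog vfs
options) instead of process-globally, and it recognizes this extension
among the loaded ones by module identity. A self-contained sketch of that
identity check, with an illustrative function name:

    import sys

    from mercurial import extensions

    def _thisextensionloaded(ui):
        # Compare module objects, not names: only the module actually
        # loaded as an extension is identical to sys.modules[__name__].
        for name, module in extensions.extensions(ui):
            if module is sys.modules[__name__]:
                return True
        return False
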
--- a/hgext/lfs/blobstore.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/lfs/blobstore.py	Mon Oct 22 14:46:06 2018 -0400
@@ -168,6 +168,20 @@
 
         self._linktousercache(oid)
 
+    def linkfromusercache(self, oid):
+        """Link blobs found in the user cache into this store.
+
+        The server module needs to do this when it lets the client know not to
+        upload the blob, to ensure it is always available in this store.
+        Normally this is done implicitly when the client reads or writes the
+        blob, but that doesn't happen when the server tells the client that it
+        already has the blob.
+        """
+        if (not isinstance(self.cachevfs, nullvfs)
+            and not self.vfs.exists(oid)):
+            self.ui.note(_('lfs: found %s in the usercache\n') % oid)
+            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
+
     def _linktousercache(self, oid):
         # XXX: should we verify the content of the cache, and hardlink back to
         # the local store on success, but truncate, write and link on failure?
@@ -405,8 +419,7 @@
         if len(objects) > 1:
             self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                          % (len(objects), util.bytecount(total)))
-        progress = self.ui.makeprogress(topic, total=total)
-        progress.update(0)
+
         def transfer(chunk):
             for obj in chunk:
                 objsize = obj.get('size', 0)
@@ -439,14 +452,15 @@
         else:
             oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
 
-        processed = 0
-        blobs = 0
-        for _one, oid in oids:
-            processed += sizes[oid]
-            blobs += 1
-            progress.update(processed)
-            self.ui.note(_('lfs: processed: %s\n') % oid)
-        progress.complete()
+        with self.ui.makeprogress(topic, total=total) as progress:
+            progress.update(0)
+            processed = 0
+            blobs = 0
+            for _one, oid in oids:
+                processed += sizes[oid]
+                blobs += 1
+                progress.update(processed)
+                self.ui.note(_('lfs: processed: %s\n') % oid)
 
         if blobs > 0:
             if action == 'upload':
@@ -572,7 +586,7 @@
         raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
     return _storemap[scheme](repo, url)
 
-class LfsRemoteError(error.RevlogError):
+class LfsRemoteError(error.StorageError):
     pass
 
 class LfsCorruptionError(error.Abort):
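
linkfromusercache() above leans on lfutil.link(), which hardlinks when the
filesystem allows it and copies otherwise. A simplified sketch of that
contract (the real helper also handles directory creation and atomicity):

    import os
    import shutil

    def link(src, dest):
        # Prefer a hardlink so the store and the user cache share one
        # copy of the blob; fall back to a plain copy on filesystems
        # that do not support links.
        try:
            os.link(src, dest)
        except OSError:
            shutil.copyfile(src, dest)
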
--- a/hgext/lfs/pointer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/lfs/pointer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,7 @@
     stringutil,
 )
 
-class InvalidPointer(error.RevlogError):
+class InvalidPointer(error.StorageError):
     pass
 
 class gitlfspointer(dict):
--- a/hgext/lfs/wireprotolfsserver.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/lfs/wireprotolfsserver.py	Mon Oct 22 14:46:06 2018 -0400
@@ -204,6 +204,10 @@
         # verified as the file is streamed to the caller.
         try:
             verifies = store.verify(oid)
+            if verifies and action == 'upload':
+                # The client will skip this upload, but make sure it remains
+                # available locally.
+                store.linkfromusercache(oid)
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 _logexception(req)
--- a/hgext/lfs/wrapper.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/lfs/wrapper.py	Mon Oct 22 14:46:06 2018 -0400
@@ -14,11 +14,13 @@
 
 from mercurial import (
     error,
+    repository,
     revlog,
     util,
 )
 
 from mercurial.utils import (
+    storageutil,
     stringutil,
 )
 
@@ -29,6 +31,12 @@
     pointer,
 )
 
+def localrepomakefilestorage(orig, requirements, features, **kwargs):
+    if b'lfs' in requirements:
+        features.add(repository.REPO_FEATURE_LFS)
+
+    return orig(requirements=requirements, features=features, **kwargs)
+
 def allsupportedversions(orig, ui):
     versions = orig(ui)
     versions.add('03')
@@ -38,7 +46,13 @@
     '''Wrap server command to announce lfs server capability'''
     caps = orig(repo, proto)
     if util.safehasattr(repo.svfs, 'lfslocalblobstore'):
-        # XXX: change to 'lfs=serve' when separate git server isn't required?
+        # Advertise a slightly different capability when lfs is *required*, so
+        # that the client knows it MUST load the extension.  If lfs is not
+        # required on the server, there's no reason to autoload the extension
+        # on the client.
+        if b'lfs' in repo.requirements:
+            caps.append('lfs-serve')
+
         caps.append('lfs')
     return caps
 
@@ -69,13 +83,13 @@
             name = k[len('x-hg-'):]
             hgmeta[name] = p[k]
     if hgmeta or text.startswith('\1\n'):
-        text = revlog.packmeta(hgmeta, text)
+        text = storageutil.packmeta(hgmeta, text)
 
     return (text, True)
 
 def writetostore(self, text):
     # hg filelog metadata (includes rename, etc)
-    hgmeta, offset = revlog.parsemeta(text)
+    hgmeta, offset = storageutil.parsemeta(text)
     if offset and offset > 0:
         # lfs blob does not contain hg filelog metadata
         text = text[offset:]
@@ -108,28 +122,28 @@
         if node is None:
             # both None - likely working copy content where node is not ready
             return False
-        rev = rlog.rev(node)
+        rev = rlog._revlog.rev(node)
     else:
-        node = rlog.node(rev)
+        node = rlog._revlog.node(rev)
     if node == nullid:
         return False
-    flags = rlog.flags(rev)
+    flags = rlog._revlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)
 
 def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                        cachedelta=None, node=None,
                        flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
     # The matcher isn't available if reposetup() wasn't called.
-    lfstrack = self.opener.options.get('lfstrack')
+    lfstrack = self._revlog.opener.options.get('lfstrack')
 
     if lfstrack:
         textlen = len(text)
         # exclude hg rename meta from file size
-        meta, offset = revlog.parsemeta(text)
+        meta, offset = storageutil.parsemeta(text)
         if offset:
             textlen -= offset
 
-        if lfstrack(self.filename, textlen):
+        if lfstrack(self._revlog.filename, textlen):
             flags |= revlog.REVIDX_EXTSTORED
 
     return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
@@ -137,7 +151,7 @@
 
 def filelogrenamed(orig, self, node):
     if _islfs(self, node):
-        rawtext = self.revision(node, raw=True)
+        rawtext = self._revlog.revision(node, raw=True)
         if not rawtext:
             return False
         metadata = pointer.deserialize(rawtext)
@@ -150,7 +164,7 @@
 def filelogsize(orig, self, rev):
     if _islfs(self, rev=rev):
         # fast path: use lfs metadata to answer size
-        rawtext = self.revision(rev, raw=True)
+        rawtext = self._revlog.revision(rev, raw=True)
         metadata = pointer.deserialize(rawtext)
         return int(metadata['size'])
     return orig(self, rev)
@@ -199,10 +213,6 @@
                         self.repo.requirements.add('lfs')
                         self.repo._writerequirements()
 
-                        # Permanently enable lfs locally
-                        self.repo.vfs.append(
-                            'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
-
                 return node
 
         sink.__class__ = lfssink
@@ -221,32 +231,6 @@
         if util.safehasattr(othervfs, name):
             setattr(self, name, getattr(othervfs, name))
 
-def hgclone(orig, ui, opts, *args, **kwargs):
-    result = orig(ui, opts, *args, **kwargs)
-
-    if result is not None:
-        sourcerepo, destrepo = result
-        repo = destrepo.local()
-
-        # When cloning to a remote repo (like through SSH), no repo is available
-        # from the peer.  Therefore the hgrc can't be updated.
-        if not repo:
-            return result
-
-        # If lfs is required for this repo, permanently enable it locally
-        if 'lfs' in repo.requirements:
-            repo.vfs.append('hgrc',
-                            util.tonativeeol('\n[extensions]\nlfs=\n'))
-
-    return result
-
-def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
-    orig(sourcerepo, destrepo, bookmarks, defaultpath)
-
-    # If lfs is required for this repo, permanently enable it locally
-    if 'lfs' in destrepo.requirements:
-        destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
-
 def _prefetchfiles(repo, revs, match):
     """Ensure that required LFS blobs are present, fetching them as a group if
     needed."""
@@ -343,11 +327,15 @@
     """return a list of lfs pointers added by given revs"""
     repo.ui.debug('lfs: computing set of blobs to upload\n')
     pointers = {}
-    for r in revs:
-        ctx = repo[r]
-        for p in pointersfromctx(ctx).values():
-            pointers[p.oid()] = p
-    return sorted(pointers.values())
+
+    makeprogress = repo.ui.makeprogress
+    with makeprogress(_('lfs search'), _('changesets'), len(revs)) as progress:
+        for r in revs:
+            ctx = repo[r]
+            for p in pointersfromctx(ctx).values():
+                pointers[p.oid()] = p
+            progress.increment()
+        return sorted(pointers.values(), key=lambda p: p.oid())
 
 def pointerfromctx(ctx, f, removed=False):
     """return a pointer for the named file from the given changectx, or None if
@@ -386,7 +374,12 @@
     stored for the path is an empty dict.
     """
     result = {}
+    m = ctx.repo().narrowmatch()
+
+    # TODO: consider manifest.fastread() instead
     for f in ctx.files():
+        if not m(f):
+            continue
         p = pointerfromctx(ctx, f, removed=removed)
         if p is not None:
             result[f] = p
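
Several hunks above swap revlog.packmeta/parsemeta for their new homes in
mercurial.utils.storageutil; the framing itself is unchanged. A sketch of
the pack side, to the extent the filelog metadata format is documented:
sorted 'key: value' lines between a pair of \1\n markers, prepended to the
raw text:

    def packmeta(meta, text):
        # keys are emitted sorted so the framing is deterministic
        metatext = b''.join(b'%s: %s\n' % (k, meta[k])
                            for k in sorted(meta))
        return b'\x01\n%s\x01\n%s' % (metatext, text)
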
--- a/hgext/logtoprocess.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/logtoprocess.py	Mon Oct 22 14:46:06 2018 -0400
@@ -40,10 +40,13 @@
 import sys
 
 from mercurial import (
-    encoding,
     pycompat,
 )
 
+from mercurial.utils import (
+    procutil,
+)
+
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -62,7 +65,8 @@
             # we can't use close_fds *and* redirect stdin. I'm not sure that we
             # need to because the detached process has no console connection.
             subprocess.Popen(
-                script, shell=True, env=env, close_fds=True,
+                procutil.tonativestr(script),
+                shell=True, env=procutil.tonativeenv(env), close_fds=True,
                 creationflags=_creationflags)
     else:
         def runshellcommand(script, env):
@@ -79,10 +83,17 @@
             else:
                 newsession = {'start_new_session': True}
             try:
-                # connect stdin to devnull to make sure the subprocess can't
-                # muck up that stream for mercurial.
+                # connect std* to devnull to make sure the subprocess can't
+                # muck up these streams for mercurial.
+                # Connecting all the streams mirrors Windows behavior more
+                # closely; without it, the pager waits for the scripts to end.
+                nullrfd = open(os.devnull, 'r')
+                nullwfd = open(os.devnull, 'w')
                 subprocess.Popen(
-                    script, shell=True, stdin=open(os.devnull, 'r'), env=env,
+                    procutil.tonativestr(script),
+                    shell=True, stdin=nullrfd,
+                    stdout=nullwfd, stderr=nullwfd,
+                    env=procutil.tonativeenv(env),
                     close_fds=True, **newsession)
             finally:
                 # mission accomplished, this child needs to exit and not
@@ -102,10 +113,8 @@
                     # try to format the log message given the remaining
                     # arguments
                     try:
-                        # Python string formatting with % either uses a
-                        # dictionary *or* tuple, but not both. If we have
-                        # keyword options, assume we need a mapping.
-                        formatted = msg[0] % (opts or msg[1:])
+                        # Format the message as blackbox does
+                        formatted = msg[0] % msg[1:]
                     except (TypeError, KeyError):
                         # Failed to apply the arguments, ignore
                         formatted = msg[0]
@@ -121,7 +130,7 @@
                 optpairs = (
                     ('OPT_{0}'.format(key.upper()), str(value))
                     for key, value in opts.iteritems())
-                env = dict(itertools.chain(encoding.environ.items(),
+                env = dict(itertools.chain(procutil.shellenviron().items(),
                                            msgpairs, optpairs),
                            EVENT=event, HGPID=str(os.getpid()))
                 runshellcommand(script, env)
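
The POSIX branch above now detaches the logging child into its own session
and points every standard stream at os.devnull. A stand-alone sketch of
that spawn, assuming Python 3 (the hunk itself still spells the Python 2
variant via preexec_fn):

    import os
    import subprocess

    def spawndetached(script, env):
        # stdin/stdout/stderr all go to /dev/null so the child can
        # neither consume nor pollute Mercurial's streams, and the
        # pager never ends up waiting on it.
        nullrfd = open(os.devnull, 'r')
        nullwfd = open(os.devnull, 'w')
        subprocess.Popen(script, shell=True, env=env,
                         stdin=nullrfd, stdout=nullwfd, stderr=nullwfd,
                         close_fds=True, start_new_session=True)
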
--- a/hgext/mq.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/mq.py	Mon Oct 22 14:46:06 2018 -0400
@@ -414,7 +414,7 @@
         the field and a blank line.'''
         if self.message:
             subj = 'subject: ' + self.message[0].lower()
-            for i in xrange(len(self.comments)):
+            for i in pycompat.xrange(len(self.comments)):
                 if subj == self.comments[i].lower():
                     del self.comments[i]
                     self.message = self.message[2:]
@@ -662,13 +662,13 @@
         exactneg = [g for g in patchguards
                     if g.startswith('-') and g[1:] in guards]
         if exactneg:
-            return False, pycompat.byterepr(exactneg[0])
+            return False, stringutil.pprint(exactneg[0])
         pos = [g for g in patchguards if g.startswith('+')]
         exactpos = [g for g in pos if g[1:] in guards]
         if pos:
             if exactpos:
-                return True, pycompat.byterepr(exactpos[0])
-            return False, ' '.join([pycompat.byterepr(p) for p in pos])
+                return True, stringutil.pprint(exactpos[0])
+            return False, ' '.join([stringutil.pprint(p) for p in pos])
         return True, ''
 
     def explainpushable(self, idx, all_patches=False):
@@ -980,10 +980,10 @@
                 files += mergedsubstate.keys()
 
             match = scmutil.matchfiles(repo, files or [])
-            oldtip = repo['tip']
+            oldtip = repo.changelog.tip()
             n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                           force=True)
-            if repo['tip'] == oldtip:
+            if repo.changelog.tip() == oldtip:
                 raise error.Abort(_("qpush exactly duplicates child changeset"))
             if n is None:
                 raise error.Abort(_("repository commit failed"))
@@ -1800,7 +1800,7 @@
                 # if the patch excludes a modified file, mark that
                 # file with mtime=0 so status can see it.
                 mm = []
-                for i in xrange(len(m) - 1, -1, -1):
+                for i in pycompat.xrange(len(m) - 1, -1, -1):
                     if not match1(m[i]):
                         mm.append(m[i])
                         del m[i]
@@ -1908,7 +1908,7 @@
         else:
             start = self.series.index(patch) + 1
         unapplied = []
-        for i in xrange(start, len(self.series)):
+        for i in pycompat.xrange(start, len(self.series)):
             pushable, reason = self.pushable(i)
             if pushable:
                 unapplied.append((i, self.series[i]))
@@ -1946,7 +1946,7 @@
         if not missing:
             if self.ui.verbose:
                 idxwidth = len("%d" % (start + length - 1))
-            for i in xrange(start, start + length):
+            for i in pycompat.xrange(start, start + length):
                 patch = self.series[i]
                 if patch in applied:
                     char, state = 'A', 'applied'
@@ -2091,7 +2091,7 @@
         def nextpatch(start):
             if all_patches or start >= len(self.series):
                 return start
-            for i in xrange(start, len(self.series)):
+            for i in pycompat.xrange(start, len(self.series)):
                 p, reason = self.pushable(i)
                 if p:
                     return i
@@ -2266,7 +2266,8 @@
          [('k', 'keep', None, _('keep patch file')),
           ('r', 'rev', [],
            _('stop managing a revision (DEPRECATED)'), _('REV'))],
-         _('hg qdelete [-k] [PATCH]...'))
+         _('hg qdelete [-k] [PATCH]...'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def delete(ui, repo, *patches, **opts):
     """remove patches from queue
 
@@ -2284,7 +2285,8 @@
 @command("qapplied",
          [('1', 'last', None, _('show only the preceding applied patch'))
           ] + seriesopts,
-         _('hg qapplied [-1] [-s] [PATCH]'))
+         _('hg qapplied [-1] [-s] [PATCH]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def applied(ui, repo, patch=None, **opts):
     """print the patches already applied
 
@@ -2318,7 +2320,8 @@
 
 @command("qunapplied",
          [('1', 'first', None, _('show only the first patch'))] + seriesopts,
-         _('hg qunapplied [-1] [-s] [PATCH]'))
+         _('hg qunapplied [-1] [-s] [PATCH]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def unapplied(ui, repo, patch=None, **opts):
     """print the patches not yet applied
 
@@ -2353,7 +2356,8 @@
            _('place existing revisions under mq control'), _('REV')),
           ('g', 'git', None, _('use git extended diff format')),
           ('P', 'push', None, _('qpush after importing'))],
-         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
+         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
+         helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def qimport(ui, repo, *filename, **opts):
     """import a patch or existing changeset
 
@@ -2429,9 +2433,11 @@
         commands.add(ui, r)
     return 0
 
-@command("^qinit",
+@command("qinit",
          [('c', 'create-repo', None, _('create queue repository'))],
-         _('hg qinit [-c]'))
+         _('hg qinit [-c]'),
+         helpcategory=command.CATEGORY_REPO_CREATION,
+         helpbasic=True)
 def init(ui, repo, **opts):
     """init a new queue repository (DEPRECATED)
 
@@ -2455,6 +2461,7 @@
            _('location of source patch repository'), _('REPO')),
          ] + cmdutil.remoteopts,
          _('hg qclone [OPTION]... SOURCE [DEST]'),
+         helpcategory=command.CATEGORY_REPO_CREATION,
          norepo=True)
 def clone(ui, source, dest=None, **opts):
     '''clone main and patch repository at same time
@@ -2534,8 +2541,9 @@
             hg.update(repo, repo.changelog.tip())
 
 @command("qcommit|qci",
-         commands.table["^commit|ci"][1],
+         commands.table["commit|ci"][1],
          _('hg qcommit [OPTION]... [FILE]...'),
+         helpcategory=command.CATEGORY_COMMITTING,
          inferrepo=True)
 def commit(ui, repo, *pats, **opts):
     """commit changes in the queue repository (DEPRECATED)
@@ -2550,7 +2558,8 @@
 @command("qseries",
          [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
-          _('hg qseries [-ms]'))
+          _('hg qseries [-ms]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def series(ui, repo, **opts):
     """print the entire series file
 
@@ -2559,7 +2568,8 @@
                     summary=opts.get(r'summary'))
     return 0
 
-@command("qtop", seriesopts, _('hg qtop [-s]'))
+@command("qtop", seriesopts, _('hg qtop [-s]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def top(ui, repo, **opts):
     """print the name of the current patch
 
@@ -2577,7 +2587,8 @@
         ui.write(_("no patches applied\n"))
         return 1
 
-@command("qnext", seriesopts, _('hg qnext [-s]'))
+@command("qnext", seriesopts, _('hg qnext [-s]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def next(ui, repo, **opts):
     """print the name of the next pushable patch
 
@@ -2589,7 +2600,8 @@
         return 1
     q.qseries(repo, start=end, length=1, summary=opts.get(r'summary'))
 
-@command("qprev", seriesopts, _('hg qprev [-s]'))
+@command("qprev", seriesopts, _('hg qprev [-s]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def prev(ui, repo, **opts):
     """print the name of the preceding applied patch
 
@@ -2612,7 +2624,7 @@
     if not opts.get('date') and opts.get('currentdate'):
         opts['date'] = "%d %d" % dateutil.makedate()
 
-@command("^qnew",
+@command("qnew",
          [('e', 'edit', None, _('invoke editor on commit messages')),
           ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
           ('g', 'git', None, _('use git extended diff format')),
@@ -2624,6 +2636,7 @@
            _('add "Date: <DATE>" to patch'), _('DATE'))
           ] + cmdutil.walkopts + cmdutil.commitopts,
          _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
+         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
          inferrepo=True)
 def new(ui, repo, patch, *args, **opts):
     """create a new patch
@@ -2659,7 +2672,7 @@
     q.savedirty()
     return 0
 
-@command("^qrefresh",
+@command("qrefresh",
          [('e', 'edit', None, _('invoke editor on commit messages')),
           ('g', 'git', None, _('use git extended diff format')),
           ('s', 'short', None,
@@ -2674,6 +2687,7 @@
            _('add/update date field in patch with given date'), _('DATE'))
           ] + cmdutil.walkopts + cmdutil.commitopts,
          _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
+         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
          inferrepo=True)
 def refresh(ui, repo, *pats, **opts):
     """update the current patch
@@ -2705,9 +2719,10 @@
         q.savedirty()
         return ret
 
-@command("^qdiff",
+@command("qdiff",
          cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
          _('hg qdiff [OPTION]... [FILE]...'),
+         helpcategory=command.CATEGORY_FILE_CONTENTS, helpbasic=True,
          inferrepo=True)
 def diff(ui, repo, *pats, **opts):
     """diff of the current patch and subsequent modifications
@@ -2732,7 +2747,8 @@
          [('e', 'edit', None, _('invoke editor on commit messages')),
           ('k', 'keep', None, _('keep folded patch files')),
          ] + cmdutil.commitopts,
-         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
+         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
+         helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def fold(ui, repo, *files, **opts):
     """fold the named patches into the current patch
 
@@ -2801,7 +2817,8 @@
            _('tolerate non-conflicting local changes')),
           ('f', 'force', None, _('overwrite any local changes')),
           ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qgoto [OPTION]... PATCH'))
+         _('hg qgoto [OPTION]... PATCH'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def goto(ui, repo, patch, **opts):
     '''push or pop patches until named patch is at top of stack
 
@@ -2824,7 +2841,8 @@
 @command("qguard",
          [('l', 'list', None, _('list all patches and guards')),
           ('n', 'none', None, _('drop all guards'))],
-         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
+         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def guard(ui, repo, *args, **opts):
     '''set or print guards for a patch
 
@@ -2876,7 +2894,7 @@
         if args or opts.get(r'none'):
             raise error.Abort(_('cannot mix -l/--list with options or '
                                'arguments'))
-        for i in xrange(len(q.series)):
+        for i in pycompat.xrange(len(q.series)):
             status(i)
         return
     if not args or args[0][0:1] in '-+':
@@ -2896,7 +2914,8 @@
     else:
         status(q.series.index(q.lookup(patch)))
 
-@command("qheader", [], _('hg qheader [PATCH]'))
+@command("qheader", [], _('hg qheader [PATCH]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def header(ui, repo, patch=None):
     """print the header of the topmost or specified patch
 
@@ -2938,7 +2957,7 @@
     newpath = path + ".%d" % (index + 1)
     return newpath
 
-@command("^qpush",
+@command("qpush",
          [('', 'keep-changes', None,
            _('tolerate non-conflicting local changes')),
           ('f', 'force', None, _('apply on top of local changes')),
@@ -2952,7 +2971,9 @@
           ('', 'move', None,
            _('reorder patch series and apply only the patch')),
           ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
+         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+         helpbasic=True)
 def push(ui, repo, patch=None, **opts):
     """push the next patch onto the stack
 
@@ -2984,7 +3005,7 @@
                  keepchanges=opts.get('keep_changes'))
     return ret
 
-@command("^qpop",
+@command("qpop",
          [('a', 'all', None, _('pop all patches')),
           ('n', 'name', '',
            _('queue name to pop (DEPRECATED)'), _('NAME')),
@@ -2992,7 +3013,9 @@
            _('tolerate non-conflicting local changes')),
           ('f', 'force', None, _('forget any local changes to patched files')),
           ('', 'no-backup', None, _('do not save backup copies of files'))],
-         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
+         _('hg qpop [-a] [-f] [PATCH | INDEX]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+         helpbasic=True)
 def pop(ui, repo, patch=None, **opts):
     """pop the current patch off the stack
 
@@ -3022,7 +3045,8 @@
     q.savedirty()
     return ret
 
-@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
+@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def rename(ui, repo, patch, name=None, **opts):
     """rename a patch
 
@@ -3080,7 +3104,8 @@
 @command("qrestore",
          [('d', 'delete', None, _('delete save entry')),
           ('u', 'update', None, _('update queue working directory'))],
-         _('hg qrestore [-d] [-u] REV'))
+         _('hg qrestore [-d] [-u] REV'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def restore(ui, repo, rev, **opts):
     """restore the queue state saved by a revision (DEPRECATED)
 
@@ -3098,7 +3123,8 @@
            _('copy directory name'), _('NAME')),
           ('e', 'empty', None, _('clear queue status file')),
           ('f', 'force', None, _('force copy'))] + cmdutil.commitopts,
-         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
+         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def save(ui, repo, **opts):
     """save current queue state (DEPRECATED)
 
@@ -3137,7 +3163,8 @@
           ('s', 'series', None, _('list all guards in series file')),
           ('', 'pop', None, _('pop to before first guarded applied patch')),
           ('', 'reapply', None, _('pop, then reapply patches'))],
-         _('hg qselect [OPTION]... [GUARD]...'))
+         _('hg qselect [OPTION]... [GUARD]...'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def select(ui, repo, *args, **opts):
     '''set or print guarded patches to push
 
@@ -3179,14 +3206,16 @@
     pushable = lambda i: q.pushable(q.applied[i].name)[0]
     if args or opts.get('none'):
         old_unapplied = q.unapplied(repo)
-        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+        old_guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
         q.setactive(args)
         q.savedirty()
         if not args:
             ui.status(_('guards deactivated\n'))
         if not opts.get('pop') and not opts.get('reapply'):
             unapplied = q.unapplied(repo)
-            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+            guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
             if len(unapplied) != len(old_unapplied):
                 ui.status(_('number of unguarded, unapplied patches has '
                             'changed from %d to %d\n') %
@@ -3225,7 +3254,7 @@
     reapply = opts.get('reapply') and q.applied and q.applied[-1].name
     popped = False
     if opts.get('pop') or opts.get('reapply'):
-        for i in xrange(len(q.applied)):
+        for i in pycompat.xrange(len(q.applied)):
             if not pushable(i):
                 ui.status(_('popping guarded patches\n'))
                 popped = True
@@ -3244,7 +3273,8 @@
 
 @command("qfinish",
          [('a', 'applied', None, _('finish all applied changesets'))],
-         _('hg qfinish [-a] [REV]...'))
+         _('hg qfinish [-a] [REV]...'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def finish(ui, repo, *revrange, **opts):
     """move applied patches into repository history
 
@@ -3292,7 +3322,8 @@
           ('', 'delete', False, _('delete reference to queue')),
           ('', 'purge', False, _('delete queue, and remove patch dir')),
          ],
-         _('[OPTION] [QUEUE]'))
+         _('[OPTION] [QUEUE]'),
+         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def qqueue(ui, repo, name=None, **opts):
     '''manage multiple patch queues
 
@@ -3480,9 +3511,9 @@
 
         def invalidateall(self):
             super(mqrepo, self).invalidateall()
-            if localrepo.hasunfilteredcache(self, 'mq'):
+            if localrepo.hasunfilteredcache(self, r'mq'):
                 # recreate mq in case queue path was changed
-                delattr(self.unfiltered(), 'mq')
+                delattr(self.unfiltered(), r'mq')
 
         def abortifwdirpatched(self, errmsg, force=False):
             if self.mq.applied and self.mq.checkapplied and not force:
@@ -3583,7 +3614,7 @@
             raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
     else:
-        repopath = cmdutil.findrepo(pycompat.getcwd())
+        repopath = cmdutil.findrepo(encoding.getcwd())
         if not repopath:
             raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
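
The mechanical xrange -> pycompat.xrange edits above rely on a tiny
compatibility shim; the idea, reduced to its essentials (mercurial.pycompat
provides the real one):

    import sys

    if sys.version_info[0] >= 3:
        xrange = range      # Python 3: range is already lazy
    else:
        xrange = xrange     # Python 2: keep the builtin
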
--- a/hgext/narrow/TODO.rst	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/TODO.rst	Mon Oct 22 14:46:06 2018 -0400
@@ -1,37 +1,24 @@
 Integration with the share extension needs improvement. Right now
-we've seen some odd bugs, and the way we modify the contents of the
-.hg/shared file is unfortunate. See wrappostshare() and unsharenarrowspec().
+we've seen some odd bugs.
 
-Resolve commentary on narrowrepo.wraprepo.narrowrepository.status
-about the filtering of status being done at an awkward layer. This
-came up the import to hgext, but nobody's got concrete improvement
-ideas as of then.
-
-Fold most (or preferably all) of narrowrevlog.py into core.
-
-Address commentary in narrowrevlog.excludedmanifestrevlog.add -
+Address commentary in manifest.excludedmanifestrevlog.add -
 specifically we should improve the collaboration with core so that
 add() never gets called on an excluded directory and we can improve
 the stand-in to raise a ProgrammingError.
 
-Figure out how to correctly produce narrowmanifestrevlog and
-narrowfilelog instances instead of monkeypatching regular revlogs at
-runtime to our subclass. Even better, merge the narrowing logic
-directly into core.
-
 Reason more completely about rename-filtering logic in
 narrowfilelog. There could be some surprises lurking there.
 
-Formally document the narrowspec format. Unify with sparse, if at all
-possible. For bonus points, unify with the server-specified narrowspec
-format.
+Formally document the narrowspec format. For bonus points, unify with the
+server-specified narrowspec format.
 
 narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
 they're holding the wlock.
 
-Implement a simple version of the expandnarrow wireproto command for
-core. Having configurable shorthands for narrowspecs has been useful
-at Google (and sparse has a similar feature from Facebook), so it
-probably makes sense to implement the feature in core. (Google's
-handler is entirely custom to Google, with a custom format related to
-bazel's build language, so it's not in the narrowhg distribution.)
+The following places do an unrestricted dirstate walk (including files outside the
+narrowspec). Some of them should perhaps not do that.
+
+ * debugfileset
+ * perfwalk
+ * sparse (but restricted to sparse config)
+ * largefiles
--- a/hgext/narrow/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -15,22 +15,15 @@
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
-    changegroup,
-    extensions,
-    hg,
     localrepo,
     registrar,
-    verify as verifymod,
+    repository,
 )
 
 from . import (
     narrowbundle2,
-    narrowchangegroup,
     narrowcommands,
-    narrowcopies,
-    narrowpatch,
     narrowrepo,
-    narrowrevlog,
     narrowtemplates,
     narrowwirepeer,
 )
@@ -55,15 +48,13 @@
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
-    features.add(changegroup.NARROW_REQUIREMENT)
+    features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
     localrepo.featuresetupfuncs.add(featuresetup)
-    narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
-    narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
@@ -71,23 +62,10 @@
     if not repo.local():
         return
 
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    repo.ui.setconfig('experimental', 'narrow', True, 'narrow-ext')
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
-        narrowcopies.setup(repo)
-        narrowpatch.setup(repo)
         narrowwirepeer.reposetup(repo)
 
-def _verifierinit(orig, self, repo, matcher=None):
-    # The verifier's matcher argument was desgined for narrowhg, so it should
-    # be None from core. If another extension passes a matcher (unlikely),
-    # we'll have to fail until matchers can be composed more easily.
-    assert matcher is None
-    orig(self, repo, repo.narrowmatch())
-
-def extsetup(ui):
-    extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
-    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
-
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
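
With extsetup() gone, the requirement gating above runs entirely through
the featuresetup hook registered in uisetup(). The shape of that hook,
reduced to its essentials and using the requirement constant this release
moved into mercurial.repository:

    from mercurial import localrepo, repository

    def featuresetup(ui, features):
        # repos carrying the narrow requirement are supported whenever
        # this extension is loaded
        features.add(repository.NARROW_REQUIREMENT)

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
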
--- a/hgext/narrow/narrowbundle2.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowbundle2.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import collections
 import errno
 import struct
 
@@ -15,17 +14,16 @@
 from mercurial.node import (
     bin,
     nullid,
-    nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
-    dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
+    repository,
     util,
     wireprototypes,
 )
@@ -52,171 +50,12 @@
     caps[NARROWCAP] = ['v0']
     return caps
 
-def _computeellipsis(repo, common, heads, known, match, depth=None):
-    """Compute the shape of a narrowed DAG.
-
-    Args:
-      repo: The repository we're transferring.
-      common: The roots of the DAG range we're transferring.
-              May be just [nullid], which means all ancestors of heads.
-      heads: The heads of the DAG range we're transferring.
-      match: The narrowmatcher that allows us to identify relevant changes.
-      depth: If not None, only consider nodes to be full nodes if they are at
-             most depth changesets away from one of heads.
-
-    Returns:
-      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
-
-        visitnodes: The list of nodes (either full or ellipsis) which
-                    need to be sent to the client.
-        relevant_nodes: The set of changelog nodes which change a file inside
-                 the narrowspec. The client needs these as non-ellipsis nodes.
-        ellipsisroots: A dict of {rev: parents} that is used in
-                       narrowchangegroup to produce ellipsis nodes with the
-                       correct parents.
-    """
-    cl = repo.changelog
-    mfl = repo.manifestlog
-
-    cldag = dagutil.revlogdag(cl)
-    # dagutil does not like nullid/nullrev
-    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
-    headsrevs = cldag.internalizeall(heads)
-    if depth:
-        revdepth = {h: 0 for h in headsrevs}
-
-    ellipsisheads = collections.defaultdict(set)
-    ellipsisroots = collections.defaultdict(set)
-
-    def addroot(head, curchange):
-        """Add a root to an ellipsis head, splitting heads with 3 roots."""
-        ellipsisroots[head].add(curchange)
-        # Recursively split ellipsis heads with 3 roots by finding the
-        # roots' youngest common descendant which is an elided merge commit.
-        # That descendant takes 2 of the 3 roots as its own, and becomes a
-        # root of the head.
-        while len(ellipsisroots[head]) > 2:
-            child, roots = splithead(head)
-            splitroots(head, child, roots)
-            head = child  # Recurse in case we just added a 3rd root
-
-    def splitroots(head, child, roots):
-        ellipsisroots[head].difference_update(roots)
-        ellipsisroots[head].add(child)
-        ellipsisroots[child].update(roots)
-        ellipsisroots[child].discard(child)
-
-    def splithead(head):
-        r1, r2, r3 = sorted(ellipsisroots[head])
-        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
-            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
-                            nr1, head, nr2, head)
-            for j in mid:
-                if j == nr2:
-                    return nr2, (nr1, nr2)
-                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
-                    return j, (nr1, nr2)
-        raise error.Abort('Failed to split up ellipsis node! head: %d, '
-                          'roots: %d %d %d' % (head, r1, r2, r3))
-
-    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
-    visit = reversed(missing)
-    relevant_nodes = set()
-    visitnodes = [cl.node(m) for m in missing]
-    required = set(headsrevs) | known
-    for rev in visit:
-        clrev = cl.changelogrevision(rev)
-        ps = cldag.parents(rev)
-        if depth is not None:
-            curdepth = revdepth[rev]
-            for p in ps:
-                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
-        needed = False
-        shallow_enough = depth is None or revdepth[rev] <= depth
-        if shallow_enough:
-            curmf = mfl[clrev.manifest].read()
-            if ps:
-                # We choose to not trust the changed files list in
-                # changesets because it's not always correct. TODO: could
-                # we trust it for the non-merge case?
-                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
-                needed = bool(curmf.diff(p1mf, match))
-                if not needed and len(ps) > 1:
-                    # For merge changes, the list of changed files is not
-                    # helpful, since we need to emit the merge if a file
-                    # in the narrow spec has changed on either side of the
-                    # merge. As a result, we do a manifest diff to check.
-                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
-                    needed = bool(curmf.diff(p2mf, match))
-            else:
-                # For a root node, we need to include the node if any
-                # files in the node match the narrowspec.
-                needed = any(curmf.walk(match))
-
-        if needed:
-            for head in ellipsisheads[rev]:
-                addroot(head, rev)
-            for p in ps:
-                required.add(p)
-            relevant_nodes.add(cl.node(rev))
-        else:
-            if not ps:
-                ps = [nullrev]
-            if rev in required:
-                for head in ellipsisheads[rev]:
-                    addroot(head, rev)
-                for p in ps:
-                    ellipsisheads[p].add(rev)
-            else:
-                for p in ps:
-                    ellipsisheads[p] |= ellipsisheads[rev]
-
-    # add common changesets as roots of their reachable ellipsis heads
-    for c in commonrevs:
-        for head in ellipsisheads[c]:
-            addroot(head, c)
-    return visitnodes, relevant_nodes, ellipsisroots
-
-def _packellipsischangegroup(repo, common, match, relevant_nodes,
-                             ellipsisroots, visitnodes, depth, source, version):
-    if version in ('01', '02'):
-        raise error.Abort(
-            'ellipsis nodes require at least cg3 on client and server, '
-            'but negotiated version %s' % version)
-    # We wrap cg1packer.revchunk, using a side channel to pass
-    # relevant_nodes into that area. Then if linknode isn't in the
-    # set, we know we have an ellipsis node and we should defer
-    # sending that node's data. We override close() to detect
-    # pending ellipsis nodes and flush them.
-    packer = changegroup.getbundler(version, repo)
-    # Let the packer have access to the narrow matcher so it can
-    # omit filelogs and dirlogs as needed
-    packer._narrow_matcher = lambda : match
-    # Give the packer the list of nodes which should not be
-    # ellipsis nodes. We store this rather than the set of nodes
-    # that should be an ellipsis because for very large histories
-    # we expect this to be significantly smaller.
-    packer.full_nodes = relevant_nodes
-    # Maps ellipsis revs to their roots at the changelog level.
-    packer.precomputed_ellipsis = ellipsisroots
-    # Maps CL revs to per-revlog revisions. Cleared in close() at
-    # the end of each group.
-    packer.clrev_to_localrev = {}
-    packer.next_clrev_to_localrev = {}
-    # Maps changelog nodes to changelog revs. Filled in once
-    # during changelog stage and then left unmodified.
-    packer.clnode_to_rev = {}
-    packer.changelog_done = False
-    # If true, informs the packer that it is serving shallow content and might
-    # need to pack file contents not introduced by the changes being packed.
-    packer.is_shallow = depth is not None
-
-    return packer.generate(common, visitnodes, False, source)
-
 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
+    assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
+
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
@@ -231,32 +70,6 @@
     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
-    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
-        outgoing = exchange._computeoutgoing(repo, heads, common)
-        if not outgoing.missing:
-            return
-        def wrappedgetbundler(orig, *args, **kwargs):
-            bundler = orig(*args, **kwargs)
-            bundler._narrow_matcher = lambda : newmatch
-            return bundler
-        with extensions.wrappedfunction(changegroup, 'getbundler',
-                                        wrappedgetbundler):
-            cg = changegroup.makestream(repo, outgoing, version, source)
-        part = bundler.newpart('changegroup', data=cg)
-        part.addparam('version', version)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
-
-        if include or exclude:
-            narrowspecpart = bundler.newpart(_SPECPART)
-            if include:
-                narrowspecpart.addparam(
-                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
-            if exclude:
-                narrowspecpart.addparam(
-                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
-
-        return
 
     depth = kwargs.get(r'depth', None)
     if depth is not None:
@@ -300,72 +113,49 @@
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
-        newvisit, newfull, newellipsis = _computeellipsis(
+        newvisit, newfull, newellipsis = exchange._computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
-            cg = _packellipsischangegroup(
-                repo, common, newmatch, newfull, newellipsis,
-                newvisit, depth, source, version)
-            part = bundler.newpart('changegroup', data=cg)
+            packer = changegroup.getbundler(version, repo,
+                                            matcher=newmatch,
+                                            ellipses=True,
+                                            shallow=depth is not None,
+                                            ellipsisroots=newellipsis,
+                                            fullnodes=newfull)
+            cgdata = packer.generate(common, newvisit, False, 'narrow_widen')
+
+            part = bundler.newpart('changegroup', data=cgdata)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')
 
-    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
+    visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)
 
     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
-        cg = _packellipsischangegroup(
-            repo, common, newmatch, relevant_nodes, ellipsisroots,
-            visitnodes, depth, source, version)
-        part = bundler.newpart('changegroup', data=cg)
+        packer = changegroup.getbundler(version, repo,
+                                        matcher=newmatch,
+                                        ellipses=True,
+                                        shallow=depth is not None,
+                                        ellipsisroots=ellipsisroots,
+                                        fullnodes=relevant_nodes)
+        cgdata = packer.generate(common, visitnodes, False, 'narrow_widen')
+
+        part = bundler.newpart('changegroup', data=cgdata)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
-def applyacl_narrow(repo, kwargs):
-    ui = repo.ui
-    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
-    user_includes = ui.configlist(
-        _NARROWACL_SECTION, username + '.includes',
-        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
-    user_excludes = ui.configlist(
-        _NARROWACL_SECTION, username + '.excludes',
-        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
-    if not user_includes:
-        raise error.Abort(_("{} configuration for user {} is empty")
-                          .format(_NARROWACL_SECTION, username))
-
-    user_includes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_includes]
-    user_excludes = [
-        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
-
-    req_includes = set(kwargs.get(r'includepats', []))
-    req_excludes = set(kwargs.get(r'excludepats', []))
-
-    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
-        req_includes, req_excludes, user_includes, user_excludes)
-
-    if invalid_includes:
-        raise error.Abort(
-            _("The following includes are not accessible for {}: {}")
-            .format(username, invalid_includes))
-
-    new_args = {}
-    new_args.update(kwargs)
-    new_args['includepats'] = req_includes
-    if req_excludes:
-        new_args['excludepats'] = req_excludes
-    return new_args
-
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
-    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+    narrowspec.validatepatterns(includepats)
+    narrowspec.validatepatterns(excludepats)
+
+    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.setnarrowpats(includepats, excludepats)
 
@@ -479,27 +269,15 @@
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
-            getbundlechangegrouppart_narrow(
-                *args, **applyacl_narrow(repo, kwargs))
-        elif kwargs.get(r'narrow', False):
+            kwargs = exchange.applynarrowacl(repo, kwargs)
+
+        if (kwargs.get(r'narrow', False) and
+            repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
 
-    # disable rev branch cache exchange when serving a narrow bundle
-    # (currently incompatible with that part)
-    origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
-    def wrappedcgfn(*args, **kwargs):
-        repo = args[1]
-        if repo.ui.has_section(_NARROWACL_SECTION):
-            return
-        elif kwargs.get(r'narrow', False):
-            return
-        else:
-            origrbcfn(*args, **kwargs)
-    exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
-
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
--- a/hgext/narrow/narrowchangegroup.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,373 +0,0 @@
-# narrowchangegroup.py - narrow clone changegroup creation and consumption
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-from mercurial import (
-    changegroup,
-    error,
-    extensions,
-    manifest,
-    match as matchmod,
-    mdiff,
-    node,
-    revlog,
-    util,
-)
-
-def setup():
-
-    def _cgmatcher(cgpacker):
-        localmatcher = cgpacker._repo.narrowmatch()
-        remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)()
-        if remotematcher:
-            return matchmod.intersectmatchers(localmatcher, remotematcher)
-        else:
-            return localmatcher
-
-    def prune(orig, self, revlog, missing, commonrevs):
-        if isinstance(revlog, manifest.manifestrevlog):
-            matcher = _cgmatcher(self)
-            if (matcher and
-                not matcher.visitdir(revlog._dir[:-1] or '.')):
-                return []
-        return orig(self, revlog, missing, commonrevs)
-
-    extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
-
-    def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
-                      source):
-        matcher = _cgmatcher(self)
-        if matcher:
-            changedfiles = list(filter(matcher, changedfiles))
-        if getattr(self, 'is_shallow', False):
-            # See comment in generate() for why this sadness is a thing.
-            mfdicts = self._mfdicts
-            del self._mfdicts
-            # In a shallow clone, the linknodes callback needs to also include
-            # those file nodes that are in the manifests we sent but weren't
-            # introduced by those manifests.
-            commonctxs = [self._repo[c] for c in commonrevs]
-            oldlinknodes = linknodes
-            clrev = self._repo.changelog.rev
-            def linknodes(flog, fname):
-                for c in commonctxs:
-                    try:
-                        fnode = c.filenode(fname)
-                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
-                    except error.ManifestLookupError:
-                        pass
-                links = oldlinknodes(flog, fname)
-                if len(links) != len(mfdicts):
-                    for mf, lr in mfdicts:
-                        fnode = mf.get(fname, None)
-                        if fnode in links:
-                            links[fnode] = min(links[fnode], lr, key=clrev)
-                        elif fnode:
-                            links[fnode] = lr
-                return links
-        return orig(self, changedfiles, linknodes, commonrevs, source)
-    extensions.wrapfunction(
-        changegroup.cg1packer, 'generatefiles', generatefiles)
-
-    def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
-        n = revlog_.node(rev)
-        p1n, p2n = revlog_.node(p1), revlog_.node(p2)
-        flags = revlog_.flags(rev)
-        flags |= revlog.REVIDX_ELLIPSIS
-        meta = packer.builddeltaheader(
-            n, p1n, p2n, node.nullid, linknode, flags)
-        # TODO: try and actually send deltas for ellipsis data blocks
-        diffheader = mdiff.trivialdiffheader(len(data))
-        l = len(meta) + len(diffheader) + len(data)
-        return ''.join((changegroup.chunkheader(l),
-                        meta,
-                        diffheader,
-                        data))
-
-    def close(orig, self):
-        getattr(self, 'clrev_to_localrev', {}).clear()
-        if getattr(self, 'next_clrev_to_localrev', {}):
-            self.clrev_to_localrev = self.next_clrev_to_localrev
-            del self.next_clrev_to_localrev
-        self.changelog_done = True
-        return orig(self)
-    extensions.wrapfunction(changegroup.cg1packer, 'close', close)
-
-    # In a perfect world, we'd generate better ellipsis-ified graphs
-    # for non-changelog revlogs. In practice, we haven't started doing
-    # that yet, so the resulting DAGs for the manifestlog and filelogs
-    # are actually full of bogus parentage on all the ellipsis
-    # nodes. This has the side effect that, while the contents are
-    # correct, the individual DAGs might be completely out of whack in
-    # a case like 882681bc3166 and its ancestors (back about 10
-    # revisions or so) in the main hg repo.
-    #
-    # The one invariant we *know* holds is that the new (potentially
-    # bogus) DAG shape will be valid if we order the nodes in the
-    # order that they're introduced in dramatis personae by the
-    # changelog, so what we do is we sort the non-changelog histories
-    # by the order in which they are used by the changelog.
-    def _sortgroup(orig, self, revlog, nodelist, lookup):
-        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
-            return orig(self, revlog, nodelist, lookup)
-        key = lambda n: self.clnode_to_rev[lookup(n)]
-        return [revlog.rev(n) for n in sorted(nodelist, key=key)]
-
-    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
-
-    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
-        # Note: other than delegating to orig, the only deviation in
-        # logic from normal hg's generate is marked with BEGIN/END
-        # NARROW HACK.
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow bundle
-            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
-                yield x
-            return
-
-        repo = self._repo
-        cl = repo.changelog
-        mfl = repo.manifestlog
-        mfrevlog = mfl._revlog
-
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            # BEGIN NARROW HACK
-            #
-            # Only update mfs if x is going to be sent. Otherwise we
-            # end up with bogus linkrevs specified for manifests and
-            # we skip some manifest nodes that we should otherwise
-            # have sent.
-            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
-                n = c[0]
-                # record the first changeset introducing this manifest version
-                mfs.setdefault(n, x)
-                # Set this narrow-specific dict so we have the lowest manifest
-                # revnum to look up for this cl revnum. (Part of mapping
-                # changelog ellipsis parents to manifest ellipsis parents)
-                self.next_clrev_to_localrev.setdefault(cl.rev(x),
-                                                       mfrevlog.rev(n))
-            # We can't trust the changed files list in the changeset if the
-            # client requested a shallow clone.
-            if self.is_shallow:
-                changedfiles.update(mfl[c[0]].read().keys())
-            else:
-                changedfiles.update(c[3])
-            # END NARROW HACK
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            return x
-
-        self._verbosenote(_('uncompressed size of bundle content:\n'))
-        size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
-        self._verbosenote(_('%8.i (changelog)\n') % size)
-
-        # We need to make sure that the linkrev in the changegroup refers to
-        # the first changeset that introduced the manifest or file revision.
-        # The fastpath is usually safer than the slowpath, because the filelogs
-        # are walked in revlog order.
-        #
-        # When taking the slowpath with reorder=None and the manifest revlog
-        # uses generaldelta, the manifest may be walked in the "wrong" order.
-        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
-        # cc0ff93d0c0c).
-        #
-        # When taking the fastpath, we are only vulnerable to reordering
-        # of the changelog itself. The changelog never uses generaldelta, so
-        # it is only reordered when reorder=True. To handle this case, we
-        # simply take the slowpath, which already has the 'clrevorder' logic.
-        # This was also fixed in cc0ff93d0c0c.
-        fastpathlinkrev = fastpathlinkrev and not self._reorder
-        # Treemanifests don't work correctly with fastpathlinkrev
-        # either, because we don't discover which directory nodes to
-        # send along with files. This could probably be fixed.
-        fastpathlinkrev = fastpathlinkrev and (
-            'treemanifest' not in repo.requirements)
-        # Shallow clones also don't work correctly with fastpathlinkrev
-        # because file nodes may need to be sent for a manifest even if they
-        # weren't introduced by that manifest.
-        fastpathlinkrev = fastpathlinkrev and not self.is_shallow
-
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        # BEGIN NARROW HACK
-        mfdicts = None
-        if self.is_shallow:
-            mfdicts = [(self._repo.manifestlog[n].read(), lr)
-                       for (n, lr) in mfs.iteritems()]
-        # END NARROW HACK
-        mfs.clear()
-        clrevs = set(cl.rev(x) for x in clnodes)
-
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
-
-        # BEGIN NARROW HACK
-        #
-        # We need to pass the mfdicts variable down into
-        # generatefiles(), but more than one command might have
-        # wrapped generatefiles so we can't modify the function
-        # signature. Instead, we pass the data to ourselves using an
-        # instance attribute. I'm sorry.
-        self._mfdicts = mfdicts
-        # END NARROW HACK
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
-
-        yield self.close()
-
-        if clnodes:
-            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
-    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
-
-    def revchunk(orig, self, revlog, rev, prev, linknode):
-        if not util.safehasattr(self, 'full_nodes'):
-            # not sending a narrow changegroup
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # build up some mapping information that's useful later. See
-        # the local() nested function below.
-        if not self.changelog_done:
-            self.clnode_to_rev[linknode] = rev
-            linkrev = rev
-            self.clrev_to_localrev[linkrev] = rev
-        else:
-            linkrev = self.clnode_to_rev[linknode]
-            self.clrev_to_localrev[linkrev] = rev
-        # This is a node to send in full, because the changeset it
-        # corresponds to was a full changeset.
-        if linknode in self.full_nodes:
-            for x in orig(self, revlog, rev, prev, linknode):
-                yield x
-            return
-        # At this point, a node can either be one we should skip or an
-        # ellipsis. If it's not an ellipsis, bail immediately.
-        if linkrev not in self.precomputed_ellipsis:
-            return
-        linkparents = self.precomputed_ellipsis[linkrev]
-        def local(clrev):
-            """Turn a changelog revnum into a local revnum.
-
-            The ellipsis dag is stored as revnums on the changelog,
-            but when we're producing ellipsis entries for
-            non-changelog revlogs, we need to turn those numbers into
-            something local. This does that for us, and during the
-            changelog sending phase will also expand the stored
-            mappings as needed.
-            """
-            if clrev == node.nullrev:
-                return node.nullrev
-            if not self.changelog_done:
-                # If we're doing the changelog, it's possible that we
-                # have a parent that is already on the client, and we
-                # need to store some extra mapping information so that
-                # our contained ellipsis nodes will be able to resolve
-                # their parents.
-                if clrev not in self.clrev_to_localrev:
-                    clnode = revlog.node(clrev)
-                    self.clnode_to_rev[clnode] = clrev
-                return clrev
-            # Walk the ellipsis-ized changelog breadth-first looking for a
-            # change that has been linked from the current revlog.
-            #
-            # For a flat manifest revlog only a single step should be necessary
-            # as all relevant changelog entries are relevant to the flat
-            # manifest.
-            #
-            # For a filelog or tree manifest dirlog however not every changelog
-            # entry will have been relevant, so we need to skip some changelog
-            # nodes even after ellipsis-izing.
-            walk = [clrev]
-            while walk:
-                p = walk[0]
-                walk = walk[1:]
-                if p in self.clrev_to_localrev:
-                    return self.clrev_to_localrev[p]
-                elif p in self.full_nodes:
-                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
-                                    if pp != node.nullrev])
-                elif p in self.precomputed_ellipsis:
-                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
-                                    if pp != node.nullrev])
-                else:
-                    # In this case, we've got an ellipsis with parents
-                    # outside the current bundle (likely an
-                    # incremental pull). We "know" that we can use the
-                    # value of this same revlog at whatever revision
-                    # is pointed to by linknode. "Know" is in scare
-                    # quotes because I haven't done enough examination
-                    # of edge cases to convince myself this is really
-                    # a fact - it works for all the (admittedly
-                    # thorough) cases in our testsuite, but I would be
-                    # somewhat unsurprised to find a case in the wild
-                    # where this breaks down a bit. That said, I don't
-                    # know if it would hurt anything.
-                    for i in xrange(rev, 0, -1):
-                        if revlog.linkrev(i) == clrev:
-                            return i
-                    # We failed to resolve a parent for this node, so
-                    # we crash the changegroup construction.
-                    raise error.Abort(
-                        'unable to resolve parent while packing %r %r'
-                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
-            return node.nullrev
-
-        if not linkparents or (
-            revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
-            p1, p2 = node.nullrev, node.nullrev
-        elif len(linkparents) == 1:
-            p1, = sorted(local(p) for p in linkparents)
-            p2 = node.nullrev
-        else:
-            p1, p2 = sorted(local(p) for p in linkparents)
-        n = revlog.node(rev)
-        yield ellipsisdata(
-            self, rev, revlog, p1, p2, revlog.revision(n), linknode)
-    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
-
-    def deltaparent(orig, self, revlog, rev, p1, p2, prev):
-        if util.safehasattr(self, 'full_nodes'):
-            # TODO: send better deltas when in narrow mode.
-            #
-            # changegroup.group() loops over revisions to send,
-            # including revisions we'll skip. What this means is that
-            # `prev` will be a potentially useless delta base for all
-            # ellipsis nodes, as the client likely won't have it. In
-            # the future we should do bookkeeping about which nodes
-            # have been sent to the client, and try to be
-            # significantly smarter about delta bases. This is
-            # slightly tricky because this same code has to work for
-            # all revlogs, and we don't have the linkrev/linknode here.
-            return p1
-        return orig(self, revlog, rev, p1, p2, prev)
-    extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
--- a/hgext/narrow/narrowcommands.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowcommands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,13 +7,15 @@
 from __future__ import absolute_import
 
 import itertools
+import os
 
 from mercurial.i18n import _
 from mercurial import (
-    changegroup,
+    bundle2,
     cmdutil,
     commands,
     discovery,
+    encoding,
     error,
     exchange,
     extensions,
@@ -24,12 +26,11 @@
     pycompat,
     registrar,
     repair,
+    repository,
     repoview,
+    sparse,
     util,
-)
-
-from . import (
-    narrowbundle2,
+    wireprototypes,
 )
 
 table = {}
@@ -43,6 +44,8 @@
                      _("create a narrow clone of select files")))
     entry[1].append(('', 'depth', '',
                      _("limit the history fetched by distance from heads")))
+    entry[1].append(('', 'narrowspec', '',
+                     _("read narrowspecs from file")))
     # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
     if 'sparse' not in extensions.enabled():
         entry[1].append(('', 'include', [],
@@ -57,41 +60,36 @@
 
     extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
 
-def expandpull(pullop, includepats, excludepats):
-    if not narrowspec.needsexpansion(includepats):
-        return includepats, excludepats
-
-    heads = pullop.heads or pullop.rheads
-    includepats, excludepats = pullop.remote.expandnarrow(
-        includepats, excludepats, heads)
-    pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
-        includepats, excludepats))
-    return set(includepats), set(excludepats)
-
 def clonenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
     opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
-    opts_narrow = opts['narrow']
-    if opts_narrow:
-        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
-            # Create narrow spec patterns from clone flags
-            includepats = narrowspec.parsepatterns(opts['include'])
-            excludepats = narrowspec.parsepatterns(opts['exclude'])
+    narrowspecfile = opts['narrowspec']
+
+    if narrowspecfile:
+        filepath = os.path.join(encoding.getcwd(), narrowspecfile)
+        ui.status(_("reading narrowspec from '%s'\n") % filepath)
+        try:
+            fdata = util.readfile(filepath)
+        except IOError as inst:
+            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
+                              (filepath, encoding.strtolocal(inst.strerror)))
 
-            # If necessary, ask the server to expand the narrowspec.
-            includepats, excludepats = expandpull(
-                pullop, includepats, excludepats)
+        includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
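+        # fdata uses the sparse config syntax; a hypothetical file:
+        #
+        #   [include]
+        #   path:src
+        #   [exclude]
+        #   path:src/tests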
+        if profiles:
+            raise error.Abort(_("cannot specify other files using '%include' in"
+                                " narrowspec"))
+
+        narrowspec.validatepatterns(includes)
+        narrowspec.validatepatterns(excludes)
 
-            if not includepats and excludepats:
-                # If nothing was included, we assume the user meant to include
-                # everything, except what they asked to exclude.
-                includepats = {'path:.'}
+        # a narrowspec file was given, so assume the user wants a narrow clone
+        opts['narrow'] = True
+        opts['include'].extend(includes)
+        opts['exclude'].extend(excludes)
 
-            pullop.repo.setnarrowpats(includepats, excludepats)
-
-            # This will populate 'includepats' etc with the values from the
-            # narrowspec we just saved.
+    if opts['narrow']:
+        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
 
             if opts.get('depth'):
@@ -99,22 +97,13 @@
         wrappedextraprepare = extensions.wrappedfunction(exchange,
             '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
 
-    def pullnarrow(orig, repo, *args, **kwargs):
-        if opts_narrow:
-            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
-            repo._writerequirements()
-
-        return orig(repo, *args, **kwargs)
-
-    wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
-
-    with wrappedextraprepare, wrappedpull:
+    with wrappedextraprepare:
         return orig(ui, repo, *args, **pycompat.strkwargs(opts))
 
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps pull command to allow modifying narrow spec."""
     wrappedextraprepare = util.nullcontextmanager()
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
@@ -128,7 +117,7 @@
 
 def archivenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps archive command to narrow the default includes."""
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         repo_includes, repo_excludes = repo.narrowpats
         includes = set(opts.get(r'include', []))
         excludes = set(opts.get(r'exclude', []))
@@ -142,11 +131,11 @@
 
 def pullbundle2extraprepare(orig, pullop, kwargs):
     repo = pullop.repo
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
         return orig(pullop, kwargs)
 
-    if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
-        raise error.Abort(_("server doesn't support narrow clones"))
+    if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
+        raise error.Abort(_("server does not support narrow clones"))
     orig(pullop, kwargs)
     kwargs['narrow'] = True
     include, exclude = repo.narrowpats
@@ -154,18 +143,31 @@
     kwargs['oldexcludepats'] = exclude
     kwargs['includepats'] = include
     kwargs['excludepats'] = exclude
-    kwargs['known'] = [node.hex(ctx.node()) for ctx in
-                       repo.set('::%ln', pullop.common)
-                       if ctx.node() != node.nullid]
-    if not kwargs['known']:
-        # Mercurial serialized an empty list as '' and deserializes it as
-        # [''], so delete it instead to avoid handling the empty string on the
-        # server.
-        del kwargs['known']
+    # only calculate 'known' nodes in the ellipses case; in the non-ellipses
+    # case we already have all the nodes
+    if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities():
+        kwargs['known'] = [node.hex(ctx.node()) for ctx in
+                           repo.set('::%ln', pullop.common)
+                           if ctx.node() != node.nullid]
+        if not kwargs['known']:
+            # Mercurial serializes an empty list as '' and deserializes it as
+            # [''], so delete it instead to avoid handling the empty string on
+            # the server.
+            del kwargs['known']
 
 extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
                         pullbundle2extraprepare)
 
+# This is an extension point for filesystems that need to do something other
+# than just blindly unlink the files. It's not clear what arguments would be
+# useful, so we're passing in a fair number of them, some of them redundant.
+def _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, newexcludes,
+                       oldmatch, newmatch):
+    for f in repo.dirstate:
+        if not newmatch(f):
+            repo.dirstate.drop(f)
+            repo.wvfs.unlinkpath(f)
+
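+# A hypothetical override (a sketch, not part of this change) would use the
+# same wrapping idiom as the rest of this file:
+#
+#   def cleanupwithbookkeeping(orig, repo, *args):
+#       ...  # filesystem-specific bookkeeping goes here
+#       return orig(repo, *args)  # fall back to the plain unlink behaviour
+#   extensions.wrapfunction(narrowcommands, '_narrowcleanupwdir',
+#                           cleanupwithbookkeeping)
+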
 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
             newincludes, newexcludes, force):
     oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
@@ -241,20 +243,23 @@
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
-            for f in repo.dirstate:
-                if not newmatch(f):
-                    repo.dirstate.drop(f)
-                    repo.wvfs.unlinkpath(f)
+            _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes,
+                               newexcludes, oldmatch, newmatch)
             repo.setnarrowpats(newincludes, newexcludes)
 
         repo.destroyed()
 
-def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
+def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+           newincludes, newexcludes):
     newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
 
-    # TODO(martinvonz): Get expansion working with widening/narrowing.
-    if narrowspec.needsexpansion(newincludes):
-        raise error.Abort('Expansion not yet supported on pull')
+    # For now we assume that if a server has ellipses enabled, we will be
+    # exchanging ellipsis nodes. In the future we should (maybe) add ellipses
+    # as a client-side requirement so we can tell whether a client is
+    # shallow, and then let the client tell the server whether it wants
+    # ellipses. Theoretically, a non-ellipses repo should be able to use the
+    # narrow functionality of an ellipses-enabled server.
+    ellipsesremote = wireprototypes.ELLIPSESCAP in remote.capabilities()
 
     def pullbundle2extraprepare_widen(orig, pullop, kwargs):
         orig(pullop, kwargs)
@@ -269,18 +274,41 @@
     def setnewnarrowpats():
         repo.setnarrowpats(newincludes, newexcludes)
     repo.setnewnarrowpats = setnewnarrowpats
+    # silence the devel-warning of applying an empty changegroup
+    overrides = {('devel', 'all-warnings'): False}
 
     with ui.uninterruptable():
-        ds = repo.dirstate
-        p1, p2 = ds.p1(), ds.p2()
-        with ds.parentchange():
-            ds.setparents(node.nullid, node.nullid)
         common = commoninc[0]
-        with wrappedextraprepare:
-            exchange.pull(repo, remote, heads=common)
-        with ds.parentchange():
-            ds.setparents(p1, p2)
+        if ellipsesremote:
+            ds = repo.dirstate
+            p1, p2 = ds.p1(), ds.p2()
+            with ds.parentchange():
+                ds.setparents(node.nullid, node.nullid)
+            with wrappedextraprepare,\
+                 repo.ui.configoverride(overrides, 'widen'):
+                exchange.pull(repo, remote, heads=common)
+            with ds.parentchange():
+                ds.setparents(p1, p2)
+        else:
+            with remote.commandexecutor() as e:
+                bundle = e.callcommand('narrow_widen', {
+                    'oldincludes': oldincludes,
+                    'oldexcludes': oldexcludes,
+                    'newincludes': newincludes,
+                    'newexcludes': newexcludes,
+                    'cgversion': '03',
+                    'commonheads': common,
+                    'known': [],
+                    'ellipses': False,
+                }).result()
 
+            with repo.transaction('widening') as tr,\
+                 repo.ui.configoverride(overrides, 'widen'):
+                tgetter = lambda: tr
+                bundle2.processbundle(repo, bundle,
+                        transactiongetter=tgetter)
+
+        repo.setnewnarrowpats()
         actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
         addgaction = actions['g'].append
 
@@ -299,6 +327,7 @@
     [('', 'addinclude', [], _('new paths to include')),
      ('', 'removeinclude', [], _('old paths to no longer include')),
      ('', 'addexclude', [], _('new paths to exclude')),
+     ('', 'import-rules', '', _('import narrowspecs from a file')),
      ('', 'removeexclude', [], _('old paths to no longer exclude')),
      ('', 'clear', False, _('whether to replace the existing narrowspec')),
      ('', 'force-delete-local-changes', False,
@@ -331,7 +360,7 @@
     empty and will not match any files.
     """
     opts = pycompat.byteskwargs(opts)
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
         ui.warn(_('The narrow command is only supported on repositories cloned'
                   ' with --narrow.\n'))
         return 1
@@ -342,8 +371,22 @@
         ui.warn(_('The --clear option is not yet supported.\n'))
         return 1
 
-    if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
-        raise error.Abort('Expansion not yet supported on widen/narrow')
+    # import rules from a file
+    newrules = opts.get('import_rules')
+    if newrules:
+        try:
+            filepath = os.path.join(encoding.getcwd(), newrules)
+            fdata = util.readfile(filepath)
+        except IOError as inst:
+            raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
+                              (filepath, encoding.strtolocal(inst.strerror)))
+        includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
+                                                                'narrow')
+        if profiles:
+            raise error.Abort(_("including other spec files using '%include' "
+                                "is not supported in narrowspec"))
+        opts['addinclude'].extend(includepats)
+        opts['addexclude'].extend(excludepats)
 
     addedincludes = narrowspec.parsepatterns(opts['addinclude'])
     removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
@@ -380,6 +423,13 @@
         url, branches = hg.parseurl(remotepath)
         ui.status(_('comparing with %s\n') % util.hidepassword(url))
         remote = hg.peer(repo, opts, url)
+
+        # check narrow support before doing anything if widening needs to be
+        # performed. In the future we should also abort if the client is
+        # ellipses-enabled but the server does not support ellipses.
+        if widening and wireprototypes.NARROWCAP not in remote.capabilities():
+            raise error.Abort(_("server does not support narrow clones"))
+
         commoninc = discovery.findcommonincoming(repo, remote)
 
         oldincludes, oldexcludes = repo.narrowpats
@@ -398,6 +448,7 @@
         if widening:
             newincludes = oldincludes | addedincludes
             newexcludes = oldexcludes - removedexcludes
-            _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
+            _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+                    newincludes, newexcludes)
 
     return 0
--- a/hgext/narrow/narrowcopies.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-# narrowcopies.py - extensions to mercurial copies module to support narrow
-# clones
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial import (
-    copies,
-    extensions,
-)
-
-def setup(repo):
-    def _computeforwardmissing(orig, a, b, match=None):
-        missing = orig(a, b, match)
-        narrowmatch = repo.narrowmatch()
-        if narrowmatch.always():
-            return missing
-        missing = [f for f in missing if narrowmatch(f)]
-        return missing
-
-    def _checkcopies(orig, srcctx, dstctx, f, base, tca, remotebase, limit,
-                     data):
-        narrowmatch = repo.narrowmatch()
-        if not narrowmatch(f):
-            return
-        orig(srcctx, dstctx, f, base, tca, remotebase, limit, data)
-
-    extensions.wrapfunction(copies, '_computeforwardmissing',
-                            _computeforwardmissing)
-    extensions.wrapfunction(copies, '_checkcopies', _checkcopies)
--- a/hgext/narrow/narrowdirstate.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowdirstate.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,9 +10,6 @@
 from mercurial.i18n import _
 from mercurial import (
     error,
-    match as matchmod,
-    narrowspec,
-    util as hgutil,
 )
 
 def wrapdirstate(repo, dirstate):
@@ -20,31 +17,15 @@
 
     def _editfunc(fn):
         def _wrapper(self, *args):
-            dirstate = repo.dirstate
             narrowmatch = repo.narrowmatch()
             for f in args:
-                if f is not None and not narrowmatch(f) and f not in dirstate:
+                if f is not None and not narrowmatch(f) and f not in self:
                     raise error.Abort(_("cannot track '%s' - it is outside " +
                         "the narrow clone") % f)
             return fn(self, *args)
         return _wrapper
 
-    def _narrowbackupname(backupname):
-        assert 'dirstate' in backupname
-        return backupname.replace('dirstate', narrowspec.FILENAME)
-
     class narrowdirstate(dirstate.__class__):
-        def walk(self, match, subrepos, unknown, ignored, full=True,
-                 narrowonly=True):
-            if narrowonly:
-                # hack to not exclude explicitly-specified paths so that they
-                # can be warned later on e.g. dirstate.add()
-                em = matchmod.exact(match._root, match._cwd, match.files())
-                nm = matchmod.unionmatcher([repo.narrowmatch(), em])
-                match = matchmod.intersectmatchers(match, nm)
-            return super(narrowdirstate, self).walk(match, subrepos, unknown,
-                                                    ignored, full)
-
         # Prevent adding/editing/copying/deleting files that are outside the
         # sparse checkout
         @_editfunc
@@ -78,22 +59,5 @@
                 allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
             super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)
 
-        def restorebackup(self, tr, backupname):
-            self._opener.rename(_narrowbackupname(backupname),
-                                narrowspec.FILENAME, checkambig=True)
-            super(narrowdirstate, self).restorebackup(tr, backupname)
-
-        def savebackup(self, tr, backupname):
-            super(narrowdirstate, self).savebackup(tr, backupname)
-
-            narrowbackupname = _narrowbackupname(backupname)
-            self._opener.tryunlink(narrowbackupname)
-            hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
-                            self._opener.join(narrowbackupname), hardlink=True)
-
-        def clearbackup(self, tr, backupname):
-            super(narrowdirstate, self).clearbackup(tr, backupname)
-            self._opener.unlink(_narrowbackupname(backupname))
-
     dirstate.__class__ = narrowdirstate
     return dirstate
--- a/hgext/narrow/narrowpatch.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-# narrowpatch.py - extensions to mercurial patch module to support narrow clones
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial import (
-    extensions,
-    patch,
-)
-
-def setup(repo):
-    def _filepairs(orig, *args):
-        """Only includes files within the narrow spec in the diff."""
-        narrowmatch = repo.narrowmatch()
-        if not narrowmatch.always():
-            for x in orig(*args):
-                f1, f2, copyop = x
-                if ((not f1 or narrowmatch(f1)) and
-                    (not f2 or narrowmatch(f2))):
-                    yield x
-        else:
-            for x in orig(*args):
-                yield x
-
-    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
-                copy, getfilectx, *args, **kwargs):
-        narrowmatch = repo.narrowmatch()
-        if not narrowmatch.always():
-            modified = [f for f in modified if narrowmatch(f)]
-            added = [f for f in added if narrowmatch(f)]
-            removed = [f for f in removed if narrowmatch(f)]
-            copy = {k: v for k, v in copy.iteritems() if narrowmatch(k)}
-        return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
-                    getfilectx, *args, **kwargs)
-
-    extensions.wrapfunction(patch, '_filepairs', _filepairs)
-    extensions.wrapfunction(patch, 'trydiff', trydiff)
--- a/hgext/narrow/narrowrepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowrepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,63 +8,26 @@
 from __future__ import absolute_import
 
 from mercurial import (
-    changegroup,
-    hg,
-    narrowspec,
-    scmutil,
+    wireprototypes,
 )
 
 from . import (
     narrowdirstate,
-    narrowrevlog,
 )
 
-def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
-    orig(sourcerepo, destrepo, **kwargs)
-    if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
-        with destrepo.wlock():
-            with destrepo.vfs('shared', 'a') as fp:
-                fp.write(narrowspec.FILENAME + '\n')
-
-def unsharenarrowspec(orig, ui, repo, repopath):
-    if (changegroup.NARROW_REQUIREMENT in repo.requirements
-        and repo.path == repopath and repo.shared()):
-        srcrepo = hg.sharedreposource(repo)
-        with srcrepo.vfs(narrowspec.FILENAME) as f:
-            spec = f.read()
-        with repo.vfs(narrowspec.FILENAME, 'w') as f:
-            f.write(spec)
-    return orig(ui, repo, repopath)
-
 def wraprepo(repo):
     """Enables narrow clone functionality on a single local repository."""
 
     class narrowrepository(repo.__class__):
 
-        def file(self, f):
-            fl = super(narrowrepository, self).file(f)
-            narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
-            return fl
-
-        # I'm not sure this is the right place to do this filter.
-        # context._manifestmatches() would probably be better, or perhaps
-        # move it to a later place, in case some of the callers do want to know
-        # which directories changed. This seems to work for now, though.
-        def status(self, *args, **kwargs):
-            s = super(narrowrepository, self).status(*args, **kwargs)
-            narrowmatch = self.narrowmatch()
-            modified = list(filter(narrowmatch, s.modified))
-            added = list(filter(narrowmatch, s.added))
-            removed = list(filter(narrowmatch, s.removed))
-            deleted = list(filter(narrowmatch, s.deleted))
-            unknown = list(filter(narrowmatch, s.unknown))
-            ignored = list(filter(narrowmatch, s.ignored))
-            clean = list(filter(narrowmatch, s.clean))
-            return scmutil.status(modified, added, removed, deleted, unknown,
-                                  ignored, clean)
-
         def _makedirstate(self):
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
 
+        def peer(self):
+            peer = super(narrowrepository, self).peer()
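+            # this wrapper only applies to local repositories (see wraprepo),
+            # whose peers do not take part in the wire capability exchange,
+            # so mark narrow/ellipses support on the peer directly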
+            peer._caps.add(wireprototypes.NARROWCAP)
+            peer._caps.add(wireprototypes.ELLIPSESCAP)
+            return peer
+
     repo.__class__ = narrowrepository
--- a/hgext/narrow/narrowrevlog.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-# narrowrevlog.py - revlog storing irrelevant nodes as "ellipsis" nodes
-#
-# Copyright 2017 Google, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from mercurial import (
-   revlog,
-   util,
-)
-
-def readtransform(self, text):
-    return text, False
-
-def writetransform(self, text):
-    return text, False
-
-def rawtransform(self, text):
-    return False
-
-revlog.addflagprocessor(revlog.REVIDX_ELLIPSIS,
-                        (readtransform, writetransform, rawtransform))
-
-def setup():
-    # We just wanted to add the flag processor, which is done at module
-    # load time.
-    pass
-
-def makenarrowfilelog(fl, narrowmatch):
-    class narrowfilelog(fl.__class__):
-        def renamed(self, node):
-            # Renames that come from outside the narrowspec are
-            # problematic at least for git-diffs, because we lack the
-            # base text for the rename. This logic was introduced in
-            # 3cd72b1 of narrowhg (authored by martinvonz, reviewed by
-            # adgar), but that revision doesn't have any additional
-            # commentary on what problems we can encounter.
-            m = super(narrowfilelog, self).renamed(node)
-            if m and not narrowmatch(m[0]):
-                return None
-            return m
-
-        def size(self, rev):
-            # We take advantage of the fact that remotefilelog
-            # lacks a node() method to just skip the
-            # rename-checking logic when on remotefilelog. This
-            # might be incorrect on other non-revlog-based storage
-            # engines, but for now this seems to be fine.
-            #
-            # TODO: when remotefilelog is in core, improve this to
-            # explicitly look for remotefilelog instead of cheating
-            # with a hasattr check.
-            if util.safehasattr(self, 'node'):
-                node = self.node(rev)
-                # Because renamed() is overridden above to
-                # sometimes return None even if there is metadata
-                # in the revlog, size can be incorrect for
-                # copies/renames, so we need to make sure we call
-                # the super class's implementation of renamed()
-                # for the purpose of size calculation.
-                if super(narrowfilelog, self).renamed(node):
-                    return len(self.read(node))
-            return super(narrowfilelog, self).size(rev)
-
-        def cmp(self, node, text):
-            different = super(narrowfilelog, self).cmp(node, text)
-            if different:
-                # Similar to size() above, if the file was copied from
-                # a file outside the narrowspec, the super class's
-                # would have returned True because we tricked it into
-                # thinking that the file was not renamed.
-                if super(narrowfilelog, self).renamed(node):
-                    t2 = self.read(node)
-                    return t2 != text
-            return different
-
-    fl.__class__ = narrowfilelog
--- a/hgext/narrow/narrowtemplates.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowtemplates.py	Mon Oct 22 14:46:06 2018 -0400
@@ -42,7 +42,7 @@
             return 'outsidenarrow'
     return ''
 
-@revsetpredicate('ellipsis')
+@revsetpredicate('ellipsis()')
 def ellipsisrevset(repo, subset, x):
     """Changesets that are ellipsis nodes."""
     return subset.filter(lambda r: _isellipsis(repo, r))
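+
+# the trailing '()' documents the predicate's empty signature; the revset is
+# used as, for example, 'hg log -r "ellipsis()"'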
--- a/hgext/narrow/narrowwirepeer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/narrow/narrowwirepeer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,36 +7,20 @@
 
 from __future__ import absolute_import
 
-from mercurial.i18n import _
 from mercurial import (
+    bundle2,
     error,
     extensions,
     hg,
     narrowspec,
-    node,
+    pycompat,
+    wireprototypes,
+    wireprotov1peer,
+    wireprotov1server,
 )
 
 def uisetup():
-    def peersetup(ui, peer):
-        # We must set up the expansion before reposetup below, since it's used
-        # at clone time before we have a repo.
-        class expandingpeer(peer.__class__):
-            def expandnarrow(self, narrow_include, narrow_exclude, nodes):
-                ui.status(_("expanding narrowspec\n"))
-                if not self.capable('exp-expandnarrow'):
-                    raise error.Abort(
-                        'peer does not support expanding narrowspecs')
-
-                hex_nodes = (node.hex(n) for n in nodes)
-                new_narrowspec = self._call(
-                    'expandnarrow',
-                    includepats=','.join(narrow_include),
-                    excludepats=','.join(narrow_exclude),
-                    nodes=','.join(hex_nodes))
-
-                return narrowspec.parseserverpatterns(new_narrowspec)
-        peer.__class__ = expandingpeer
-    hg.wirepeersetupfuncs.append(peersetup)
+    wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden
 
 def reposetup(repo):
     def wirereposetup(ui, peer):
@@ -50,3 +34,74 @@
             return orig(cmd, *args, **kwargs)
         extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
     hg.wirepeersetupfuncs.append(wirereposetup)
+
+@wireprotov1server.wireprotocommand('narrow_widen', 'oldincludes oldexcludes'
+                                                    ' newincludes newexcludes'
+                                                    ' commonheads cgversion'
+                                                    ' known ellipses',
+                                    permission='pull')
+def narrow_widen(repo, proto, oldincludes, oldexcludes, newincludes,
+                 newexcludes, commonheads, cgversion, known, ellipses):
+    """wireprotocol command to send data when a narrow clone is widen. We will
+    be sending a changegroup here.
+
+    The current set of arguments which are required:
+    oldincludes: the old includes of the narrow copy
+    oldexcludes: the old excludes of the narrow copy
+    newincludes: the new includes of the narrow copy
+    newexcludes: the new excludes of the narrow copy
+    commonheads: list of heads which are common between the server and client
+    cgversion (optional): the changegroup version to produce
+    known: list of nodes which are known on the client (used in ellipses cases)
+    ellipses: whether to send ellipses data or not
+    """
+
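+    # A hypothetical request: oldincludes=['path:src'] with
+    # newincludes=['path:src', 'path:docs'] asks for everything needed to
+    # widen the clone by 'path:docs'. narrowspec.validatepatterns() below
+    # rejects any pattern that lacks a recognized prefix such as 'path:'.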
+    preferuncompressed = False
+    try:
+        oldincludes = wireprototypes.decodelist(oldincludes)
+        newincludes = wireprototypes.decodelist(newincludes)
+        oldexcludes = wireprototypes.decodelist(oldexcludes)
+        newexcludes = wireprototypes.decodelist(newexcludes)
+        # validate the patterns
+        narrowspec.validatepatterns(set(oldincludes))
+        narrowspec.validatepatterns(set(newincludes))
+        narrowspec.validatepatterns(set(oldexcludes))
+        narrowspec.validatepatterns(set(newexcludes))
+
+        common = wireprototypes.decodelist(commonheads)
+        if known:
+            known = wireprototypes.decodelist(known)
+        else:
+            known = None
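+        # the flag travels as a string: '0' or an empty value means False;
+        # anything else (the client sends '1') means True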
+        if ellipses == '0':
+            ellipses = False
+        else:
+            ellipses = bool(ellipses)
+        newmatch = narrowspec.match(repo.root, include=newincludes,
+                                    exclude=newexcludes)
+        oldmatch = narrowspec.match(repo.root, include=oldincludes,
+                                    exclude=oldexcludes)
+
+        bundler = bundle2.widen_bundle(repo, oldmatch, newmatch, common, known,
+                                             cgversion, ellipses)
+    except error.Abort as exc:
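+        # relay the failure as an error:abort bundle part so the client can
+        # surface the message (and hint, if any) instead of a broken stream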
+        bundler = bundle2.bundle20(repo.ui)
+        manargs = [('message', pycompat.bytestr(exc))]
+        advargs = []
+        if exc.hint is not None:
+            advargs.append(('hint', exc.hint))
+        bundler.addpart(bundle2.bundlepart('error:abort', manargs, advargs))
+        preferuncompressed = True
+
+    chunks = bundler.getchunks()
+    return wireprototypes.streamres(gen=chunks,
+                                    prefer_uncompressed=preferuncompressed)
+
+def peernarrowwiden(remote, **kwargs):
+    for ch in (r'oldincludes', r'newincludes', r'oldexcludes', r'newexcludes',
+               r'commonheads', r'known'):
+        kwargs[ch] = wireprototypes.encodelist(kwargs[ch])
+
+    kwargs[r'ellipses'] = '%i' % bool(kwargs[r'ellipses'])
+    f = remote._callcompressable('narrow_widen', **kwargs)
+    return bundle2.getunbundler(remote.ui, f)
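+
+# Client-side sketch (mirroring the call site in narrowcommands._widen):
+#
+#   with remote.commandexecutor() as e:
+#       bundle = e.callcommand('narrow_widen', {...}).result()
+#   bundle2.processbundle(repo, bundle, transactiongetter=lambda: tr)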
--- a/hgext/notify.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/notify.py	Mon Oct 22 14:46:06 2018 -0400
@@ -141,7 +141,7 @@
 '''
 from __future__ import absolute_import
 
-import email
+import email.errors as emailerrors
 import email.parser as emailparser
 import fnmatch
 import socket
@@ -149,6 +149,7 @@
 
 from mercurial.i18n import _
 from mercurial import (
+    encoding,
     error,
     logcmdutil,
     mail,
@@ -226,7 +227,7 @@
 )
 
 # template for single changeset can include email headers.
-single_template = '''
+single_template = b'''
 Subject: changeset in {webroot}: {desc|firstline|strip}
 From: {author}
 
@@ -239,7 +240,7 @@
 # template for multiple changesets should not contain email headers,
 # because only first set of headers will be used and result will look
 # strange.
-multiple_template = '''
+multiple_template = b'''
 changeset {node|short} in {root}
 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
 summary: {desc|firstline}
@@ -361,13 +362,14 @@
 
         p = emailparser.Parser()
         try:
-            msg = p.parsestr(data)
-        except email.Errors.MessageParseError as inst:
+            msg = p.parsestr(encoding.strfromlocal(data))
+        except emailerrors.MessageParseError as inst:
             raise error.Abort(inst)
 
         # store sender and subject
-        sender, subject = msg['From'], msg['Subject']
-        del msg['From'], msg['Subject']
+        sender = encoding.strtolocal(msg[r'From'])
+        subject = encoding.strtolocal(msg[r'Subject'])
+        del msg[r'From'], msg[r'Subject']
 
         if not msg.is_multipart():
             # create fresh mime message from scratch
@@ -380,7 +382,8 @@
             for k, v in headers:
                 msg[k] = v
 
-        msg['Date'] = dateutil.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
+        msg[r'Date'] = encoding.strfromlocal(
+            dateutil.datestr(format="%a, %d %b %Y %H:%M:%S %1%2"))
 
         # try to make subject line exist and be useful
         if not subject:
@@ -392,25 +395,26 @@
         maxsubject = int(self.ui.config('notify', 'maxsubject'))
         if maxsubject:
             subject = stringutil.ellipsis(subject, maxsubject)
-        msg['Subject'] = mail.headencode(self.ui, subject,
-                                         self.charsets, self.test)
+        msg[r'Subject'] = encoding.strfromlocal(
+            mail.headencode(self.ui, subject, self.charsets, self.test))
 
         # try to make message have proper sender
         if not sender:
             sender = self.ui.config('email', 'from') or self.ui.username()
         if '@' not in sender or '@localhost' in sender:
             sender = self.fixmail(sender)
-        msg['From'] = mail.addressencode(self.ui, sender,
-                                         self.charsets, self.test)
+        msg[r'From'] = encoding.strfromlocal(
+            mail.addressencode(self.ui, sender, self.charsets, self.test))
 
-        msg['X-Hg-Notification'] = 'changeset %s' % ctx
-        if not msg['Message-Id']:
-            msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
-                                 (ctx, int(time.time()),
-                                  hash(self.repo.root), socket.getfqdn()))
-        msg['To'] = ', '.join(sorted(subs))
+        msg[r'X-Hg-Notification'] = r'changeset %s' % ctx
+        if not msg[r'Message-Id']:
+            msg[r'Message-Id'] = encoding.strfromlocal(
+                '<hg.%s.%d.%d@%s>' % (ctx, int(time.time()),
+                                      hash(self.repo.root),
+                                      encoding.strtolocal(socket.getfqdn())))
+        msg[r'To'] = encoding.strfromlocal(', '.join(sorted(subs)))
 
-        msgtext = msg.as_string()
+        msgtext = encoding.strtolocal(msg.as_string())
         if self.test:
             self.ui.write(msgtext)
             if not msgtext.endswith('\n'):
@@ -418,7 +422,7 @@
         else:
             self.ui.status(_('notify: sending %d subscribers %d changes\n') %
                            (len(subs), count))
-            mail.sendmail(self.ui, stringutil.email(msg['From']),
+            mail.sendmail(self.ui, stringutil.email(msg[r'From']),
                           subs, msgtext, mbox=self.mbox)
 
     def diff(self, ctx, ref=None):
--- a/hgext/patchbomb.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/patchbomb.py	Mon Oct 22 14:46:06 2018 -0400
@@ -73,7 +73,7 @@
 '''
 from __future__ import absolute_import
 
-import email as emailmod
+import email.encoders as emailencoders
 import email.generator as emailgen
 import email.mime.base as emimebase
 import email.mime.multipart as emimemultipart
@@ -139,6 +139,11 @@
     default=None,
 )
 
+if pycompat.ispy3:
+    _bytesgenerator = emailgen.BytesGenerator
+else:
+    _bytesgenerator = emailgen.Generator
+
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
@@ -182,12 +187,12 @@
     elif introconfig == 'never':
         intro = False
     elif introconfig == 'auto':
-        intro = 1 < number
+        intro = number > 1
     else:
         ui.write_err(_('warning: invalid patchbomb.intro value "%s"\n')
                      % introconfig)
         ui.write_err(_('(should be one of always, never, auto)\n'))
-        intro = 1 < number
+        intro = number > 1
     return intro
 
 def _formatflags(ui, repo, rev, flags):
@@ -273,10 +278,11 @@
                                                  seqno=idx, total=total)
             else:
                 patchname = cmdutil.makefilename(repo[node], '%b.patch')
-        disposition = 'inline'
+        disposition = r'inline'
         if opts.get('attach'):
-            disposition = 'attachment'
-        p['Content-Disposition'] = disposition + '; filename=' + patchname
+            disposition = r'attachment'
+        p[r'Content-Disposition'] = (
+            disposition + r'; filename=' + encoding.strfromlocal(patchname))
         msg.attach(p)
     else:
         msg = mail.mimetextpatch(body, display=opts.get('test'))
@@ -370,12 +376,12 @@
     msg = emimemultipart.MIMEMultipart()
     if body:
         msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
-    datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle')
+    datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle')
     datapart.set_payload(bundle)
     bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle')
-    datapart.add_header('Content-Disposition', 'attachment',
-                        filename=bundlename)
-    emailmod.Encoders.encode_base64(datapart)
+    datapart.add_header(r'Content-Disposition', r'attachment',
+                        filename=encoding.strfromlocal(bundlename))
+    emailencoders.encode_base64(datapart)
     msg.attach(datapart)
     msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
     return [(msg, subj, None)]
@@ -463,24 +469,34 @@
         ui.status(_("no changes found\n"))
     return revs
 
+def _msgid(node, timestamp):
+    hostname = encoding.strtolocal(socket.getfqdn())
+    hostname = encoding.environ.get('HGHOSTNAME', hostname)
+    return '<%s.%d@%s>' % (node, timestamp, hostname)
+
 emailopts = [
     ('', 'body', None, _('send patches as inline message text (default)')),
     ('a', 'attach', None, _('send patches as attachments')),
     ('i', 'inline', None, _('send patches as inline attachments')),
-    ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
-    ('c', 'cc', [], _('email addresses of copy recipients')),
+    ('', 'bcc', [],
+     _('email addresses of blind carbon copy recipients'), _('EMAIL')),
+    ('c', 'cc', [], _('email addresses of copy recipients'), _('EMAIL')),
     ('', 'confirm', None, _('ask for confirmation before sending')),
     ('d', 'diffstat', None, _('add diffstat output to messages')),
-    ('', 'date', '', _('use the given date as the sending date')),
-    ('', 'desc', '', _('use the given file as the series description')),
-    ('f', 'from', '', _('email address of sender')),
+    ('', 'date', '', _('use the given date as the sending date'), _('DATE')),
+    ('', 'desc', '',
+     _('use the given file as the series description'), _('FILE')),
+    ('f', 'from', '', _('email address of sender'), _('EMAIL')),
     ('n', 'test', None, _('print messages that would be sent')),
-    ('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
-    ('', 'reply-to', [], _('email addresses replies should be sent to')),
-    ('s', 'subject', '', _('subject of first message (intro or single patch)')),
-    ('', 'in-reply-to', '', _('message identifier to reply to')),
-    ('', 'flag', [], _('flags to add in subject prefixes')),
-    ('t', 'to', [], _('email addresses of recipients'))]
+    ('m', 'mbox', '',
+     _('write messages to mbox file instead of sending them'), _('FILE')),
+    ('', 'reply-to', [],
+     _('email addresses replies should be sent to'), _('EMAIL')),
+    ('s', 'subject', '',
+     _('subject of first message (intro or single patch)'), _('TEXT')),
+    ('', 'in-reply-to', '', _('message identifier to reply to'), _('MSGID')),
+    ('', 'flag', [], _('flags to add in subject prefixes'), _('FLAG')),
+    ('t', 'to', [], _('email addresses of recipients'), _('EMAIL'))]
 
 @command('email',
     [('g', 'git', None, _('use git extended diff format')),
@@ -488,7 +504,8 @@
     ('o', 'outgoing', None,
      _('send changes not found in the target repository')),
     ('b', 'bundle', None, _('send changes not in target as a binary bundle')),
-    ('B', 'bookmark', '', _('send changes only reachable by given bookmark')),
+    ('B', 'bookmark', '',
+     _('send changes only reachable by given bookmark'), _('BOOKMARK')),
     ('', 'bundlename', 'bundle',
      _('name of the bundle attachment file'), _('NAME')),
     ('r', 'rev', [], _('a revision to send'), _('REV')),
@@ -498,7 +515,8 @@
        '(with -b/--bundle)'), _('REV')),
     ('', 'intro', None, _('send an introduction email for a single patch')),
     ] + emailopts + cmdutil.remoteopts,
-    _('hg email [OPTION]... [DEST]...'))
+    _('hg email [OPTION]... [DEST]...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def email(ui, repo, *revs, **opts):
     '''send changesets by email
 
@@ -652,7 +670,7 @@
                 if not known[idx]:
                     missing.append(h)
             if missing:
-                if 1 < len(missing):
+                if len(missing) > 1:
                     msg = _('public "%s" is missing %s and %i others')
                     msg %= (publicurl, missing[0], len(missing) - 1)
                 else:
@@ -671,8 +689,7 @@
         start_time = dateutil.makedate()
 
     def genmsgid(id):
-        return '<%s.%d@%s>' % (id[:20], int(start_time[0]),
-                               encoding.strtolocal(socket.getfqdn()))
+        return _msgid(id[:20], int(start_time[0]))
 
     # deprecated config: patchbomb.from
     sender = (opts.get('from') or ui.config('email', 'from') or
@@ -780,10 +797,27 @@
             m['Bcc'] = ', '.join(bcc)
         if replyto:
             m['Reply-To'] = ', '.join(replyto)
+        # Fix up all headers to be native strings.
+        # TODO(durin42): this should probably be cleaned up above in the future.
+        if pycompat.ispy3:
+            for hdr, val in list(m.items()):
+                change = False
+                if isinstance(hdr, bytes):
+                    del m[hdr]
+                    hdr = pycompat.strurl(hdr)
+                    change = True
+                if isinstance(val, bytes):
+                    val = pycompat.strurl(val)
+                    if not change:
+                        # prevent duplicate headers
+                        del m[hdr]
+                    change = True
+                if change:
+                    m[hdr] = val
         if opts.get('test'):
             ui.status(_('displaying '), subj, ' ...\n')
             ui.pager('email')
-            generator = emailgen.Generator(ui, mangle_from_=False)
+            generator = _bytesgenerator(ui, mangle_from_=False)
             try:
                 generator.flatten(m, 0)
                 ui.write('\n')
@@ -799,8 +833,10 @@
                 # Exim does not remove the Bcc field
                 del m['Bcc']
             fp = stringio()
-            generator = emailgen.Generator(fp, mangle_from_=False)
+            generator = _bytesgenerator(fp, mangle_from_=False)
             generator.flatten(m, 0)
-            sendmail(sender_addr, to + bcc + cc, fp.getvalue())
+            alldests = to + bcc + cc
+            alldests = [encoding.strfromlocal(d) for d in alldests]
+            sendmail(sender_addr, alldests, fp.getvalue())
 
     progress.complete()
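The ``_bytesgenerator`` alias selected earlier in this file matters for the two ``flatten()`` calls above: on Python 3, ``email.generator.Generator`` writes ``str``, while ``ui`` and ``stringio`` carry bytes, so ``BytesGenerator`` is the right fit. A self-contained sketch with an illustrative message::

    import io
    from email.generator import BytesGenerator
    from email.message import Message

    msg = Message()
    msg['Subject'] = 'patch 1 of 3'
    msg.set_payload('diff --git a/f b/f\n')

    buf = io.BytesIO()
    # BytesGenerator serializes the message as bytes, which is what the
    # sendmail path ultimately consumes.
    BytesGenerator(buf, mangle_from_=False).flatten(msg)
    raw = buf.getvalue()
    assert raw.startswith(b'Subject: patch 1 of 3')
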
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/phabricator.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,986 @@
+# phabricator.py - simple Phabricator integration
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""simple Phabricator integration (EXPERIMENTAL)
+
+This extension provides a ``phabsend`` command which sends a stack of
+changesets to Phabricator, and a ``phabread`` command which prints a stack of
+revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
+to update statuses in batch.
+
+By default, Phabricator requires a ``Test Plan``, which might prevent some
+changesets from being sent. The requirement can be disabled by changing the
+``differential.require-test-plan-field`` config on the server side.
+
+Config::
+
+    [phabricator]
+    # Phabricator URL
+    url = https://phab.example.com/
+
+    # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
+    # callsign is "FOO".
+    callsign = FOO
+
+    # curl command to use. If not set (default), use builtin HTTP library to
+    # communicate. If set, use the specified curl command. This could be useful
+    # if you need to specify advanced options that are not easily supported by
+    # the internal library.
+    curlcmd = curl --connect-timeout 2 --retry 3 --silent
+
+    [auth]
+    example.schemes = https
+    example.prefix = phab.example.com
+
+    # API token. Get it from https://$HOST/conduit/login/
+    example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
+"""
+
+from __future__ import absolute_import
+
+import itertools
+import json
+import operator
+import re
+
+from mercurial.node import bin, nullid
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    context,
+    encoding,
+    error,
+    httpconnection as httpconnectionmod,
+    mdiff,
+    obsutil,
+    parser,
+    patch,
+    registrar,
+    scmutil,
+    smartset,
+    tags,
+    templateutil,
+    url as urlmod,
+    util,
+)
+from mercurial.utils import (
+    procutil,
+    stringutil,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+# developer config: phabricator.batchsize
+configitem(b'phabricator', b'batchsize',
+    default=12,
+)
+configitem(b'phabricator', b'callsign',
+    default=None,
+)
+configitem(b'phabricator', b'curlcmd',
+    default=None,
+)
+# developer config: phabricator.repophid
+configitem(b'phabricator', b'repophid',
+    default=None,
+)
+configitem(b'phabricator', b'url',
+    default=None,
+)
+configitem(b'phabsend', b'confirm',
+    default=False,
+)
+
+colortable = {
+    b'phabricator.action.created': b'green',
+    b'phabricator.action.skipped': b'magenta',
+    b'phabricator.action.updated': b'magenta',
+    b'phabricator.desc': b'',
+    b'phabricator.drev': b'bold',
+    b'phabricator.node': b'',
+}
+
+_VCR_FLAGS = [
+    (b'', b'test-vcr', b'',
+     _(b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
+       b', otherwise will mock all http requests using the specified vcr file.'
+       b' (ADVANCED)'
+     )),
+]
+
+def vcrcommand(name, flags, spec):
+    fullflags = flags + _VCR_FLAGS
+    def decorate(fn):
+        def inner(*args, **kwargs):
+            cassette = kwargs.pop(r'test_vcr', None)
+            if cassette:
+                import hgdemandimport
+                with hgdemandimport.deactivated():
+                    import vcr as vcrmod
+                    import vcr.stubs as stubs
+                    vcr = vcrmod.VCR(
+                        serializer=r'json',
+                        custom_patches=[
+                            (urlmod, 'httpconnection', stubs.VCRHTTPConnection),
+                            (urlmod, 'httpsconnection',
+                             stubs.VCRHTTPSConnection),
+                        ])
+                    with vcr.use_cassette(cassette):
+                        return fn(*args, **kwargs)
+            return fn(*args, **kwargs)
+        inner.__name__ = fn.__name__
+        return command(name, fullflags, spec)(inner)
+    return decorate
+
+def urlencodenested(params):
+    """like urlencode, but works with nested parameters.
+
+    For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
+    flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
+    urlencode. Note: the encoding is consistent with PHP's http_build_query.
+    """
+    flatparams = util.sortdict()
+    def process(prefix, obj):
+        items = {list: enumerate, dict: lambda x: x.items()}.get(type(obj))
+        if items is None:
+            flatparams[prefix] = obj
+        else:
+            for k, v in items(obj):
+                if prefix:
+                    process(b'%s[%s]' % (prefix, k), v)
+                else:
+                    process(k, v)
+    process(b'', params)
+    return util.urlreq.urlencode(flatparams)
+
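A plain-Python rendition of the flattening that ``urlencodenested`` performs, run on the docstring's example input (native strings here, for brevity)::

    from urllib.parse import urlencode

    def flatten(prefix, obj, out):
        # Lists become a[0], a[1], ...; dicts become d[e]; anything else
        # is a leaf value.
        if isinstance(obj, list):
            items = enumerate(obj)
        elif isinstance(obj, dict):
            items = obj.items()
        else:
            out[prefix] = obj
            return
        for k, v in items:
            flatten('%s[%s]' % (prefix, k) if prefix else str(k), v, out)

    out = {}
    flatten('', {'a': ['b', 'c'], 'd': {'e': 'f'}}, out)
    # Yields a%5B0%5D=b&a%5B1%5D=c&d%5Be%5D=f -- i.e. a[0]=b&a[1]=c&d[e]=f
    # with the brackets percent-encoded, matching PHP's http_build_query.
    print(urlencode(out))
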
+def readurltoken(repo):
+    """return conduit url, token and make sure they exist
+
+    Currently read from [auth] config section. In the future, it might
+    make sense to read from .arcconfig and .arcrc as well.
+    """
+    url = repo.ui.config(b'phabricator', b'url')
+    if not url:
+        raise error.Abort(_(b'config %s.%s is required')
+                          % (b'phabricator', b'url'))
+
+    res = httpconnectionmod.readauthforuri(repo.ui, url, util.url(url).user)
+    token = None
+
+    if res:
+        group, auth = res
+
+        repo.ui.debug(b"using auth.%s.* for authentication\n" % group)
+
+        token = auth.get(b'phabtoken')
+
+    if not token:
+        raise error.Abort(_(b'Can\'t find conduit token associated with %s')
+                            % (url,))
+
+    return url, token
+
+def callconduit(repo, name, params):
+    """call Conduit API, params is a dict. return json.loads result, or None"""
+    host, token = readurltoken(repo)
+    url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
+    repo.ui.debug(b'Conduit Call: %s %s\n' % (url, params))
+    params = params.copy()
+    params[b'api.token'] = token
+    data = urlencodenested(params)
+    curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
+    if curlcmd:
+        sin, sout = procutil.popen2(b'%s -d @- %s'
+                                    % (curlcmd, procutil.shellquote(url)))
+        sin.write(data)
+        sin.close()
+        body = sout.read()
+    else:
+        urlopener = urlmod.opener(repo.ui, authinfo)
+        request = util.urlreq.request(url, data=data)
+        body = urlopener.open(request).read()
+    repo.ui.debug(b'Conduit Response: %s\n' % body)
+    parsed = json.loads(body)
+    if parsed.get(r'error_code'):
+        msg = (_(b'Conduit Error (%s): %s')
+               % (parsed[r'error_code'], parsed[r'error_info']))
+        raise error.Abort(msg)
+    return parsed[r'result']
+
+@vcrcommand(b'debugcallconduit', [], _(b'METHOD'))
+def debugcallconduit(ui, repo, name):
+    """call Conduit API
+
+    Call parameters are read from stdin as a JSON blob. Result will be written
+    to stdout as a JSON blob.
+    """
+    params = json.loads(ui.fin.read())
+    result = callconduit(repo, name, params)
+    s = json.dumps(result, sort_keys=True, indent=2, separators=(b',', b': '))
+    ui.write(b'%s\n' % s)
+
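Stripped of Mercurial's url/auth plumbing and the curl fallback, a Conduit call is a form-encoded POST with the API token folded into the parameters. A rough standard-library equivalent (host and token are placeholders, and nested parameters would still need the flattening above)::

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    def simplecallconduit(host, token, name, params):
        params = dict(params)
        params['api.token'] = token      # the token rides along as a param
        data = urlencode(params).encode('ascii')
        with urlopen('%s/api/%s' % (host, name), data=data) as resp:
            parsed = json.loads(resp.read())
        if parsed.get('error_code'):
            raise RuntimeError('Conduit Error (%s): %s'
                               % (parsed['error_code'],
                                  parsed['error_info']))
        return parsed['result']

    # e.g. simplecallconduit('https://phab.example.com', 'cli-xxxx',
    #                        'conduit.ping', {})
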
+def getrepophid(repo):
+    """given callsign, return repository PHID or None"""
+    # developer config: phabricator.repophid
+    repophid = repo.ui.config(b'phabricator', b'repophid')
+    if repophid:
+        return repophid
+    callsign = repo.ui.config(b'phabricator', b'callsign')
+    if not callsign:
+        return None
+    query = callconduit(repo, b'diffusion.repository.search',
+                        {b'constraints': {b'callsigns': [callsign]}})
+    if len(query[r'data']) == 0:
+        return None
+    repophid = encoding.strtolocal(query[r'data'][0][r'phid'])
+    repo.ui.setconfig(b'phabricator', b'repophid', repophid)
+    return repophid
+
+_differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
+_differentialrevisiondescre = re.compile(
+    br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
+
+def getoldnodedrevmap(repo, nodelist):
+    """find previous nodes that has been sent to Phabricator
+
+    return {node: (oldnode, Differential diff, Differential Revision ID)}
+    for node in nodelist with known previously sent versions, or associated
+    Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
+    be ``None``.
+
+    Examines commit messages like "Differential Revision:" to get the
+    association information.
+
+    If no such commit message line is found, examines all precursors and
+    their tags. Tags formatted like "D1234" are considered a match, and the
+    node with that tag, together with the number after "D" (ex. 1234), is
+    returned.
+
+    The ``old node``, if not None, is guaranteed to be the last diff of the
+    corresponding Differential Revision, and to exist in the repo.
+    """
+    url, token = readurltoken(repo)
+    unfi = repo.unfiltered()
+    nodemap = unfi.changelog.nodemap
+
+    result = {} # {node: (oldnode?, lastdiff?, drev)}
+    toconfirm = {} # {node: (force, {precnode}, drev)}
+    for node in nodelist:
+        ctx = unfi[node]
+        # For tags like "D123", put them into "toconfirm" to verify later
+        precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
+        for n in precnodes:
+            if n in nodemap:
+                for tag in unfi.nodetags(n):
+                    m = _differentialrevisiontagre.match(tag)
+                    if m:
+                        toconfirm[node] = (0, set(precnodes), int(m.group(1)))
+                        continue
+
+        # Check commit message
+        m = _differentialrevisiondescre.search(ctx.description())
+        if m:
+            toconfirm[node] = (1, set(precnodes), int(m.group(b'id')))
+
+    # Double check that tags are genuine by collecting all old nodes from
+    # Phabricator, and expecting the precursors to overlap with them.
+    if toconfirm:
+        drevs = [drev for force, precs, drev in toconfirm.values()]
+        alldiffs = callconduit(unfi, b'differential.querydiffs',
+                               {b'revisionIDs': drevs})
+        getnode = lambda d: bin(encoding.unitolocal(
+            getdiffmeta(d).get(r'node', b''))) or None
+        for newnode, (force, precset, drev) in toconfirm.items():
+            diffs = [d for d in alldiffs.values()
+                     if int(d[r'revisionID']) == drev]
+
+            # "precursors" as known by Phabricator
+            phprecset = set(getnode(d) for d in diffs)
+
+            # Ignore if precursors (Phabricator and local repo) do not overlap,
+            # and force is not set (when commit message says nothing)
+            if not force and not bool(phprecset & precset):
+                tagname = b'D%d' % drev
+                tags.tag(repo, tagname, nullid, message=None, user=None,
+                         date=None, local=True)
+                unfi.ui.warn(_(b'D%s: local tag removed - does not match '
+                               b'Differential history\n') % drev)
+                continue
+
+            # Find the last node using Phabricator metadata, and make sure it
+            # exists in the repo
+            oldnode = lastdiff = None
+            if diffs:
+                lastdiff = max(diffs, key=lambda d: int(d[r'id']))
+                oldnode = getnode(lastdiff)
+                if oldnode and oldnode not in nodemap:
+                    oldnode = None
+
+            result[newnode] = (oldnode, lastdiff, drev)
+
+    return result
+
+def getdiff(ctx, diffopts):
+    """plain-text diff without header (user, commit message, etc)"""
+    output = util.stringio()
+    for chunk, _label in patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
+                                      None, opts=diffopts):
+        output.write(chunk)
+    return output.getvalue()
+
+def creatediff(ctx):
+    """create a Differential Diff"""
+    repo = ctx.repo()
+    repophid = getrepophid(repo)
+    # Create a "Differential Diff" via "differential.createrawdiff" API
+    params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
+    if repophid:
+        params[b'repositoryPHID'] = repophid
+    diff = callconduit(repo, b'differential.createrawdiff', params)
+    if not diff:
+        raise error.Abort(_(b'cannot create diff for %s') % ctx)
+    return diff
+
+def writediffproperties(ctx, diff):
+    """write metadata to diff so patches could be applied losslessly"""
+    params = {
+        b'diff_id': diff[r'id'],
+        b'name': b'hg:meta',
+        b'data': json.dumps({
+            b'user': ctx.user(),
+            b'date': b'%d %d' % ctx.date(),
+            b'node': ctx.hex(),
+            b'parent': ctx.p1().hex(),
+        }),
+    }
+    callconduit(ctx.repo(), b'differential.setdiffproperty', params)
+
+    params = {
+        b'diff_id': diff[r'id'],
+        b'name': b'local:commits',
+        b'data': json.dumps({
+            ctx.hex(): {
+                b'author': stringutil.person(ctx.user()),
+                b'authorEmail': stringutil.email(ctx.user()),
+                b'time': ctx.date()[0],
+            },
+        }),
+    }
+    callconduit(ctx.repo(), b'differential.setdiffproperty', params)
+
+def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
+                               olddiff=None, actions=None):
+    """create or update a Differential Revision
+
+    If revid is None, create a new Differential Revision, otherwise update
+    revid. If parentrevid is not None, set it as a dependency.
+
+    If oldnode is not None, check if the patch content (without commit message
+    and metadata) has changed before creating another diff.
+
+    If actions is not None, they will be appended to the transaction.
+    """
+    repo = ctx.repo()
+    if oldnode:
+        diffopts = mdiff.diffopts(git=True, context=32767)
+        oldctx = repo.unfiltered()[oldnode]
+        neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
+    else:
+        neednewdiff = True
+
+    transactions = []
+    if neednewdiff:
+        diff = creatediff(ctx)
+        transactions.append({b'type': b'update', b'value': diff[r'phid']})
+    else:
+        # Even if we don't need to upload a new diff because the patch content
+        # does not change, we might still need to update its metadata so
+        # pushers can know the correct node metadata.
+        assert olddiff
+        diff = olddiff
+    writediffproperties(ctx, diff)
+
+    # Use a temporary summary to set the dependency. There might be better
+    # ways, but I cannot find them for now. Do not do that if we are updating
+    # an existing revision (revid is not None), since that introduces visible
+    # churn (someone edited "Summary" twice) on the web page.
+    if parentrevid and revid is None:
+        summary = b'Depends on D%s' % parentrevid
+        transactions += [{b'type': b'summary', b'value': summary},
+                         {b'type': b'summary', b'value': b' '}]
+
+    if actions:
+        transactions += actions
+
+    # Parse commit message and update related fields.
+    desc = ctx.description()
+    info = callconduit(repo, b'differential.parsecommitmessage',
+                       {b'corpus': desc})
+    for k, v in info[r'fields'].items():
+        if k in [b'title', b'summary', b'testPlan']:
+            transactions.append({b'type': k, b'value': v})
+
+    params = {b'transactions': transactions}
+    if revid is not None:
+        # Update an existing Differential Revision
+        params[b'objectIdentifier'] = revid
+
+    revision = callconduit(repo, b'differential.revision.edit', params)
+    if not revision:
+        raise error.Abort(_(b'cannot create revision for %s') % ctx)
+
+    return revision, diff
+
+def userphids(repo, names):
+    """convert user names to PHIDs"""
+    query = {b'constraints': {b'usernames': names}}
+    result = callconduit(repo, b'user.search', query)
+    # An unknown username is not an error from the API, so check whether we
+    # have missed any names here.
+    data = result[r'data']
+    resolved = set(entry[r'fields'][r'username'] for entry in data)
+    unresolved = set(names) - resolved
+    if unresolved:
+        raise error.Abort(_(b'unknown username: %s')
+                          % b' '.join(sorted(unresolved)))
+    return [entry[r'phid'] for entry in data]
+
+@vcrcommand(b'phabsend',
+         [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
+          (b'', b'amend', True, _(b'update commit messages')),
+          (b'', b'reviewer', [], _(b'specify reviewers')),
+          (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
+         _(b'REV [OPTIONS]'))
+def phabsend(ui, repo, *revs, **opts):
+    """upload changesets to Phabricator
+
+    If multiple revisions are specified, they will be sent as a stack with
+    linear dependencies in the order specified by the revset.
+
+    When changesets are uploaded for the first time, local tags will be
+    created to maintain the association. After that, phabsend will check the
+    obsstore and tag information to figure out whether to update an existing
+    Differential Revision, or create a new one.
+
+    If --amend is set, update commit messages so they have the
+    ``Differential Revision`` URL, and remove related tags. This is similar
+    to what arcanist does, and is preferable in author-push workflows.
+    Otherwise, use local tags to record the ``Differential Revision``
+    association.
+
+    The --confirm option lets you confirm changesets before sending them. You
+    can also add the following to your configuration file to make it the
+    default behaviour::
+
+        [phabsend]
+        confirm = true
+
+    phabsend will check obsstore and the above association to decide whether to
+    update an existing Differential Revision, or create a new one.
+    """
+    revs = list(revs) + opts.get(b'rev', [])
+    revs = scmutil.revrange(repo, revs)
+
+    if not revs:
+        raise error.Abort(_(b'phabsend requires at least one changeset'))
+    if opts.get(b'amend'):
+        cmdutil.checkunfinished(repo)
+
+    # {newnode: (oldnode, olddiff, olddrev}
+    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
+
+    confirm = ui.configbool(b'phabsend', b'confirm')
+    confirm |= bool(opts.get(b'confirm'))
+    if confirm:
+        confirmed = _confirmbeforesend(repo, revs, oldmap)
+        if not confirmed:
+            raise error.Abort(_(b'phabsend cancelled'))
+
+    actions = []
+    reviewers = opts.get(b'reviewer', [])
+    if reviewers:
+        phids = userphids(repo, reviewers)
+        actions.append({b'type': b'reviewers.add', b'value': phids})
+
+    drevids = [] # [int]
+    diffmap = {} # {newnode: diff}
+
+    # Send patches one by one so we know their Differential Revision IDs and
+    # can provide dependency relationship
+    lastrevid = None
+    for rev in revs:
+        ui.debug(b'sending rev %d\n' % rev)
+        ctx = repo[rev]
+
+        # Get Differential Revision ID
+        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
+        if oldnode != ctx.node() or opts.get(b'amend'):
+            # Create or update Differential Revision
+            revision, diff = createdifferentialrevision(
+                ctx, revid, lastrevid, oldnode, olddiff, actions)
+            diffmap[ctx.node()] = diff
+            newrevid = int(revision[r'object'][r'id'])
+            if revid:
+                action = b'updated'
+            else:
+                action = b'created'
+
+            # Create a local tag to note the association, if the commit
+            # message does not have it already
+            m = _differentialrevisiondescre.search(ctx.description())
+            if not m or int(m.group(b'id')) != newrevid:
+                tagname = b'D%d' % newrevid
+                tags.tag(repo, tagname, ctx.node(), message=None, user=None,
+                         date=None, local=True)
+        else:
+            # Nothing changed. But still set "newrevid" so the next revision
+            # could depend on this one.
+            newrevid = revid
+            action = b'skipped'
+
+        actiondesc = ui.label(
+            {b'created': _(b'created'),
+             b'skipped': _(b'skipped'),
+             b'updated': _(b'updated')}[action],
+            b'phabricator.action.%s' % action)
+        drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev')
+        nodedesc = ui.label(bytes(ctx), b'phabricator.node')
+        desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
+        ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
+                                             desc))
+        drevids.append(newrevid)
+        lastrevid = newrevid
+
+    # Update commit messages and remove tags
+    if opts.get(b'amend'):
+        unfi = repo.unfiltered()
+        drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
+        with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
+            wnode = unfi[b'.'].node()
+            mapping = {} # {oldnode: [newnode]}
+            for i, rev in enumerate(revs):
+                old = unfi[rev]
+                drevid = drevids[i]
+                drev = [d for d in drevs if int(d[r'id']) == drevid][0]
+                newdesc = getdescfromdrev(drev)
+                newdesc = encoding.unitolocal(newdesc)
+                # Make sure the commit message contains "Differential Revision"
+                if old.description() != newdesc:
+                    parents = [
+                        mapping.get(old.p1().node(), (old.p1(),))[0],
+                        mapping.get(old.p2().node(), (old.p2(),))[0],
+                    ]
+                    new = context.metadataonlyctx(
+                        repo, old, parents=parents, text=newdesc,
+                        user=old.user(), date=old.date(), extra=old.extra())
+
+                    newnode = new.commit()
+
+                    mapping[old.node()] = [newnode]
+                    # Update diff property
+                    writediffproperties(unfi[newnode], diffmap[old.node()])
+                # Remove local tags since they are no longer necessary
+                tagname = b'D%d' % drevid
+                if tagname in repo.tags():
+                    tags.tag(repo, tagname, nullid, message=None, user=None,
+                             date=None, local=True)
+            scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
+            if wnode in mapping:
+                unfi.setparents(mapping[wnode][0])
+
+# Map from "hg:meta" keys to header understood by "hg import". The order is
+# consistent with "hg export" output.
+_metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'),
+                              (r'node', b'Node ID'), (r'parent', b'Parent ')])
+
+def _confirmbeforesend(repo, revs, oldmap):
+    url, token = readurltoken(repo)
+    ui = repo.ui
+    for rev in revs:
+        ctx = repo[rev]
+        desc = ctx.description().splitlines()[0]
+        oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
+        if drevid:
+            drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
+        else:
+            drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
+
+        ui.write(_(b'%s - %s: %s\n')
+                 % (drevdesc,
+                    ui.label(bytes(ctx), b'phabricator.node'),
+                    ui.label(desc, b'phabricator.desc')))
+
+    if ui.promptchoice(_(b'Send the above changes to %s (yn)?'
+                         b'$$ &Yes $$ &No') % url):
+        return False
+
+    return True
+
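The prompt string above uses Mercurial's choice syntax: ``$$`` separates the question from each choice, ``&`` marks the response key, and ``ui.promptchoice`` returns the zero-based index of the answer, so ``Yes`` (index 0) is falsy and execution falls through to ``return True``. A small illustration of the format with a hypothetical parsing helper::

    def splitchoices(prompt):
        # 'question $$ &Yes $$ &No': '$$' splits parts, '&' precedes the key
        parts = [p.strip() for p in prompt.split('$$')]
        question, choices = parts[0], parts[1:]
        keys = [c[c.index('&') + 1].lower() for c in choices]
        return question, choices, keys

    q, choices, keys = splitchoices(
        'Send the above changes to %s (yn)?$$ &Yes $$ &No')
    assert keys == ['y', 'n']   # answering 'y' selects index 0 -> proceed
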
+_knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
+                     b'abandoned'}
+
+def _getstatusname(drev):
+    """get normalized status name from a Differential Revision"""
+    return drev[r'statusName'].replace(b' ', b'').lower()
+
+# Small language to specify differential revisions. Support symbols: (), :X,
+# +, and -.
+
+_elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    b'(':      (12, None, (b'group', 1, b')'), None, None),
+    b':':      (8, None, (b'ancestors', 8), None, None),
+    b'&':      (5,  None, None, (b'and_', 5), None),
+    b'+':      (4,  None, None, (b'add', 4), None),
+    b'-':      (4,  None, None, (b'sub', 4), None),
+    b')':      (0,  None, None, None, None),
+    b'symbol': (0, b'symbol', None, None, None),
+    b'end':    (0, None, None, None, None),
+}
+
+def _tokenize(text):
+    view = memoryview(text) # zero-copy slice
+    special = b'():+-& '
+    pos = 0
+    length = len(text)
+    while pos < length:
+        symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
+                                              view[pos:]))
+        if symbol:
+            yield (b'symbol', symbol, pos)
+            pos += len(symbol)
+        else: # special char, ignore space
+            if text[pos] != b' ':
+                yield (text[pos], None, pos)
+            pos += 1
+    yield (b'end', None, pos)
+
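Fed the docstring example from ``phabread`` below, the tokenizer yields symbols and single-character operators with their offsets, which is exactly what ``_parse`` feeds to ``parser.parser``. A native-string rendition of the same logic, with its output::

    import itertools

    def tokenize(text, special='():+-& '):
        pos = 0
        while pos < len(text):
            symbol = ''.join(itertools.takewhile(
                lambda ch: ch not in special, text[pos:]))
            if symbol:
                yield ('symbol', symbol, pos)
                pos += len(symbol)
            else:
                if text[pos] != ' ':    # special char; spaces are skipped
                    yield (text[pos], None, pos)
                pos += 1
        yield ('end', None, pos)

    print(list(tokenize(':D6+8-(2+D4)')))
    # [(':', None, 0), ('symbol', 'D6', 1), ('+', None, 3),
    #  ('symbol', '8', 4), ('-', None, 5), ('(', None, 6),
    #  ('symbol', '2', 7), ('+', None, 8), ('symbol', 'D4', 9),
    #  (')', None, 11), ('end', None, 12)]
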
+def _parse(text):
+    tree, pos = parser.parser(_elements).parse(_tokenize(text))
+    if pos != len(text):
+        raise error.ParseError(b'invalid token', pos)
+    return tree
+
+def _parsedrev(symbol):
+    """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
+    if symbol.startswith(b'D') and symbol[1:].isdigit():
+        return int(symbol[1:])
+    if symbol.isdigit():
+        return int(symbol)
+
+def _prefetchdrevs(tree):
+    """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
+    drevs = set()
+    ancestordrevs = set()
+    op = tree[0]
+    if op == b'symbol':
+        r = _parsedrev(tree[1])
+        if r:
+            drevs.add(r)
+    elif op == b'ancestors':
+        r, a = _prefetchdrevs(tree[1])
+        drevs.update(r)
+        ancestordrevs.update(r)
+        ancestordrevs.update(a)
+    else:
+        for t in tree[1:]:
+            r, a = _prefetchdrevs(t)
+            drevs.update(r)
+            ancestordrevs.update(a)
+    return drevs, ancestordrevs
+
+def querydrev(repo, spec):
+    """return a list of "Differential Revision" dicts
+
+    spec is a string using a simple query language, see docstring in phabread
+    for details.
+
+    A "Differential Revision dict" looks like:
+
+        {
+            "id": "2",
+            "phid": "PHID-DREV-672qvysjcczopag46qty",
+            "title": "example",
+            "uri": "https://phab.example.com/D2",
+            "dateCreated": "1499181406",
+            "dateModified": "1499182103",
+            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
+            "status": "0",
+            "statusName": "Needs Review",
+            "properties": [],
+            "branch": null,
+            "summary": "",
+            "testPlan": "",
+            "lineCount": "2",
+            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
+            "diffs": [
+              "3",
+              "4",
+            ],
+            "commits": [],
+            "reviewers": [],
+            "ccs": [],
+            "hashes": [],
+            "auxiliary": {
+              "phabricator:projects": [],
+              "phabricator:depends-on": [
+                "PHID-DREV-gbapp366kutjebt7agcd"
+              ]
+            },
+            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
+            "sourcePath": null
+        }
+    """
+    def fetch(params):
+        """params -> single drev or None"""
+        key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
+        if key in prefetched:
+            return prefetched[key]
+        drevs = callconduit(repo, b'differential.query', params)
+        # Fill prefetched with the result
+        for drev in drevs:
+            prefetched[drev[r'phid']] = drev
+            prefetched[int(drev[r'id'])] = drev
+        if key not in prefetched:
+            raise error.Abort(_(b'cannot get Differential Revision %r')
+                              % params)
+        return prefetched[key]
+
+    def getstack(topdrevids):
+        """given a top, get a stack from the bottom, [id] -> [id]"""
+        visited = set()
+        result = []
+        queue = [{r'ids': [i]} for i in topdrevids]
+        while queue:
+            params = queue.pop()
+            drev = fetch(params)
+            if drev[r'id'] in visited:
+                continue
+            visited.add(drev[r'id'])
+            result.append(int(drev[r'id']))
+            auxiliary = drev.get(r'auxiliary', {})
+            depends = auxiliary.get(r'phabricator:depends-on', [])
+            for phid in depends:
+                queue.append({b'phids': [phid]})
+        result.reverse()
+        return smartset.baseset(result)
+
+    # Initialize prefetch cache
+    prefetched = {} # {id or phid: drev}
+
+    tree = _parse(spec)
+    drevs, ancestordrevs = _prefetchdrevs(tree)
+
+    # developer config: phabricator.batchsize
+    batchsize = repo.ui.configint(b'phabricator', b'batchsize')
+
+    # Prefetch Differential Revisions in batch
+    tofetch = set(drevs)
+    for r in ancestordrevs:
+        tofetch.update(range(max(1, r - batchsize), r + 1))
+    if drevs:
+        fetch({r'ids': list(tofetch)})
+    validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
+
+    # Walk through the tree, return smartsets
+    def walk(tree):
+        op = tree[0]
+        if op == b'symbol':
+            drev = _parsedrev(tree[1])
+            if drev:
+                return smartset.baseset([drev])
+            elif tree[1] in _knownstatusnames:
+                drevs = [r for r in validids
+                         if _getstatusname(prefetched[r]) == tree[1]]
+                return smartset.baseset(drevs)
+            else:
+                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
+        elif op in {b'and_', b'add', b'sub'}:
+            assert len(tree) == 3
+            return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
+        elif op == b'group':
+            return walk(tree[1])
+        elif op == b'ancestors':
+            return getstack(walk(tree[1]))
+        else:
+            raise error.ProgrammingError(b'illegal tree: %r' % tree)
+
+    return [prefetched[r] for r in walk(tree)]
+
+def getdescfromdrev(drev):
+    """get description (commit message) from "Differential Revision"
+
+    This is similar to the differential.getcommitmessage API, but we only care
+    about a limited set of fields: title, summary, test plan, and URL.
+    """
+    title = drev[r'title']
+    summary = drev[r'summary'].rstrip()
+    testplan = drev[r'testPlan'].rstrip()
+    if testplan:
+        testplan = b'Test Plan:\n%s' % testplan
+    uri = b'Differential Revision: %s' % drev[r'uri']
+    return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
+
+def getdiffmeta(diff):
+    """get commit metadata (date, node, user, p1) from a diff object
+
+    The metadata could be "hg:meta", sent by phabsend, like:
+
+        "properties": {
+          "hg:meta": {
+            "date": "1499571514 25200",
+            "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
+            "user": "Foo Bar <foo@example.com>",
+            "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
+          }
+        }
+
+    Or converted from "local:commits", sent by "arc", like:
+
+        "properties": {
+          "local:commits": {
+            "98c08acae292b2faf60a279b4189beb6cff1414d": {
+              "author": "Foo Bar",
+              "time": 1499546314,
+              "branch": "default",
+              "tag": "",
+              "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
+              "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
+              "local": "1000",
+              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
+              "summary": "...",
+              "message": "...",
+              "authorEmail": "foo@example.com"
+            }
+          }
+        }
+
+    Note: metadata extracted from "local:commits" will lose time zone
+    information.
+    """
+    props = diff.get(r'properties') or {}
+    meta = props.get(r'hg:meta')
+    if not meta and props.get(r'local:commits'):
+        commit = sorted(props[r'local:commits'].values())[0]
+        meta = {
+            r'date': r'%d 0' % commit[r'time'],
+            r'node': commit[r'rev'],
+            r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
+        }
+        if len(commit.get(r'parents', ())) >= 1:
+            meta[r'parent'] = commit[r'parents'][0]
+    return meta or {}
+
+def readpatch(repo, drevs, write):
+    """generate plain-text patch readable by 'hg import'
+
+    write is usually ui.write. drevs is what "querydrev" returns, results of
+    "differential.query".
+    """
+    # Prefetch hg:meta property for all diffs
+    diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
+    diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})
+
+    # Generate patch for each drev
+    for drev in drevs:
+        repo.ui.note(_(b'reading D%s\n') % drev[r'id'])
+
+        diffid = max(int(v) for v in drev[r'diffs'])
+        body = callconduit(repo, b'differential.getrawdiff',
+                           {b'diffID': diffid})
+        desc = getdescfromdrev(drev)
+        header = b'# HG changeset patch\n'
+
+        # Try to preserve metadata from hg:meta property. Write hg patch
+        # headers that can be read by the "import" command. See patchheadermap
+        # and extract in mercurial/patch.py for supported headers.
+        meta = getdiffmeta(diffs[str(diffid)])
+        for k in _metanamemap.keys():
+            if k in meta:
+                header += b'# %s %s\n' % (_metanamemap[k], meta[k])
+
+        content = b'%s%s\n%s' % (header, desc, body)
+        write(encoding.unitolocal(content))
+
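The result mirrors ``hg export`` output: the ``# HG changeset patch`` marker, the ``hg:meta`` fields rendered through ``_metanamemap`` in export order, then the commit message and raw diff. For the ``hg:meta`` example shown in ``getdiffmeta``, the generated header would look roughly like::

    # HG changeset patch
    # User Foo Bar <foo@example.com>
    # Date 1499571514 25200
    # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
    # Parent  6d0abad76b30e4724a37ab8721d630394070fe16

(note the trailing space baked into ``b'Parent '``, which yields the double space that ``hg export`` also emits).
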
+@vcrcommand(b'phabread',
+         [(b'', b'stack', False, _(b'read dependencies'))],
+         _(b'DREVSPEC [OPTIONS]'))
+def phabread(ui, repo, spec, **opts):
+    """print patches from Phabricator suitable for importing
+
+    DREVSPEC could be a Differential Revision identity, like ``D123``, or just
+    the number ``123``. It could also have common operators like ``+``, ``-``,
+    ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
+    select a stack.
+
+    ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
+    could be used to filter patches by status. For performance reasons, they
+    only represent a subset of non-status selections and cannot be used alone.
+
+    For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, and
+    excludes D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions
+    in a stack up to D9.
+
+    If --stack is given, follow the dependency information and read all
+    patches. It is equivalent to the ``:`` operator.
+    """
+    if opts.get(b'stack'):
+        spec = b':(%s)' % spec
+    drevs = querydrev(repo, spec)
+    readpatch(repo, drevs, ui.write)
+
+@vcrcommand(b'phabupdate',
+         [(b'', b'accept', False, _(b'accept revisions')),
+          (b'', b'reject', False, _(b'reject revisions')),
+          (b'', b'abandon', False, _(b'abandon revisions')),
+          (b'', b'reclaim', False, _(b'reclaim revisions')),
+          (b'm', b'comment', b'', _(b'comment on the last revision')),
+          ], _(b'DREVSPEC [OPTIONS]'))
+def phabupdate(ui, repo, spec, **opts):
+    """update Differential Revision in batch
+
+    DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
+    """
+    flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
+    if len(flags) > 1:
+        raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
+
+    actions = []
+    for f in flags:
+        actions.append({b'type': f, b'value': b'true'})
+
+    drevs = querydrev(repo, spec)
+    for i, drev in enumerate(drevs):
+        if i + 1 == len(drevs) and opts.get(b'comment'):
+            actions.append({b'type': b'comment', b'value': opts[b'comment']})
+        if actions:
+            params = {b'objectIdentifier': drev[r'phid'],
+                      b'transactions': actions}
+            callconduit(repo, b'differential.revision.edit', params)
+
+templatekeyword = registrar.templatekeyword()
+
+@templatekeyword(b'phabreview', requires={b'ctx'})
+def template_review(context, mapping):
+    """:phabreview: Object describing the review for this changeset.
+    Has attributes `url` and `id`.
+    """
+    ctx = context.resource(mapping, b'ctx')
+    m = _differentialrevisiondescre.search(ctx.description())
+    if m:
+        return templateutil.hybriddict({
+            b'url': m.group(b'url'),
+            b'id': b"D{}".format(m.group(b'id')),
+        })
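Both the tag bookkeeping in ``phabsend`` and the ``phabreview`` template keyword key off the ``Differential Revision:`` trailer matched by ``_differentialrevisiondescre``. A native-string rendition of that match, on an illustrative commit message::

    import re

    descre = re.compile(
        r'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$',
        re.M)

    desc = ('widget: fix frobnication\n'
            '\n'
            'Differential Revision: https://phab.example.com/D123')
    m = descre.search(desc)
    assert m.group('url') == 'https://phab.example.com/D123'
    assert m.group('id') == '123'
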
--- a/hgext/purge.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/purge.py	Mon Oct 22 14:46:06 2018 -0400
@@ -25,16 +25,13 @@
 '''command to delete untracked files from the working directory'''
 from __future__ import absolute_import
 
-import os
-
 from mercurial.i18n import _
 from mercurial import (
     cmdutil,
-    error,
+    merge as mergemod,
     pycompat,
     registrar,
     scmutil,
-    util,
 )
 
 cmdtable = {}
@@ -54,7 +51,8 @@
     ('0', 'print0', None, _('end filenames with NUL, for use with xargs'
                             ' (implies -p/--print)')),
     ] + cmdutil.walkopts,
-    _('hg purge [OPTION]... [DIR]...'))
+    _('hg purge [OPTION]... [DIR]...'),
+    helpcategory=command.CATEGORY_MAINTENANCE)
 def purge(ui, repo, *dirs, **opts):
     '''removes files not tracked by Mercurial
 
@@ -86,44 +84,28 @@
     option.
     '''
     opts = pycompat.byteskwargs(opts)
+
     act = not opts.get('print')
     eol = '\n'
     if opts.get('print0'):
         eol = '\0'
         act = False # --print0 implies --print
+
     removefiles = opts.get('files')
     removedirs = opts.get('dirs')
+
     if not removefiles and not removedirs:
         removefiles = True
         removedirs = True
 
-    def remove(remove_func, name):
-        if act:
-            try:
-                remove_func(repo.wjoin(name))
-            except OSError:
-                m = _('%s cannot be removed') % name
-                if opts.get('abort_on_err'):
-                    raise error.Abort(m)
-                ui.warn(_('warning: %s\n') % m)
-        else:
-            ui.write('%s%s' % (name, eol))
+    match = scmutil.match(repo[None], dirs, opts)
 
-    match = scmutil.match(repo[None], dirs, opts)
-    if removedirs:
-        directories = []
-        match.explicitdir = match.traversedir = directories.append
-    status = repo.status(match=match, ignored=opts.get('all'), unknown=True)
+    paths = mergemod.purge(
+        repo, match, ignored=opts.get('all', False),
+        removeemptydirs=removedirs, removefiles=removefiles,
+        abortonerror=opts.get('abort_on_err'),
+        noop=not act)
 
-    if removefiles:
-        for f in sorted(status.unknown + status.ignored):
-            if act:
-                ui.note(_('removing file %s\n') % f)
-            remove(util.unlink, f)
-
-    if removedirs:
-        for f in sorted(directories, reverse=True):
-            if match(f) and not os.listdir(repo.wjoin(f)):
-                if act:
-                    ui.note(_('removing directory %s\n') % f)
-                remove(os.rmdir, f)
+    for path in paths:
+        if not act:
+            ui.write('%s%s' % (path, eol))
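After this refactor the extension is a thin shim: matching and deletion live in ``merge.purge``, which returns the affected paths so the ``--print`` path can list them instead of deleting. A hedged sketch of driving the new helper directly, using the signature as introduced here (``listpurgeable`` is a hypothetical wrapper)::

    from mercurial import merge as mergemod, scmutil

    def listpurgeable(repo, patterns):
        # noop=True asks merge.purge to report rather than delete,
        # mirroring the --print behaviour of the command above.
        match = scmutil.match(repo[None], patterns, {})
        return list(mergemod.purge(
            repo, match, ignored=False, removeemptydirs=True,
            removefiles=True, abortonerror=False, noop=True))
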
--- a/hgext/rebase.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/rebase.py	Mon Oct 22 14:46:06 2018 -0400
@@ -177,6 +177,7 @@
         if e:
             self.extrafns = [e]
 
+        self.backupf = ui.configbool('ui', 'history-editing-backup')
         self.keepf = opts.get('keep', False)
         self.keepbranchesf = opts.get('keepbranches', False)
         self.obsoletenotrebased = {}
@@ -343,7 +344,9 @@
                 msg = _('cannot continue inconsistent rebase')
                 hint = _('use "hg rebase --abort" to clear broken state')
                 raise error.Abort(msg, hint=hint)
+
         if isabort:
+            backup = backup and self.backupf
             return abort(self.repo, self.originalwd, self.destmap, self.state,
                          activebookmark=self.activebookmark, backup=backup,
                          suppwarns=suppwarns)
@@ -632,7 +635,7 @@
         if self.collapsef and not self.keepf:
             collapsedas = newnode
         clearrebased(ui, repo, self.destmap, self.state, self.skipped,
-                     collapsedas, self.keepf, fm=fm)
+                     collapsedas, self.keepf, fm=fm, backup=self.backupf)
 
         clearstatus(repo)
         clearcollapsemsg(repo)
@@ -670,12 +673,14 @@
     ('D', 'detach', False, _('(DEPRECATED)')),
     ('i', 'interactive', False, _('(DEPRECATED)')),
     ('t', 'tool', '', _('specify merge tool')),
+    ('', 'stop', False, _('stop interrupted rebase')),
     ('c', 'continue', False, _('continue an interrupted rebase')),
     ('a', 'abort', False, _('abort an interrupted rebase')),
     ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
                                'in the specified revset (EXPERIMENTAL)')),
      ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
-    _('[-s REV | -b REV] [-d REV] [OPTION]'))
+    _('[-s REV | -b REV] [-d REV] [OPTION]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def rebase(ui, repo, **opts):
     """move changeset (and descendants) to a different branch
 
@@ -729,7 +734,8 @@
     deleted, there is no hook presently available for this.
 
     If a rebase is interrupted to manually resolve a conflict, it can be
-    continued with --continue/-c or aborted with --abort/-a.
+    continued with --continue/-c, aborted with --abort/-a, or stopped with
+    --stop.
 
     .. container:: verbose
 
@@ -800,22 +806,20 @@
     opts = pycompat.byteskwargs(opts)
     inmemory = ui.configbool('rebase', 'experimental.inmemory')
     dryrun = opts.get('dry_run')
-    if dryrun:
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --dry-run and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --dry-run and --continue'))
-    if opts.get('confirm'):
-        dryrun = True
-        if opts.get('dry_run'):
-            raise error.Abort(_('cannot specify both --confirm and --dry-run'))
-        if opts.get('abort'):
-            raise error.Abort(_('cannot specify both --confirm and --abort'))
-        if opts.get('continue'):
-            raise error.Abort(_('cannot specify both --confirm and --continue'))
+    confirm = opts.get('confirm')
+    selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)]
+    if len(selactions) > 1:
+        raise error.Abort(_('cannot use --%s with --%s')
+                          % tuple(selactions[:2]))
+    action = selactions[0] if selactions else None
+    if dryrun and action:
+        raise error.Abort(_('cannot specify both --dry-run and --%s') % action)
+    if confirm and action:
+        raise error.Abort(_('cannot specify both --confirm and --%s') % action)
+    if dryrun and confirm:
+        raise error.Abort(_('cannot specify both --confirm and --dry-run'))
 
-    if (opts.get('continue') or opts.get('abort') or
-        repo.currenttransaction() is not None):
+    if action or repo.currenttransaction() is not None:
         # in-memory rebase is not compatible with resuming rebases.
         # (Or if it is run within a transaction, since the restart logic can
         # fail the entire transaction.)
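The rewritten validation above collapses the old pairwise checks into one rule: at most one of ``abort``, ``stop``, ``continue`` may be selected, and none of them mixes with ``--dry-run`` or ``--confirm``. The pattern in isolation (``pickaction`` is illustrative)::

    def pickaction(opts):
        selected = [k for k in ('abort', 'stop', 'continue') if opts.get(k)]
        if len(selected) > 1:
            raise ValueError('cannot use --%s with --%s'
                             % tuple(selected[:2]))
        action = selected[0] if selected else None
        for flag in ('dry_run', 'confirm'):
            if action and opts.get(flag):
                raise ValueError('cannot specify both --%s and --%s'
                                 % (flag.replace('_', '-'), action))
        return action

    assert pickaction({'stop': True}) == 'stop'
    assert pickaction({}) is None
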
@@ -830,24 +834,43 @@
         opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
         opts['dest'] = '_destautoorphanrebase(SRC)'
 
-    if dryrun:
-        return _dryrunrebase(ui, repo, opts)
+    if dryrun or confirm:
+        return _dryrunrebase(ui, repo, action, opts)
+    elif action == 'stop':
+        rbsrt = rebaseruntime(repo, ui)
+        with repo.wlock(), repo.lock():
+            rbsrt.restorestatus()
+            if rbsrt.collapsef:
+                raise error.Abort(_("cannot stop in --collapse session"))
+            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
+            if not (rbsrt.keepf or allowunstable):
+                raise error.Abort(_("cannot remove original changesets with"
+                                    " unrebased descendants"),
+                    hint=_('either enable obsmarkers to allow unstable '
+                           'revisions or use --keep to keep original '
+                           'changesets'))
+            if needupdate(repo, rbsrt.state):
+                # update to the current working revision
+                # to clear interrupted merge
+                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
+            rbsrt._finishrebase()
+            return 0
     elif inmemory:
         try:
             # in-memory merge doesn't support conflicts, so if we hit any, abort
             # and re-run as an on-disk merge.
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                return _dorebase(ui, repo, opts, inmemory=inmemory)
+                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
         except error.InMemoryMergeConflictsError:
             ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
                       ' merge\n'))
-            _dorebase(ui, repo, {'abort': True})
-            return _dorebase(ui, repo, opts, inmemory=False)
+            _dorebase(ui, repo, action='abort', opts={})
+            return _dorebase(ui, repo, action, opts, inmemory=False)
     else:
-        return _dorebase(ui, repo, opts)
+        return _dorebase(ui, repo, action, opts)
 
-def _dryrunrebase(ui, repo, opts):
+def _dryrunrebase(ui, repo, action, opts):
     rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
     confirm = opts.get('confirm')
     if confirm:
@@ -860,7 +883,7 @@
         try:
             overrides = {('rebase', 'singletransaction'): True}
             with ui.configoverride(overrides, 'rebase'):
-                _origrebase(ui, repo, opts, rbsrt, inmemory=True,
+                _origrebase(ui, repo, action, opts, rbsrt, inmemory=True,
                             leaveunfinished=True)
         except error.InMemoryMergeConflictsError:
             ui.status(_('hit a merge conflict\n'))
@@ -886,11 +909,13 @@
                 rbsrt._prepareabortorcontinue(isabort=True, backup=False,
                                               suppwarns=True)
 
-def _dorebase(ui, repo, opts, inmemory=False):
+def _dorebase(ui, repo, action, opts, inmemory=False):
     rbsrt = rebaseruntime(repo, ui, inmemory, opts)
-    return _origrebase(ui, repo, opts, rbsrt, inmemory=inmemory)
+    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)
 
-def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False):
+def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False,
+                leaveunfinished=False):
+    assert action != 'stop'
     with repo.wlock(), repo.lock():
         # Validate input and define rebasing points
         destf = opts.get('dest', None)
@@ -900,8 +925,6 @@
         # search default destination in this space
         # used in the 'hg pull --rebase' case, see issue 5214.
         destspace = opts.get('_destspace')
-        contf = opts.get('continue')
-        abortf = opts.get('abort')
         if opts.get('interactive'):
             try:
                 if extensions.find('histedit'):
@@ -917,22 +940,20 @@
             raise error.Abort(
                 _('message can only be specified with collapse'))
 
-        if contf or abortf:
-            if contf and abortf:
-                raise error.Abort(_('cannot use both abort and continue'))
+        if action:
             if rbsrt.collapsef:
                 raise error.Abort(
                     _('cannot use collapse with continue or abort'))
             if srcf or basef or destf:
                 raise error.Abort(
                     _('abort and continue do not allow specifying revisions'))
-            if abortf and opts.get('tool', False):
+            if action == 'abort' and opts.get('tool', False):
                 ui.warn(_('tool option will be ignored\n'))
-            if contf:
+            if action == 'continue':
                 ms = mergemod.mergestate.read(repo)
                 mergeutil.checkunresolved(ms)
 
-            retcode = rbsrt._prepareabortorcontinue(abortf)
+            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
             if retcode is not None:
                 return retcode
         else:
@@ -1167,7 +1188,7 @@
     else:
         if repo['.'].rev() != p1:
             repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
-            mergemod.update(repo, p1, False, True)
+            mergemod.update(repo, p1, branchmerge=False, force=True)
         else:
             repo.ui.debug(" already in destination\n")
         # This is, alas, necessary to invalidate workingctx's manifest cache,
@@ -1179,7 +1200,8 @@
         repo.ui.debug("   detach base %d:%s\n" % (base, repo[base]))
     # When collapsing in-place, the parent is the common ancestor, we
     # have to allow merging with it.
-    stats = mergemod.update(repo, rev, True, True, base, collapse,
+    stats = mergemod.update(repo, rev, branchmerge=True, force=True,
+                            ancestor=base, mergeancestor=collapse,
                             labels=['dest', 'source'], wc=wctx)
     if collapse:
         copies.duplicatecopies(repo, wctx, rev, dest)
@@ -1622,7 +1644,7 @@
 
             # Update away from the rebase if necessary
             if shouldupdate or needupdate(repo, state):
-                mergemod.update(repo, originalwd, False, True)
+                mergemod.update(repo, originalwd, branchmerge=False, force=True)
 
             # Strip from the first rebased revision
             if rebased:
@@ -1728,7 +1750,7 @@
     return originalwd, destmap, state
 
 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
-                 keepf=False, fm=None):
+                 keepf=False, fm=None, backup=True):
     """dispose of rebased revision at the end of the rebase
 
     If `collapsedas` is not None, the rebase was a collapse whose result is the
@@ -1736,29 +1758,44 @@
 
     If `keepf` is True, the rebase has --keep set and no nodes should be
     removed (but bookmarks still need to be moved).
+
+    If `backup` is False, no backup will be stored when stripping rebased
+    revisions.
     """
     tonode = repo.changelog.node
     replacements = {}
     moves = {}
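+    # without obsmarkers enabled, scmutil.cleanupnodes() falls back to
+    # stripping the replaced revisions rather than obsoleting them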
+    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
+
+    collapsednodes = []
     for rev, newrev in sorted(state.items()):
         if newrev >= 0 and newrev != rev:
             oldnode = tonode(rev)
             newnode = collapsedas or tonode(newrev)
             moves[oldnode] = newnode
             if not keepf:
+                succs = None
                 if rev in skipped:
-                    succs = ()
+                    if stripcleanup or not repo[rev].obsolete():
+                        succs = ()
+                elif collapsedas:
+                    collapsednodes.append(oldnode)
                 else:
                     succs = (newnode,)
-                replacements[oldnode] = succs
-    scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
+                if succs is not None:
+                    replacements[(oldnode,)] = succs
+    if collapsednodes:
+        replacements[tuple(collapsednodes)] = (collapsedas,)
+    scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
     if fm:
         hf = fm.hexfunc
         fl = fm.formatlist
         fd = fm.formatdict
-        nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
-                          for oldn, newn in replacements.iteritems()},
-                         key="oldnode", value="newnodes")
+        changes = {}
+        for oldns, newn in replacements.iteritems():
+            for oldn in oldns:
+                changes[hf(oldn)] = fl([hf(n) for n in newn], name='node')
+        nodechanges = fd(changes, key="oldnode", value="newnodes")
         fm.data(nodechanges=nodechanges)
 
 def pullrebase(orig, ui, repo, *args, **opts):
@@ -1868,7 +1905,7 @@
                 # If 'srcrev' has a successor in rebase set but none in
                 # destination (which would be caught above), we shall skip it
                 # and its descendants to avoid divergence.
-                if any(s in destmap for s in succrevs):
+                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                     obsoletewithoutsuccessorindestination.add(srcrev)
 
     return (
--- a/hgext/record.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/record.py	Mon Oct 22 14:46:06 2018 -0400
@@ -32,9 +32,10 @@
 
 @command("record",
          # same options as commit + white space diff options
-        [c for c in commands.table['^commit|ci'][1][:]
+        [c for c in commands.table['commit|ci'][1][:]
             if c[1] != "interactive"] + cmdutil.diffwsopts,
-          _('hg record [OPTION]... [FILE]...'))
+          _('hg record [OPTION]... [FILE]...'),
+        helpcategory=command.CATEGORY_COMMITTING)
 def record(ui, repo, *pats, **opts):
     '''interactively select changes to commit
 
@@ -94,6 +95,7 @@
 @command('qrecord',
     [],
     _('hg qrecord [OPTION]... PATCH [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING,
     inferrepo=True)
 def qrecord(ui, repo, patch, *pats, **opts):
     '''interactively record a new patch
@@ -136,7 +138,7 @@
         (qrecord,
          # same options as qnew, but copy them so we don't get
          # -i/--interactive for qrecord and add white space diff options
-         mq.cmdtable['^qnew'][1][:] + cmdutil.diffwsopts,
+         mq.cmdtable['qnew'][1][:] + cmdutil.diffwsopts,
          _('hg qrecord [OPTION]... PATCH [FILE]...'))
 
     _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
--- a/hgext/releasenotes.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/releasenotes.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,8 +16,6 @@
 import difflib
 import errno
 import re
-import sys
-import textwrap
 
 from mercurial.i18n import _
 from mercurial import (
@@ -30,6 +28,9 @@
     scmutil,
     util,
 )
+from mercurial.utils import (
+    stringutil,
+)
 
 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -55,7 +56,7 @@
 ]
 
 RE_DIRECTIVE = re.compile('^\.\. ([a-zA-Z0-9_]+)::\s*([^$]+)?$')
-RE_ISSUE = r'\bissue ?[0-9]{4,6}(?![0-9])\b'
+RE_ISSUE = br'\bissue ?[0-9]{4,6}(?![0-9])\b'
 
 BULLET_SECTION = _('Other Changes')
 
@@ -444,11 +445,11 @@
             lines.append('-' * len(title))
             lines.append('')
 
-            wrapper = textwrap.TextWrapper(width=78)
             for i, para in enumerate(paragraphs):
                 if i:
                     lines.append('')
-                lines.extend(wrapper.wrap(' '.join(para)))
+                lines.extend(stringutil.wrap(' '.join(para),
+                                             width=78).splitlines())
 
             lines.append('')
 
@@ -466,17 +467,17 @@
             lines.append('')
 
         for paragraphs in nontitled:
-            wrapper = textwrap.TextWrapper(initial_indent='* ',
-                                           subsequent_indent='  ',
-                                           width=78)
-            lines.extend(wrapper.wrap(' '.join(paragraphs[0])))
+            lines.extend(stringutil.wrap(' '.join(paragraphs[0]),
+                                         width=78,
+                                         initindent='* ',
+                                         hangindent='  ').splitlines())
 
-            wrapper = textwrap.TextWrapper(initial_indent='  ',
-                                           subsequent_indent='  ',
-                                           width=78)
             for para in paragraphs[1:]:
                 lines.append('')
-                lines.extend(wrapper.wrap(' '.join(para)))
+                lines.extend(stringutil.wrap(' '.join(para),
+                                             width=78,
+                                             initindent='  ',
+                                             hangindent='  ').splitlines())
 
             lines.append('')
 
@@ -491,7 +492,8 @@
         _('REV')),
     ('l', 'list', False, _('list the available admonitions with their title'),
         None)],
-    _('hg releasenotes [-r REV] [-c] FILE'))
+    _('hg releasenotes [-r REV] [-c] FILE'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
 def releasenotes(ui, repo, file_=None, **opts):
     """parse release notes from commit messages into an output file
 
@@ -617,7 +619,7 @@
 def debugparsereleasenotes(ui, path, repo=None):
     """parse release notes and print resulting data structure"""
     if path == '-':
-        text = sys.stdin.read()
+        text = pycompat.stdin.read()
     else:
         with open(path, 'rb') as fh:
             text = fh.read()
--- a/hgext/relink.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/relink.py	Mon Oct 22 14:46:06 2018 -0400
@@ -30,7 +30,7 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-@command('relink', [], _('[ORIGIN]'))
+@command('relink', [], _('[ORIGIN]'), helpcategory=command.CATEGORY_MAINTENANCE)
 def relink(ui, repo, origin=None, **opts):
     """recreate hardlinks between two repositories
 
--- a/hgext/remotenames.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/remotenames.py	Mon Oct 22 14:46:06 2018 -0400
@@ -33,6 +33,7 @@
 )
 from mercurial import (
     bookmarks,
+    error,
     extensions,
     logexchange,
     namespaces,
@@ -41,6 +42,11 @@
     revsetlang,
     smartset,
     templateutil,
+    util,
+)
+
+from mercurial.utils import (
+    stringutil,
 )
 
 if pycompat.ispy3:
@@ -230,7 +236,7 @@
                     self._nodetohoists.setdefault(node[0], []).append(name)
         return self._nodetohoists
 
-def wrapprintbookmarks(orig, ui, repo, bmarks, **opts):
+def wrapprintbookmarks(orig, ui, repo, fm, bmarks):
     if 'remotebookmarks' not in repo.names:
         return
     ns = repo.names['remotebookmarks']
@@ -243,7 +249,7 @@
 
         bmarks[name] = (node, ' ', '')
 
-    return orig(ui, repo, bmarks, **opts)
+    return orig(ui, repo, fm, bmarks)
 
 def extsetup(ui):
     extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks)
@@ -345,32 +351,53 @@
 
 def _revsetutil(repo, subset, x, rtypes):
     """utility function to return a set of revs based on the rtypes"""
+    args = revsetlang.getargs(x, 0, 1, _('only one argument accepted'))
+    if args:
+        kind, pattern, matcher = stringutil.stringmatcher(
+            revsetlang.getstring(args[0], _('argument must be a string')))
+    else:
+        kind = pattern = None
+        matcher = util.always
 
-    revs = set()
+    nodes = set()
     cl = repo.changelog
     for rtype in rtypes:
         if rtype in repo.names:
             ns = repo.names[rtype]
             for name in ns.listnames(repo):
-                revs.update(ns.nodes(repo, name))
-
-    results = (cl.rev(n) for n in revs if cl.hasnode(n))
-    return subset & smartset.baseset(sorted(results))
+                if not matcher(name):
+                    continue
+                nodes.update(ns.nodes(repo, name))
+    if kind == 'literal' and not nodes:
+        raise error.RepoLookupError(_("remote name '%s' does not exist")
+                                    % pattern)
 
-@revsetpredicate('remotenames()')
+    revs = (cl.rev(n) for n in nodes if cl.hasnode(n))
+    return subset & smartset.baseset(revs)
+
+@revsetpredicate('remotenames([name])')
 def remotenamesrevset(repo, subset, x):
-    """All changesets which have a remotename on them."""
-    revsetlang.getargs(x, 0, 0, _("remotenames takes no arguments"))
+    """All changesets which have a remotename on them. If `name` is
+    specified, only remotenames of matching remote paths are considered.
+
+    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
+    """
     return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches'))
 
-@revsetpredicate('remotebranches()')
+@revsetpredicate('remotebranches([name])')
 def remotebranchesrevset(repo, subset, x):
-    """All changesets which are branch heads on remotes."""
-    revsetlang.getargs(x, 0, 0, _("remotebranches takes no arguments"))
+    """All changesets which are branch heads on remotes. If `name` is
+    specified, only remotenames of matching remote paths are considered.
+
+    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
+    """
     return _revsetutil(repo, subset, x, ('remotebranches',))
 
-@revsetpredicate('remotebookmarks()')
+@revsetpredicate('remotebookmarks([name])')
 def remotebmarksrevset(repo, subset, x):
-    """All changesets which have bookmarks on remotes."""
-    revsetlang.getargs(x, 0, 0, _("remotebookmarks takes no arguments"))
+    """All changesets which have bookmarks on remotes. If `name` is
+    specified, only remotenames of matching remote paths are considered.
+
+    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
+    """
     return _revsetutil(repo, subset, x, ('remotebookmarks',))
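+
+# Illustrative examples of the pattern-matching forms added above; the
+# remote path name "default" is hypothetical:
+#
+#   hg log -r 'remotenames("default/stable")'
+#   hg log -r 'remotebranches("re:^default/")'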
--- a/hgext/schemes.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/schemes.py	Mon Oct 22 14:46:06 2018 -0400
@@ -78,9 +78,10 @@
     def __repr__(self):
         return '<ShortRepository: %s>' % self.scheme
 
-    def instance(self, ui, url, create, intents=None):
+    def instance(self, ui, url, create, intents=None, createopts=None):
         url = self.resolve(url)
-        return hg._peerlookup(url).instance(ui, url, create, intents=intents)
+        return hg._peerlookup(url).instance(ui, url, create, intents=intents,
+                                            createopts=createopts)
 
     def resolve(self, url):
         # Should this use the util.url class, or is manual parsing better?
--- a/hgext/share.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/share.py	Mon Oct 22 14:46:06 2018 -0400
@@ -67,6 +67,7 @@
                               '(EXPERIMENTAL)')),
     ],
     _('[-U] [-B] SOURCE [DEST]'),
+    helpcategory=command.CATEGORY_REPO_CREATION,
     norepo=True)
 def share(ui, source, dest=None, noupdate=False, bookmarks=False,
           relative=False):
@@ -91,7 +92,7 @@
              bookmarks=bookmarks, relative=relative)
     return 0
 
-@command('unshare', [], '')
+@command('unshare', [], '', helpcategory=command.CATEGORY_MAINTENANCE)
 def unshare(ui, repo):
     """convert a shared repository to a normal one
 
--- a/hgext/shelve.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/shelve.py	Mon Oct 22 14:46:06 2018 -0400
@@ -41,6 +41,7 @@
     lock as lockmod,
     mdiff,
     merge,
+    narrowspec,
     node as nodemod,
     patch,
     phases,
@@ -78,7 +79,7 @@
 
 backupdir = 'shelve-backup'
 shelvedir = 'shelved'
-shelvefileextensions = ['hg', 'patch', 'oshelve']
+shelvefileextensions = ['hg', 'patch', 'shelve']
 # universal extension is present in all types of shelves
 patchextension = 'patch'
 
@@ -139,17 +140,29 @@
     def applybundle(self):
         fp = self.opener()
         try:
+            targetphase = phases.internal
+            if not phases.supportinternal(self.repo):
+                targetphase = phases.secret
             gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
-            bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
+            pretip = self.repo['tip']
+            tr = self.repo.currenttransaction()
+            bundle2.applybundle(self.repo, gen, tr,
                                 source='unshelve',
                                 url='bundle:' + self.vfs.join(self.fname),
-                                targetphase=phases.secret)
+                                targetphase=targetphase)
+            shelvectx = self.repo['tip']
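+            # if tip did not move, the shelved changeset already existed
+            # in the repo and the transaction recorded it as a duplicate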
+            if pretip == shelvectx:
+                shelverev = tr.changes['revduplicates'][-1]
+                shelvectx = self.repo[shelverev]
+            return shelvectx
         finally:
             fp.close()
 
     def bundlerepo(self):
-        return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
-                                           self.vfs.join(self.fname))
+        path = self.vfs.join(self.fname)
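+        # bundle URLs take the form "bundle://<repo root>+<bundle path>";
+        # bundlerepo overlays the bundle on top of the base repository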
+        return bundlerepo.instance(self.repo.baseui,
+                                   'bundle://%s+%s' % (self.repo.root, path))
+
     def writebundle(self, bases, node):
         cgversion = changegroup.safeversion(self.repo)
         if cgversion == '01':
@@ -159,18 +172,19 @@
             btype = 'HG20'
             compression = 'BZ'
 
-        outgoing = discovery.outgoing(self.repo, missingroots=bases,
+        repo = self.repo.unfiltered()
+
+        outgoing = discovery.outgoing(repo, missingroots=bases,
                                       missingheads=[node])
-        cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
-                                         'shelve')
+        cg = changegroup.makechangegroup(repo, outgoing, cgversion, 'shelve')
 
         bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                                 compression=compression)
 
-    def writeobsshelveinfo(self, info):
+    def writeinfo(self, info):
         scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
 
-    def readobsshelveinfo(self):
+    def readinfo(self):
         return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
 
 class shelvedstate(object):
@@ -288,7 +302,7 @@
     hgfiles = [f for f in vfs.listdir()
                if f.endswith('.' + patchextension)]
     hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
-    if 0 < maxbackups and maxbackups < len(hgfiles):
+    if maxbackups > 0 and maxbackups < len(hgfiles):
         bordermtime = hgfiles[-maxbackups][0]
     else:
         bordermtime = None
@@ -314,16 +328,13 @@
     '''Abort current transaction for shelve/unshelve, but keep dirstate
     '''
     tr = repo.currenttransaction()
-    backupname = 'dirstate.shelve'
-    repo.dirstate.savebackup(tr, backupname)
+    dirstatebackupname = 'dirstate.shelve'
+    narrowspecbackupname = 'narrowspec.shelve'
+    repo.dirstate.savebackup(tr, dirstatebackupname)
+    narrowspec.savebackup(repo, narrowspecbackupname)
     tr.abort()
-    repo.dirstate.restorebackup(None, backupname)
-
-def createcmd(ui, repo, pats, opts):
-    """subcommand that creates a new shelve"""
-    with repo.wlock():
-        cmdutil.checkunfinished(repo)
-        return _docreatecmd(ui, repo, pats, opts)
+    narrowspec.restorebackup(repo, narrowspecbackupname)
+    repo.dirstate.restorebackup(None, dirstatebackupname)
 
 def getshelvename(repo, parent, opts):
     """Decide on the name this shelve is going to have"""
@@ -381,7 +392,11 @@
         hasmq = util.safehasattr(repo, 'mq')
         if hasmq:
             saved, repo.mq.checkapplied = repo.mq.checkapplied, False
-        overrides = {('phases', 'new-commit'): phases.secret}
+
+        targetphase = phases.internal
+        if not phases.supportinternal(repo):
+            targetphase = phases.secret
+        overrides = {('phases', 'new-commit'): targetphase}
         try:
             editor_ = False
             if editor:
@@ -411,6 +426,8 @@
         ui.status(_("nothing changed\n"))
 
 def _shelvecreatedcommit(repo, node, name):
+    info = {'node': nodemod.hex(node)}
+    shelvedfile(repo, name, 'shelve').writeinfo(info)
     bases = list(mutableancestors(repo[node]))
     shelvedfile(repo, name, 'hg').writebundle(bases, node)
     with shelvedfile(repo, name, patchextension).opener('wb') as fp:
@@ -424,7 +441,20 @@
         repo[None].add(s.unknown)
 
 def _finishshelve(repo):
-    _aborttransaction(repo)
+    if phases.supportinternal(repo):
+        backupname = 'dirstate.shelve'
+        tr = repo.currenttransaction()
+        repo.dirstate.savebackup(tr, backupname)
+        tr.close()
+        repo.dirstate.restorebackup(None, backupname)
+    else:
+        _aborttransaction(repo)
+
+def createcmd(ui, repo, pats, opts):
+    """subcommand that creates a new shelve"""
+    with repo.wlock():
+        cmdutil.checkunfinished(repo)
+        return _docreatecmd(ui, repo, pats, opts)
 
 def _docreatecmd(ui, repo, pats, opts):
     wctx = repo[None]
@@ -456,7 +486,7 @@
 
         name = getshelvename(repo, parent, opts)
         activebookmark = _backupactivebookmark(repo)
-        extra = {}
+        extra = {'internal': 'shelve'}
         if includeunknown:
             _includeunknownfiles(repo, pats, opts, extra)
 
@@ -626,7 +656,7 @@
         try:
             checkparents(repo, state)
 
-            merge.update(repo, state.pendingctx, False, True)
+            merge.update(repo, state.pendingctx, branchmerge=False, force=True)
             if (state.activebookmark
                     and state.activebookmark in repo._bookmarks):
                 bookmarks.activate(repo, state.activebookmark)
@@ -636,8 +666,9 @@
                 rebase.clearstatus(repo)
 
             mergefiles(ui, repo, state.wctx, state.pendingctx)
-            repair.strip(ui, repo, state.nodestoremove, backup=False,
-                         topic='shelve')
+            if not phases.supportinternal(repo):
+                repair.strip(ui, repo, state.nodestoremove, backup=False,
+                             topic='shelve')
         finally:
             shelvedstate.clear(repo)
             ui.warn(_("unshelve of '%s' aborted\n") % state.name)
@@ -695,7 +726,10 @@
             repo.setparents(state.pendingctx.node(), nodemod.nullid)
             repo.dirstate.write(repo.currenttransaction())
 
-        overrides = {('phases', 'new-commit'): phases.secret}
+        targetphase = phases.internal
+        if not phases.supportinternal(repo):
+            targetphase = phases.secret
+        overrides = {('phases', 'new-commit'): targetphase}
         with repo.ui.configoverride(overrides, 'unshelve'):
             with repo.dirstate.parentchange():
                 repo.setparents(state.parents[0], nodemod.nullid)
@@ -727,8 +761,9 @@
         mergefiles(ui, repo, state.wctx, shelvectx)
         restorebranch(ui, repo, state.branchtorestore)
 
-        repair.strip(ui, repo, state.nodestoremove, backup=False,
-                     topic='shelve')
+        if not phases.supportinternal(repo):
+            repair.strip(ui, repo, state.nodestoremove, backup=False,
+                         topic='shelve')
         _restoreactivebookmark(repo, state.activebookmark)
         shelvedstate.clear(repo)
         unshelvecleanup(ui, repo, state.name, opts)
@@ -744,7 +779,8 @@
         return tmpwctx, addedbefore
     ui.status(_("temporarily committing pending changes "
                 "(restore with 'hg unshelve --abort')\n"))
-    commitfunc = getcommitfunc(extra=None, interactive=False,
+    extra = {'internal': 'shelve'}
+    commitfunc = getcommitfunc(extra=extra, interactive=False,
                                editor=False)
     tempopts = {}
     tempopts['message'] = "pending changes temporary commit"
@@ -756,9 +792,21 @@
 
 def _unshelverestorecommit(ui, repo, basename):
     """Recreate commit in the repository during the unshelve"""
-    with ui.configoverride({('ui', 'quiet'): True}):
-        shelvedfile(repo, basename, 'hg').applybundle()
-        shelvectx = repo['tip']
+    repo = repo.unfiltered()
+    node = None
+    if shelvedfile(repo, basename, 'shelve').exists():
+        node = shelvedfile(repo, basename, 'shelve').readinfo()['node']
+    if node is None or node not in repo:
+        with ui.configoverride({('ui', 'quiet'): True}):
+            shelvectx = shelvedfile(repo, basename, 'hg').applybundle()
+        # We might not strip the unbundled changeset, so we should keep track of
+        # the unshelve node in case we need to reuse it (eg: unshelve --keep)
+        if node is None:
+            info = {'node': nodemod.hex(shelvectx.node())}
+            shelvedfile(repo, basename, 'shelve').writeinfo(info)
+    else:
+        shelvectx = repo[node]
+
     return repo, shelvectx
 
 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
@@ -783,7 +831,7 @@
             tr.close()
 
             nodestoremove = [repo.changelog.node(rev)
-                             for rev in xrange(oldtiprev, len(repo))]
+                             for rev in pycompat.xrange(oldtiprev, len(repo))]
             shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
                               branchtorestore, opts.get('keep'), activebookmark)
             raise error.InterventionRequired(
@@ -855,7 +903,8 @@
           ('t', 'tool', '', _('specify merge tool')),
           ('', 'date', '',
            _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
-         _('hg unshelve [[-n] SHELVED]'))
+         _('hg unshelve [[-n] SHELVED]'),
+         helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
 
@@ -955,6 +1004,7 @@
     if not shelvedfile(repo, basename, patchextension).exists():
         raise error.Abort(_("shelved change '%s' not found") % basename)
 
+    repo = repo.unfiltered()
     lock = tr = None
     try:
         lock = repo.lock()
@@ -1024,7 +1074,8 @@
            _('output diffstat-style summary of changes (provide the names of '
              'the shelved changes as positional arguments)')
            )] + cmdutil.walkopts,
-         _('hg shelve [OPTION]... [FILE]...'))
+         _('hg shelve [OPTION]... [FILE]...'),
+         helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def shelvecmd(ui, repo, *pats, **opts):
     '''save and set aside changes from the working directory
 
--- a/hgext/show.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/show.py	Mon Oct 22 14:46:06 2018 -0400
@@ -93,7 +93,8 @@
     # is an important part of the 'hg show' user experience and the entire
     # 'hg show' experience is experimental.
     ('T', 'template', '', ('display with template'), _('TEMPLATE')),
-    ], _('VIEW'))
+    ], _('VIEW'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
 def show(ui, repo, view=None, template=None):
     """show various repository information
 
--- a/hgext/sparse.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/sparse.py	Mon Oct 22 14:46:06 2018 -0400
@@ -122,7 +122,7 @@
                              propname))
 
 def _setuplog(ui):
-    entry = commands.table['^log|history']
+    entry = commands.table['log|history']
     entry[1].append(('', 'sparse', None,
         "limit to changesets affecting the sparse checkout"))
 
@@ -163,7 +163,7 @@
     return orig(ui, repo, *args, **opts)
 
 def _setupclone(ui):
-    entry = commands.table['^clone']
+    entry = commands.table['clone']
     entry[1].append(('', 'enable-profile', [],
                     'enable a sparse profile'))
     entry[1].append(('', 'include', [],
@@ -173,7 +173,7 @@
     extensions.wrapcommand(commands.table, 'clone', _clonesparsecmd)
 
 def _setupadd(ui):
-    entry = commands.table['^add']
+    entry = commands.table['add']
     entry[1].append(('s', 'sparse', None,
                     'also include directories of added files in sparse config'))
 
@@ -237,7 +237,7 @@
             return orig(self, *args)
         extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
 
-@command('^debugsparse', [
+@command('debugsparse', [
     ('I', 'include', False, _('include files in the sparse checkout')),
     ('X', 'exclude', False, _('exclude files in the sparse checkout')),
     ('d', 'delete', False, _('delete an include/exclude rule')),
@@ -249,7 +249,8 @@
     ('', 'refresh', False, _('updates the working directory after sparseness changes')),
     ('', 'reset', False, _('makes the repo full again')),
     ] + commands.templateopts,
-    _('[--OPTION] PATTERN...'))
+    _('[--OPTION] PATTERN...'),
+    helpbasic=True)
 def debugsparse(ui, repo, *pats, **opts):
     """make the current checkout sparse, or edit the existing checkout
 
--- a/hgext/split.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/split.py	Mon Oct 22 14:46:06 2018 -0400
@@ -44,11 +44,12 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-@command('^split',
+@command('split',
     [('r', 'rev', '', _("revision to split"), _('REV')),
      ('', 'rebase', True, _('rebase descendants after split')),
     ] + cmdutil.commitopts2,
-    _('hg split [--no-rebase] [[-r] REV]'))
+    _('hg split [--no-rebase] [[-r] REV]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, helpbasic=True)
 def split(ui, repo, *revs, **opts):
     """split a changeset into smaller ones
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/sqlitestore.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1169 @@
+# sqlitestore.py - Storage backend that uses SQLite
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""store repository data in SQLite (EXPERIMENTAL)
+
+The sqlitestore extension enables the storage of repository data in SQLite.
+
+This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
+GUARANTEES. This means that repositories created with this extension may
+only be usable with the exact version of this extension/Mercurial that was
+used to create them. The extension attempts to enforce this in order to
+prevent repository corruption.
+
+In addition, several features are not yet supported or have known bugs:
+
+* Only file revision data is stored in SQLite. Changeset, manifest, and other
+  repository data is not yet stored in SQLite.
+* Transactions are not robust. If the process is aborted at the wrong moment
+  during transaction close/rollback, the repository could be left in an
+  inconsistent state. This problem will diminish once all repository data is
+  tracked by SQLite.
+* Bundle repositories do not work (the ability to use e.g.
+  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
+  existing repository).
+* Various other features don't work.
+
+This extension should work for basic clone/pull, update, and commit workflows.
+Some history rewriting operations may fail due to lack of support for bundle
+repositories.
+
+To use, activate the extension and set the ``storage.new-repo-backend`` config
+option to ``sqlite`` to enable new repositories to use SQLite for storage.
+"""
+
+# To run the test suite with repos using SQLite by default, execute the
+# following:
+#
+# HGREPOFEATURES="sqlitestore" run-tests.py \
+#     --extra-config-opt extensions.sqlitestore= \
+#     --extra-config-opt storage.new-repo-backend=sqlite
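+#
+# A minimal hgrc sketch (illustrative) for enabling the backend on newly
+# created repositories, using the config option named in the docstring above:
+#
+#   [extensions]
+#   sqlitestore =
+#
+#   [storage]
+#   new-repo-backend = sqlite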
+
+from __future__ import absolute_import
+
+import hashlib
+import sqlite3
+import struct
+import threading
+import zlib
+
+from mercurial.i18n import _
+from mercurial.node import (
+    nullid,
+    nullrev,
+    short,
+)
+from mercurial.thirdparty import (
+    attr,
+)
+from mercurial import (
+    ancestor,
+    dagop,
+    error,
+    extensions,
+    localrepo,
+    mdiff,
+    pycompat,
+    registrar,
+    repository,
+    util,
+    verify,
+)
+from mercurial.utils import (
+    interfaceutil,
+    storageutil,
+)
+
+try:
+    from mercurial import zstd
+    zstd.__version__
+except ImportError:
+    zstd = None
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+# experimental config: storage.sqlite.compression
+configitem('storage', 'sqlite.compression',
+           default='zstd' if zstd else 'zlib')
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+REQUIREMENT = b'exp-sqlite-001'
+REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
+REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
+REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
+REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
+
+CURRENT_SCHEMA_VERSION = 1
+
+COMPRESSION_NONE = 1
+COMPRESSION_ZSTD = 2
+COMPRESSION_ZLIB = 3
+
+FLAG_CENSORED = 1
+FLAG_MISSING_P1 = 2
+FLAG_MISSING_P2 = 4
+
+CREATE_SCHEMA = [
+    # Deltas are stored as content-indexed blobs.
+    # compression column holds COMPRESSION_* constant for how the
+    # delta is encoded.
+
+    r'CREATE TABLE delta ('
+    r'    id INTEGER PRIMARY KEY, '
+    r'    compression INTEGER NOT NULL, '
+    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
+    r'    delta BLOB NOT NULL '
+    r')',
+
+    # Tracked paths are denormalized to integers to avoid redundant
+    # storage of the path name.
+    r'CREATE TABLE filepath ('
+    r'    id INTEGER PRIMARY KEY, '
+    r'    path BLOB NOT NULL '
+    r')',
+
+    r'CREATE UNIQUE INDEX filepath_path '
+    r'    ON filepath (path)',
+
+    # We have a single table for all file revision data.
+    # Each file revision is uniquely described by a (path, rev) and
+    # (path, node).
+    #
+    # Revision data is stored as a pointer to the delta producing this
+    # revision and the file revision whose delta should be applied before
+    # that one. One can reconstruct the delta chain by recursively following
+    # the delta base revision pointers until one encounters NULL.
+    #
+    # flags column holds bitwise integer flags controlling storage options.
+    # These flags are defined by the FLAG_* constants.
+    r'CREATE TABLE fileindex ('
+    r'    id INTEGER PRIMARY KEY, '
+    r'    pathid INTEGER REFERENCES filepath(id), '
+    r'    revnum INTEGER NOT NULL, '
+    r'    p1rev INTEGER NOT NULL, '
+    r'    p2rev INTEGER NOT NULL, '
+    r'    linkrev INTEGER NOT NULL, '
+    r'    flags INTEGER NOT NULL, '
+    r'    deltaid INTEGER REFERENCES delta(id), '
+    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
+    r'    node BLOB NOT NULL '
+    r')',
+
+    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
+    r'    ON fileindex (pathid, revnum)',
+
+    r'CREATE UNIQUE INDEX fileindex_pathnode '
+    r'    ON fileindex (pathid, node)',
+
+    # Provide a view over all file data for convenience.
+    r'CREATE VIEW filedata AS '
+    r'SELECT '
+    r'    fileindex.id AS id, '
+    r'    filepath.id AS pathid, '
+    r'    filepath.path AS path, '
+    r'    fileindex.revnum AS revnum, '
+    r'    fileindex.node AS node, '
+    r'    fileindex.p1rev AS p1rev, '
+    r'    fileindex.p2rev AS p2rev, '
+    r'    fileindex.linkrev AS linkrev, '
+    r'    fileindex.flags AS flags, '
+    r'    fileindex.deltaid AS deltaid, '
+    r'    fileindex.deltabaseid AS deltabaseid '
+    r'FROM filepath, fileindex '
+    r'WHERE fileindex.pathid=filepath.id',
+
+    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
+]
+
+def resolvedeltachain(db, pathid, node, revisioncache,
+                      stoprids, zstddctx=None):
+    """Resolve a delta chain for a file node."""
+
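+    # `stoprids` maps fileindex row ids to nodes whose fulltexts are already
+    # in `revisioncache`; the recursive query stops at those rows so the
+    # cached fulltext can serve as the base of the delta chain.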
+    # TODO the "not in ({stops})" here is possibly slowing down the query
+    # because it needs to perform the lookup on every recursive invocation.
+    # This could possibly be faster if we created a temporary query with
+    # baseid "poisoned" to null and limited the recursive filter to
+    # "is not null".
+    res = db.execute(
+        r'WITH RECURSIVE '
+        r'    deltachain(deltaid, baseid) AS ('
+        r'        SELECT deltaid, deltabaseid FROM fileindex '
+        r'            WHERE pathid=? AND node=? '
+        r'        UNION ALL '
+        r'        SELECT fileindex.deltaid, deltabaseid '
+        r'            FROM fileindex, deltachain '
+        r'            WHERE '
+        r'                fileindex.id=deltachain.baseid '
+        r'                AND deltachain.baseid IS NOT NULL '
+        r'                AND fileindex.id NOT IN ({stops}) '
+        r'    ) '
+        r'SELECT deltachain.baseid, compression, delta '
+        r'FROM deltachain, delta '
+        r'WHERE delta.id=deltachain.deltaid'.format(
+            stops=r','.join([r'?'] * len(stoprids))),
+        tuple([pathid, node] + list(stoprids.keys())))
+
+    deltas = []
+    lastdeltabaseid = None
+
+    for deltabaseid, compression, delta in res:
+        lastdeltabaseid = deltabaseid
+
+        if compression == COMPRESSION_ZSTD:
+            delta = zstddctx.decompress(delta)
+        elif compression == COMPRESSION_NONE:
+            pass  # delta is stored uncompressed; use it as-is
+        elif compression == COMPRESSION_ZLIB:
+            delta = zlib.decompress(delta)
+        else:
+            raise SQLiteStoreError('unhandled compression type: %d' %
+                                   compression)
+
+        deltas.append(delta)
+
+    if lastdeltabaseid in stoprids:
+        basetext = revisioncache[stoprids[lastdeltabaseid]]
+    else:
+        basetext = deltas.pop()
+
+    deltas.reverse()
+    fulltext = mdiff.patches(basetext, deltas)
+
+    # SQLite returns buffer instances for blob columns on Python 2. This
+    # type can propagate through the delta application layer. Because
+    # downstream callers assume revisions are bytes, cast as needed.
+    if not isinstance(fulltext, bytes):
+        fulltext = bytes(fulltext)
+
+    return fulltext
+
+def insertdelta(db, compression, hash, delta):
+    try:
+        return db.execute(
+            r'INSERT INTO delta (compression, hash, delta) '
+            r'VALUES (?, ?, ?)',
+            (compression, hash, delta)).lastrowid
+    except sqlite3.IntegrityError:
+        return db.execute(
+            r'SELECT id FROM delta WHERE hash=?',
+            (hash,)).fetchone()[0]
+
+class SQLiteStoreError(error.StorageError):
+    pass
+
+@attr.s
+class revisionentry(object):
+    rid = attr.ib()
+    rev = attr.ib()
+    node = attr.ib()
+    p1rev = attr.ib()
+    p2rev = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    linkrev = attr.ib()
+    flags = attr.ib()
+
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True)
+class sqliterevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+    linknode = attr.ib(default=None)
+
+@interfaceutil.implementer(repository.iverifyproblem)
+@attr.s(frozen=True)
+class sqliteproblem(object):
+    warning = attr.ib(default=None)
+    error = attr.ib(default=None)
+    node = attr.ib(default=None)
+
+@interfaceutil.implementer(repository.ifilestorage)
+class sqlitefilestore(object):
+    """Implements storage for an individual tracked path."""
+
+    def __init__(self, db, path, compression):
+        self._db = db
+        self._path = path
+
+        self._pathid = None
+
+        # revnum -> node
+        self._revtonode = {}
+        # node -> revnum
+        self._nodetorev = {}
+        # node -> data structure
+        self._revisions = {}
+
+        self._revisioncache = util.lrucachedict(10)
+
+        self._compengine = compression
+
+        if compression == 'zstd':
+            self._cctx = zstd.ZstdCompressor(level=3)
+            self._dctx = zstd.ZstdDecompressor()
+        else:
+            self._cctx = None
+            self._dctx = None
+
+        self._refreshindex()
+
+    def _refreshindex(self):
+        self._revtonode = {}
+        self._nodetorev = {}
+        self._revisions = {}
+
+        res = list(self._db.execute(
+            r'SELECT id FROM filepath WHERE path=?', (self._path,)))
+
+        if not res:
+            self._pathid = None
+            return
+
+        self._pathid = res[0][0]
+
+        res = self._db.execute(
+            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
+            r'FROM fileindex '
+            r'WHERE pathid=? '
+            r'ORDER BY revnum ASC',
+            (self._pathid,))
+
+        for i, row in enumerate(res):
+            rid, rev, node, p1rev, p2rev, linkrev, flags = row
+
+            if i != rev:
+                raise SQLiteStoreError(_('sqlite database has inconsistent '
+                                         'revision numbers'))
+
+            if p1rev == nullrev:
+                p1node = nullid
+            else:
+                p1node = self._revtonode[p1rev]
+
+            if p2rev == nullrev:
+                p2node = nullid
+            else:
+                p2node = self._revtonode[p2rev]
+
+            entry = revisionentry(
+                rid=rid,
+                rev=rev,
+                node=node,
+                p1rev=p1rev,
+                p2rev=p2rev,
+                p1node=p1node,
+                p2node=p2node,
+                linkrev=linkrev,
+                flags=flags)
+
+            self._revtonode[rev] = node
+            self._nodetorev[node] = rev
+            self._revisions[node] = entry
+
+    # Start of ifileindex interface.
+
+    def __len__(self):
+        return len(self._revisions)
+
+    def __iter__(self):
+        return iter(pycompat.xrange(len(self._revisions)))
+
+    def hasnode(self, node):
+        if node == nullid:
+            return False
+
+        return node in self._nodetorev
+
+    def revs(self, start=0, stop=None):
+        return storageutil.iterrevs(len(self._revisions), start=start,
+                                    stop=stop)
+
+    def parents(self, node):
+        if node == nullid:
+            return nullid, nullid
+
+        if node not in self._revisions:
+            raise error.LookupError(node, self._path, _('no node'))
+
+        entry = self._revisions[node]
+        return entry.p1node, entry.p2node
+
+    def parentrevs(self, rev):
+        if rev == nullrev:
+            return nullrev, nullrev
+
+        if rev not in self._revtonode:
+            raise IndexError(rev)
+
+        entry = self._revisions[self._revtonode[rev]]
+        return entry.p1rev, entry.p2rev
+
+    def rev(self, node):
+        if node == nullid:
+            return nullrev
+
+        if node not in self._nodetorev:
+            raise error.LookupError(node, self._path, _('no node'))
+
+        return self._nodetorev[node]
+
+    def node(self, rev):
+        if rev == nullrev:
+            return nullid
+
+        if rev not in self._revtonode:
+            raise IndexError(rev)
+
+        return self._revtonode[rev]
+
+    def lookup(self, node):
+        return storageutil.fileidlookup(self, node, self._path)
+
+    def linkrev(self, rev):
+        if rev == nullrev:
+            return nullrev
+
+        if rev not in self._revtonode:
+            raise IndexError(rev)
+
+        entry = self._revisions[self._revtonode[rev]]
+        return entry.linkrev
+
+    def iscensored(self, rev):
+        if rev == nullrev:
+            return False
+
+        if rev not in self._revtonode:
+            raise IndexError(rev)
+
+        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
+
+    def commonancestorsheads(self, node1, node2):
+        rev1 = self.rev(node1)
+        rev2 = self.rev(node2)
+
+        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
+        return pycompat.maplist(self.node, ancestors)
+
+    def descendants(self, revs):
+        # TODO we could implement this using a recursive SQL query, which
+        # might be faster.
+        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
+
+    def heads(self, start=None, stop=None):
+        if start is None and stop is None:
+            if not len(self):
+                return [nullid]
+
+        startrev = self.rev(start) if start is not None else nullrev
+        stoprevs = {self.rev(n) for n in stop or []}
+
+        revs = dagop.headrevssubset(self.revs, self.parentrevs,
+                                    startrev=startrev, stoprevs=stoprevs)
+
+        return [self.node(rev) for rev in revs]
+
+    def children(self, node):
+        rev = self.rev(node)
+
+        res = self._db.execute(
+            r'SELECT'
+            r'  node '
+            r'  FROM filedata '
+            r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
+            r'  ORDER BY revnum ASC',
+            (self._path, rev, rev))
+
+        return [row[0] for row in res]
+
+    # End of ifileindex interface.
+
+    # Start of ifiledata interface.
+
+    def size(self, rev):
+        if rev == nullrev:
+            return 0
+
+        if rev not in self._revtonode:
+            raise IndexError(rev)
+
+        node = self._revtonode[rev]
+
+        if self.renamed(node):
+            return len(self.read(node))
+
+        return len(self.revision(node))
+
+    def revision(self, node, raw=False, _verifyhash=True):
+        if node in (nullid, nullrev):
+            return b''
+
+        if isinstance(node, int):
+            node = self.node(node)
+
+        if node not in self._nodetorev:
+            raise error.LookupError(node, self._path, _('no node'))
+
+        if node in self._revisioncache:
+            return self._revisioncache[node]
+
+        # Because we have a fulltext revision cache, we are able to
+        # short-circuit delta chain traversal and decompression as soon as
+        # we encounter a revision in the cache.
+
+        stoprids = {self._revisions[n].rid: n
+                    for n in self._revisioncache}
+
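+        # sentinel that matches no real row id, so the "NOT IN" clause in
+        # resolvedeltachain() is never empty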
+        if not stoprids:
+            stoprids[-1] = None
+
+        fulltext = resolvedeltachain(self._db, self._pathid, node,
+                                     self._revisioncache, stoprids,
+                                     zstddctx=self._dctx)
+
+        # Don't verify hashes if parent nodes were rewritten, as the hash
+        # wouldn't verify.
+        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
+            _verifyhash = False
+
+        if _verifyhash:
+            self._checkhash(fulltext, node)
+            self._revisioncache[node] = fulltext
+
+        return fulltext
+
+    def read(self, node):
+        return storageutil.filtermetadata(self.revision(node))
+
+    def renamed(self, node):
+        return storageutil.filerevisioncopied(self, node)
+
+    def cmp(self, node, fulltext):
+        return not storageutil.filedataequivalent(self, node, fulltext)
+
+    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
+                      assumehaveparentrevisions=False, deltaprevious=False):
+        if nodesorder not in ('nodes', 'storage', None):
+            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
+                                         nodesorder)
+
+        nodes = [n for n in nodes if n != nullid]
+
+        if not nodes:
+            return
+
+        # TODO perform in a single query.
+        res = self._db.execute(
+            r'SELECT revnum, deltaid FROM fileindex '
+            r'WHERE pathid=? '
+            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
+            tuple([self._pathid] + nodes))
+
+        deltabases = {}
+
+        for rev, deltaid in res:
+            res = self._db.execute(
+                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
+                (self._pathid, deltaid))
+            deltabases[rev] = res.fetchone()[0]
+
+        # TODO define revdifffn so we can use delta from storage.
+        for delta in storageutil.emitrevisions(
+            self, nodes, nodesorder, sqliterevisiondelta,
+            deltaparentfn=deltabases.__getitem__,
+            revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltaprevious=deltaprevious):
+
+            yield delta
+
+    # End of ifiledata interface.
+
+    # Start of ifilemutation interface.
+
+    def add(self, filedata, meta, transaction, linkrev, p1, p2):
+        if meta or filedata.startswith(b'\x01\n'):
+            filedata = storageutil.packmeta(meta, filedata)
+
+        return self.addrevision(filedata, transaction, linkrev, p1, p2)
+
+    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
+                    flags=0, cachedelta=None):
+        if flags:
+            raise SQLiteStoreError(_('flags not supported on revisions'))
+
+        validatehash = node is not None
+        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
+
+        if validatehash:
+            self._checkhash(revisiondata, node, p1, p2)
+
+        if node in self._nodetorev:
+            return node
+
+        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
+                                    p1, p2)
+
+        self._revisioncache[node] = revisiondata
+        return node
+
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        nodes = []
+
+        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
+            storeflags = 0
+
+            if wireflags & repository.REVISION_FLAG_CENSORED:
+                storeflags |= FLAG_CENSORED
+
+            if wireflags & ~repository.REVISION_FLAG_CENSORED:
+                raise SQLiteStoreError('unhandled revision flag')
+
+            if maybemissingparents:
+                if p1 != nullid and not self.hasnode(p1):
+                    p1 = nullid
+                    storeflags |= FLAG_MISSING_P1
+
+                if p2 != nullid and not self.hasnode(p2):
+                    p2 = nullid
+                    storeflags |= FLAG_MISSING_P2
+
+            baserev = self.rev(deltabase)
+
+            # If base is censored, delta must be full replacement in a single
+            # patch operation.
+            if baserev != nullrev and self.iscensored(baserev):
+                hlen = struct.calcsize('>lll')
+                oldlen = len(self.revision(deltabase, raw=True,
+                                           _verifyhash=False))
+                newlen = len(delta) - hlen
+
+                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
+                    raise error.CensoredBaseError(self._path,
+                                                  deltabase)
+
+            if (not (storeflags & FLAG_CENSORED)
+                and storageutil.deltaiscensored(
+                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
+                storeflags |= FLAG_CENSORED
+
+            linkrev = linkmapper(linknode)
+
+            nodes.append(node)
+
+            if node in self._revisions:
+                # Possibly reset parents to make them proper.
+                entry = self._revisions[node]
+
+                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+                    entry.p1node = p1
+                    entry.p1rev = self._nodetorev[p1]
+                    entry.flags &= ~FLAG_MISSING_P1
+
+                    self._db.execute(
+                        r'UPDATE fileindex SET p1rev=?, flags=? '
+                        r'WHERE id=?',
+                        (self._nodetorev[p1], entry.flags, entry.rid))
+
+                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+                    entry.p2node = p2
+                    entry.p2rev = self._nodetorev[p2]
+                    entry.flags &= ~FLAG_MISSING_P2
+
+                    self._db.execute(
+                        r'UPDATE fileindex SET p2rev=?, flags=? '
+                        r'WHERE id=?',
+                        (self._nodetorev[p2], entry.flags, entry.rid))
+
+                continue
+
+            if deltabase == nullid:
+                text = mdiff.patch(b'', delta)
+                storedelta = None
+            else:
+                text = None
+                storedelta = (deltabase, delta)
+
+            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
+                                 storedelta=storedelta, flags=storeflags)
+
+            if addrevisioncb:
+                addrevisioncb(self, node)
+
+        return nodes
+
+    def censorrevision(self, tr, censornode, tombstone=b''):
+        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
+
+        # This restriction is cargo culted from revlogs and makes no sense for
+        # SQLite, since columns can be resized at will.
+        if len(tombstone) > len(self.revision(censornode, raw=True)):
+            raise error.Abort(_('censor tombstone must be no longer than '
+                                'censored data'))
+
+        # We need to replace the censored revision's data with the tombstone.
+        # But replacing that data will have implications for delta chains that
+        # reference it.
+        #
+        # While "better," more complex strategies are possible, we do something
+        # simple: we find delta chain children of the censored revision and we
+        # replace those incremental deltas with fulltexts of their corresponding
+        # revision. Then we delete the now-unreferenced delta and original
+        # revision and insert a replacement.
+
+        # Find the delta to be censored.
+        censoreddeltaid = self._db.execute(
+            r'SELECT deltaid FROM fileindex WHERE id=?',
+            (self._revisions[censornode].rid,)).fetchone()[0]
+
+        # Find all its delta chain children.
+        # TODO once we support storing deltas for !files, we'll need to look
+        # for those delta chains too.
+        rows = list(self._db.execute(
+            r'SELECT id, pathid, node FROM fileindex '
+            r'WHERE deltabaseid=? OR deltaid=?',
+            (censoreddeltaid, censoreddeltaid)))
+
+        for row in rows:
+            rid, pathid, node = row
+
+            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
+                                         zstddctx=self._dctx)
+
+            deltahash = hashlib.sha1(fulltext).digest()
+
+            if self._compengine == 'zstd':
+                deltablob = self._cctx.compress(fulltext)
+                compression = COMPRESSION_ZSTD
+            elif self._compengine == 'zlib':
+                deltablob = zlib.compress(fulltext)
+                compression = COMPRESSION_ZLIB
+            elif self._compengine == 'none':
+                deltablob = fulltext
+                compression = COMPRESSION_NONE
+            else:
+                raise error.ProgrammingError('unhandled compression engine: %s'
+                                             % self._compengine)
+
+            if len(deltablob) >= len(fulltext):
+                deltablob = fulltext
+                compression = COMPRESSION_NONE
+
+            deltaid = insertdelta(self._db, compression, deltahash, deltablob)
+
+            self._db.execute(
+                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
+                r'WHERE id=?', (deltaid, rid))
+
+        # Now create the tombstone delta and replace the delta on the censored
+        # node.
+        deltahash = hashlib.sha1(tombstone).digest()
+        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
+                                       deltahash, tombstone)
+
+        flags = self._revisions[censornode].flags
+        flags |= FLAG_CENSORED
+
+        self._db.execute(
+            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
+            r'WHERE pathid=? AND node=?',
+            (flags, tombstonedeltaid, self._pathid, censornode))
+
+        self._db.execute(
+            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
+
+        self._refreshindex()
+        self._revisioncache.clear()
+
+    def getstrippoint(self, minlink):
+        return storageutil.resolvestripinfo(minlink, len(self) - 1,
+                                            [self.rev(n) for n in self.heads()],
+                                            self.linkrev,
+                                            self.parentrevs)
+
+    def strip(self, minlink, transaction):
+        if not len(self):
+            return
+
+        rev, _ignored = self.getstrippoint(minlink)
+
+        if rev == len(self):
+            return
+
+        for rev in self.revs(rev):
+            self._db.execute(
+                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
+                (self._pathid, self.node(rev)))
+
+        # TODO how should we garbage collect data in delta table?
+
+        self._refreshindex()
+
+    # End of ifilemutation interface.
+
+    # Start of ifilestorage interface.
+
+    def files(self):
+        return []
+
+    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        d = {}
+
+        if exclusivefiles:
+            d['exclusivefiles'] = []
+
+        if sharedfiles:
+            # TODO list sqlite file(s) here.
+            d['sharedfiles'] = []
+
+        if revisionscount:
+            d['revisionscount'] = len(self)
+
+        if trackedsize:
+            d['trackedsize'] = sum(len(self.revision(node))
+                                       for node in self._nodetorev)
+
+        if storedsize:
+            # TODO implement this?
+            d['storedsize'] = None
+
+        return d
+
+    def verifyintegrity(self, state):
+        state['skipread'] = set()
+
+        for rev in self:
+            node = self.node(rev)
+
+            try:
+                self.revision(node)
+            except Exception as e:
+                yield sqliteproblem(
+                    error=_('unpacking %s: %s') % (short(node), e),
+                    node=node)
+
+                state['skipread'].add(node)
+
+    # End of ifilestorage interface.
+
+    def _checkhash(self, fulltext, node, p1=None, p2=None):
+        if p1 is None and p2 is None:
+            p1, p2 = self.parents(node)
+
+        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
+            return
+
+        try:
+            del self._revisioncache[node]
+        except KeyError:
+            pass
+
+        if storageutil.iscensoredtext(fulltext):
+            raise error.CensoredNodeError(self._path, node, fulltext)
+
+        raise SQLiteStoreError(_('integrity check failed on %s') %
+                               self._path)
+
+    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
+                        p1, p2, storedelta=None, flags=0):
+        if self._pathid is None:
+            res = self._db.execute(
+                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
+            self._pathid = res.lastrowid
+
+        # For simplicity, always store a delta against p1.
+        # TODO we need a lot more logic here to make behavior reasonable.
+
+        if storedelta:
+            deltabase, delta = storedelta
+
+            if isinstance(deltabase, int):
+                deltabase = self.node(deltabase)
+
+        else:
+            assert revisiondata is not None
+            deltabase = p1
+
+            if deltabase == nullid:
+                delta = revisiondata
+            else:
+                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
+                                       revisiondata)
+
+        # The file index stores a pointer to its delta and to its delta
+        # base. The delta base is stored via a pointer to the fileindex PK.
+        if deltabase == nullid:
+            baseid = None
+        else:
+            baseid = self._revisions[deltabase].rid
+
+        # Deltas are stored with a hash of their content. This allows
+        # us to de-duplicate. The table is configured to ignore conflicts
+        # and it is faster to just insert and silently noop than to look
+        # first.
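+        # (insertdelta is expected to return the id of the already-stored
+        # row when this hash is present, so callers get a stable delta id
+        # either way.)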
+        deltahash = hashlib.sha1(delta).digest()
+
+        if self._compengine == 'zstd':
+            deltablob = self._cctx.compress(delta)
+            compression = COMPRESSION_ZSTD
+        elif self._compengine == 'zlib':
+            deltablob = zlib.compress(delta)
+            compression = COMPRESSION_ZLIB
+        elif self._compengine == 'none':
+            deltablob = delta
+            compression = COMPRESSION_NONE
+        else:
+            raise error.ProgrammingError('unhandled compression engine: %s' %
+                                         self._compengine)
+
+        # Don't store compressed data if it isn't practical.
+        if len(deltablob) >= len(delta):
+            deltablob = delta
+            compression = COMPRESSION_NONE
+
+        deltaid = insertdelta(self._db, compression, deltahash, deltablob)
+
+        rev = len(self)
+
+        if p1 == nullid:
+            p1rev = nullrev
+        else:
+            p1rev = self._nodetorev[p1]
+
+        if p2 == nullid:
+            p2rev = nullrev
+        else:
+            p2rev = self._nodetorev[p2]
+
+        rid = self._db.execute(
+            r'INSERT INTO fileindex ('
+            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
+            r'    deltaid, deltabaseid) '
+            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
+            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
+             deltaid, baseid)
+        ).lastrowid
+
+        entry = revisionentry(
+            rid=rid,
+            rev=rev,
+            node=node,
+            p1rev=p1rev,
+            p2rev=p2rev,
+            p1node=p1,
+            p2node=p2,
+            linkrev=linkrev,
+            flags=flags)
+
+        self._nodetorev[node] = rev
+        self._revtonode[rev] = node
+        self._revisions[node] = entry
+
+        return node
+
+class sqliterepository(localrepo.localrepository):
+    def cancopy(self):
+        return False
+
+    def transaction(self, *args, **kwargs):
+        current = self.currenttransaction()
+
+        tr = super(sqliterepository, self).transaction(*args, **kwargs)
+
+        if current:
+            return tr
+
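+        # Only the outermost Mercurial transaction reaches this point (the
+        # nested case returned above). The SQLite commit is deferred until
+        # the Mercurial transaction finalizes.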
+        self._dbconn.execute(r'BEGIN TRANSACTION')
+
+        def committransaction(_):
+            self._dbconn.commit()
+
+        tr.addfinalize('sqlitestore', committransaction)
+
+        return tr
+
+    @property
+    def _dbconn(self):
+        # SQLite connections can only be used on the thread that created
+        # them. In most cases, this "just works." However, hgweb uses
+        # multiple threads.
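+        # We therefore cache a single connection keyed by the creating
+        # thread and reopen the database whenever another thread asks.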
+        tid = threading.current_thread().ident
+
+        if self._db:
+            if self._db[0] == tid:
+                return self._db[1]
+
+        db = makedb(self.svfs.join('db.sqlite'))
+        self._db = (tid, db)
+
+        return db
+
+def makedb(path):
+    """Construct a database handle for a database at path."""
+
+    db = sqlite3.connect(path)
+    db.text_factory = bytes
+
+    res = db.execute(r'PRAGMA user_version').fetchone()[0]
+
+    # New database.
+    if res == 0:
+        for statement in CREATE_SCHEMA:
+            db.execute(statement)
+
+        db.commit()
+
+    elif res == CURRENT_SCHEMA_VERSION:
+        pass
+
+    else:
+        raise error.Abort(_('sqlite database has unrecognized version'))
+
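+    # Write-ahead logging allows concurrent readers while a writer is
+    # active.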
+    db.execute(r'PRAGMA journal_mode=WAL')
+
+    return db
+
+def featuresetup(ui, supported):
+    supported.add(REQUIREMENT)
+
+    if zstd:
+        supported.add(REQUIREMENT_ZSTD)
+
+    supported.add(REQUIREMENT_ZLIB)
+    supported.add(REQUIREMENT_NONE)
+    supported.add(REQUIREMENT_SHALLOW_FILES)
+    supported.add(repository.NARROW_REQUIREMENT)
+
+def newreporequirements(orig, ui, createopts):
+    if createopts['backend'] != 'sqlite':
+        return orig(ui, createopts)
+
+    # This restriction can be lifted once we have more confidence.
+    if 'sharedrepo' in createopts:
+        raise error.Abort(_('shared repositories not supported with SQLite '
+                            'store'))
+
+    # This filtering is out of an abundance of caution: we want to ensure
+    # we honor creation options, and we do that by enumerating exactly the
+    # creation options we recognize.
+    known = {
+        'narrowfiles',
+        'backend',
+        'shallowfilestore',
+    }
+
+    unsupported = set(createopts) - known
+    if unsupported:
+        raise error.Abort(_('SQLite store does not support repo creation '
+                            'option: %s') % ', '.join(sorted(unsupported)))
+
+    # Since we're a hybrid store that still relies on revlogs, we fall back
+    # to the revlogv1 backend's storage requirements and then add our own
+    # requirement.
+    createopts['backend'] = 'revlogv1'
+    requirements = orig(ui, createopts)
+    requirements.add(REQUIREMENT)
+
+    compression = ui.config('storage', 'sqlite.compression')
+
+    if compression == 'zstd' and not zstd:
+        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
+                            'zstandard compression not available to this '
+                            'Mercurial install'))
+
+    if compression == 'zstd':
+        requirements.add(REQUIREMENT_ZSTD)
+    elif compression == 'zlib':
+        requirements.add(REQUIREMENT_ZLIB)
+    elif compression == 'none':
+        requirements.add(REQUIREMENT_NONE)
+    else:
+        raise error.Abort(_('unknown compression engine defined in '
+                            'storage.sqlite.compression: %s') % compression)
+
+    if createopts.get('shallowfilestore'):
+        requirements.add(REQUIREMENT_SHALLOW_FILES)
+
+    return requirements
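+
+# For example (assuming core's ``storage.new-repo-backend`` plumbing from
+# this storage series, with the sqlitestore extension enabled),
+# ``hg --config storage.new-repo-backend=sqlite init repo`` would take the
+# branch above and record REQUIREMENT plus one compression requirement.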
+
+@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
+class sqlitefilestorage(object):
+    """Repository file storage backed by SQLite."""
+    def file(self, path):
+        if path[0] == b'/':
+            path = path[1:]
+
+        if REQUIREMENT_ZSTD in self.requirements:
+            compression = 'zstd'
+        elif REQUIREMENT_ZLIB in self.requirements:
+            compression = 'zlib'
+        elif REQUIREMENT_NONE in self.requirements:
+            compression = 'none'
+        else:
+            raise error.Abort(_('unable to determine what compression engine '
+                                'to use for SQLite storage'))
+
+        return sqlitefilestore(self._dbconn, path, compression)
+
+def makefilestorage(orig, requirements, features, **kwargs):
+    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
+    if REQUIREMENT in requirements:
+        if REQUIREMENT_SHALLOW_FILES in requirements:
+            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
+
+        return sqlitefilestorage
+    else:
+        return orig(requirements=requirements, features=features, **kwargs)
+
+def makemain(orig, ui, requirements, **kwargs):
+    if REQUIREMENT in requirements:
+        if REQUIREMENT_ZSTD in requirements and not zstd:
+            raise error.Abort(_('repository uses zstandard compression, which '
+                                'is not available to this Mercurial install'))
+
+        return sqliterepository
+
+    return orig(requirements=requirements, **kwargs)
+
+def verifierinit(orig, self, *args, **kwargs):
+    orig(self, *args, **kwargs)
+
+    # We don't care that files in the store don't align with what is
+    # advertised. So suppress these warnings.
+    self.warnorphanstorefiles = False
+
+def extsetup(ui):
+    localrepo.featuresetupfuncs.add(featuresetup)
+    extensions.wrapfunction(localrepo, 'newreporequirements',
+                            newreporequirements)
+    extensions.wrapfunction(localrepo, 'makefilestorage',
+                            makefilestorage)
+    extensions.wrapfunction(localrepo, 'makemain',
+                            makemain)
+    extensions.wrapfunction(verify.verifier, '__init__',
+                            verifierinit)
+
+def reposetup(ui, repo):
+    if isinstance(repo, sqliterepository):
+        repo._db = None
+
+    # TODO check for bundlerepository?
--- a/hgext/strip.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/strip.py	Mon Oct 22 14:46:06 2018 -0400
@@ -110,8 +110,9 @@
           ('k', 'keep', None, _("do not modify working directory during "
                                 "strip")),
           ('B', 'bookmark', [], _("remove revs only reachable from given"
-                                  " bookmark"))],
-          _('hg strip [-k] [-f] [-B bookmark] [-r] REV...'))
+                                  " bookmark"), _('BOOKMARK'))],
+          _('hg strip [-k] [-f] [-B bookmark] [-r] REV...'),
+          helpcategory=command.CATEGORY_MAINTENANCE)
 def stripcmd(ui, repo, *revs, **opts):
     """strip changesets and all their descendants from the repository
 
--- a/hgext/transplant.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/transplant.py	Mon Oct 22 14:46:06 2018 -0400
@@ -184,7 +184,8 @@
                     if pulls:
                         if source != repo:
                             exchange.pull(repo, source.peer(), heads=pulls)
-                        merge.update(repo, pulls[-1], False, False)
+                        merge.update(repo, pulls[-1], branchmerge=False,
+                                     force=False)
                         p1, p2 = repo.dirstate.parents()
                         pulls = []
 
@@ -249,7 +250,7 @@
             tr.close()
             if pulls:
                 exchange.pull(repo, source.peer(), heads=pulls)
-                merge.update(repo, pulls[-1], False, False)
+                merge.update(repo, pulls[-1], branchmerge=False, force=False)
         finally:
             self.saveseries(revmap, merges)
             self.transplants.write()
@@ -503,7 +504,7 @@
 def hasnode(repo, node):
     try:
         return repo.changelog.rev(node) is not None
-    except error.RevlogError:
+    except error.StorageError:
         return False
 
 def browserevs(ui, repo, nodes, opts):
@@ -562,7 +563,8 @@
     ('', 'filter', '',
      _('filter changesets through command'), _('CMD'))],
     _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] '
-      '[-m REV] [REV]...'))
+      '[-m REV] [REV]...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def transplant(ui, repo, *revs, **opts):
     '''transplant changesets from another branch
 
--- a/hgext/uncommit.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/uncommit.py	Mon Oct 22 14:46:06 2018 -0400
@@ -138,7 +138,8 @@
 @command('uncommit',
     [('', 'keep', False, _('allow an empty commit after uncommiting')),
     ] + commands.walkopts,
-    _('[OPTION]... [FILE]...'))
+    _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def uncommit(ui, repo, *pats, **opts):
     """uncommit part or all of a local changeset
 
@@ -182,7 +183,7 @@
 
             with repo.dirstate.parentchange():
                 repo.dirstate.setparents(newid, node.nullid)
-                s = repo.status(old.p1(), old, match=match)
+                s = old.p1().status(old, match=match)
                 _fixdirstate(repo, old, repo[newid], s)
 
 def predecessormarkers(ctx):
@@ -190,7 +191,8 @@
     for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()):
         yield obsutil.marker(ctx.repo(), data)
 
-@command('^unamend', [])
+@command('unamend', [], helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
+         helpbasic=True)
 def unamend(ui, repo, **opts):
     """undo the most recent amend operation on a current changeset
 
--- a/hgext/win32text.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/hgext/win32text.py	Mon Oct 22 14:46:06 2018 -0400
@@ -49,6 +49,7 @@
     short,
 )
 from mercurial import (
+    pycompat,
     registrar,
 )
 from mercurial.utils import (
@@ -141,7 +142,8 @@
     # changegroup that contains an unacceptable commit followed later
     # by a commit that fixes the problem.
     tip = repo['tip']
-    for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
+    for rev in pycompat.xrange(repo.changelog.tiprev(),
+                               repo[node].rev() - 1, -1):
         c = repo[rev]
         for f in c.files():
             if f in seen or f not in tip or f not in c:
--- a/i18n/check-translation.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/i18n/check-translation.py	Mon Oct 22 14:46:06 2018 -0400
@@ -224,14 +224,6 @@
         failures, tests = doctest.testmod()
         sys.exit(failures and 1 or 0)
 
-    # replace polib._POFileParser to show linenum of problematic msgstr
-    class ExtPOFileParser(polib._POFileParser):
-        def process(self, symbol, linenum):
-            super(ExtPOFileParser, self).process(symbol, linenum)
-            if symbol == 'MS': # msgstr
-                self.current_entry.linenum = linenum
-    polib._POFileParser = ExtPOFileParser
-
     detected = []
     warning = options.warning
     for f in args:
--- a/i18n/hggettext	Wed Oct 10 12:25:28 2018 -0400
+++ b/i18n/hggettext	Mon Oct 22 14:46:06 2018 -0400
@@ -63,7 +63,7 @@
 
 doctestre = re.compile(r'^ +>>> ', re.MULTILINE)
 
-def offset(src, doc, name, default):
+def offset(src, doc, name, lineno, default):
     """Compute offset or issue a warning on stdout."""
     # remove doctest part, in order to avoid backslash mismatching
     m = doctestre.search(doc)
@@ -76,8 +76,9 @@
         # This can happen if the docstring contains unnecessary escape
         # sequences such as \" in a triple-quoted string. The problem
         # is that \" is turned into " and so doc won't appear in src.
-        sys.stderr.write("warning: unknown offset in %s, assuming %d lines\n"
-                         % (name, default))
+        sys.stderr.write("%s:%d:warning:"
+                         " unknown docstr offset, assuming %d lines\n"
+                         % (name, lineno, default))
         return default
     else:
         return src.count('\n', 0, end)
@@ -106,7 +107,7 @@
     if not path.startswith('mercurial/') and mod.__doc__:
         with open(path) as fobj:
             src = fobj.read()
-        lineno = 1 + offset(src, mod.__doc__, path, 7)
+        lineno = 1 + offset(src, mod.__doc__, path, 1, 7)
         print(poentry(path, lineno, mod.__doc__))
 
     functions = list(getattr(mod, 'i18nfunctions', []))
@@ -129,7 +130,6 @@
             actualpath = '%s%s.py' % (funcmod.__name__.replace('.', '/'), extra)
 
             src = inspect.getsource(func)
-            name = "%s.%s" % (actualpath, func.__name__)
             lineno = inspect.getsourcelines(func)[1]
             doc = docobj.__doc__
             origdoc = getattr(docobj, '_origdoc', '')
@@ -137,9 +137,9 @@
                 doc = doc.rstrip()
                 origdoc = origdoc.rstrip()
             if origdoc:
-                lineno += offset(src, origdoc, name, 1)
+                lineno += offset(src, origdoc, actualpath, lineno, 1)
             else:
-                lineno += offset(src, doc, name, 1)
+                lineno += offset(src, doc, actualpath, lineno, 1)
             print(poentry(actualpath, lineno, doc))
 
 
--- a/i18n/polib.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/i18n/polib.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,5 +1,5 @@
-# -*- coding: utf-8 -*-
 # no-check-code
+# -*- coding: utf-8 -*-
 #
 # License: MIT (see LICENSE file provided)
 # vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
@@ -15,10 +15,10 @@
 
 from __future__ import absolute_import
 
-__author__    = 'David Jean Louis <izimobil@gmail.com>'
-__version__   = '0.6.4'
-__all__       = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
-                 'detect_encoding', 'escape', 'unescape', 'detect_encoding',]
+__author__ = 'David Jean Louis <izimobil@gmail.com>'
+__version__ = '1.0.7'
+__all__ = ['pofile', 'POFile', 'POEntry', 'mofile', 'MOFile', 'MOEntry',
+           'default_encoding', 'escape', 'unescape', 'detect_encoding', ]
 
 import array
 import codecs
@@ -27,14 +27,47 @@
 import struct
 import sys
 import textwrap
-import types
+
+try:
+    import io
+except ImportError:
+    # replacement of io.open() for python < 2.6
+    # we use codecs instead
+    class io(object):
+        @staticmethod
+        def open(fpath, mode='r', encoding=None):
+            return codecs.open(fpath, mode, encoding)
 
 
 # the default encoding to use when encoding cannot be detected
 default_encoding = 'utf-8'
 
+# python 2/3 compatibility helpers {{{
+
+
+if sys.version_info[:2] < (3, 0):
+    PY3 = False
+    text_type = unicode
+
+    def b(s):
+        return s
+
+    def u(s):
+        return unicode(s, "unicode_escape")
+
+else:
+    PY3 = True
+    text_type = str
+
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+# }}}
 # _pofile_or_mofile {{{
 
+
 def _pofile_or_mofile(f, type, **kwargs):
     """
     Internal function used by :func:`polib.pofile` and :func:`polib.mofile` to
@@ -50,15 +83,34 @@
     parser = kls(
         f,
         encoding=enc,
-        check_for_duplicates=kwargs.get('check_for_duplicates', False)
+        check_for_duplicates=kwargs.get('check_for_duplicates', False),
+        klass=kwargs.get('klass')
     )
     instance = parser.parse()
     instance.wrapwidth = kwargs.get('wrapwidth', 78)
     return instance
+# }}}
+# _is_file {{{
 
+
+def _is_file(filename_or_contents):
+    """
+    Safely returns the value of os.path.exists(filename_or_contents).
+
+    Arguments:
+
+    ``filename_or_contents``
+        either a filename, or a string holding the contents of some file.
+        In the latter case, this function will always return False.
+    """
+    try:
+        return os.path.exists(filename_or_contents)
+    except (ValueError, UnicodeEncodeError):
+        return False
 # }}}
 # function pofile() {{{
 
+
 def pofile(pofile, **kwargs):
     """
     Convenience function that parses the po or pot file ``pofile`` and returns
@@ -80,12 +132,17 @@
     ``check_for_duplicates``
         whether to check for duplicate entries when adding entries to the
         file (optional, default: ``False``).
+
+    ``klass``
+        class which is used to instantiate the return value (optional,
+        default: ``None``, the return value will be a :class:`~polib.POFile`
+        instance).
     """
     return _pofile_or_mofile(pofile, 'pofile', **kwargs)
-
 # }}}
 # function mofile() {{{
 
+
 def mofile(mofile, **kwargs):
     """
     Convenience function that parses the mo file ``mofile`` and returns a
@@ -108,12 +165,17 @@
     ``check_for_duplicates``
         whether to check for duplicate entries when adding entries to the
         file (optional, default: ``False``).
+
+    ``klass``
+        class which is used to instantiate the return value (optional,
+        default: ``None``, the return value will be a :class:`~polib.POFile`
+        instance).
     """
     return _pofile_or_mofile(mofile, 'mofile', **kwargs)
-
 # }}}
 # function detect_encoding() {{{
 
+
 def detect_encoding(file, binary_mode=False):
     """
     Try to detect the encoding used by the ``file``. The ``file`` argument can
@@ -129,7 +191,9 @@
     ``binary_mode``
         boolean, set this to True if ``file`` is a mo file.
     """
-    rx = re.compile(r'"?Content-Type:.+? charset=([\w_\-:\.]+)')
+    PATTERN = r'"?Content-Type:.+? charset=([\w_\-:\.]+)'
+    rxt = re.compile(u(PATTERN))
+    rxb = re.compile(b(PATTERN))
 
     def charset_exists(charset):
         """Check whether ``charset`` is valid or not."""
@@ -139,31 +203,36 @@
             return False
         return True
 
-    if not os.path.exists(file):
-        match = rx.search(file)
+    if not _is_file(file):
+        match = rxt.search(file)
         if match:
             enc = match.group(1).strip()
             if charset_exists(enc):
                 return enc
     else:
-        if binary_mode:
+        # For PY3, always treat as binary
+        if binary_mode or PY3:
             mode = 'rb'
+            rx = rxb
         else:
             mode = 'r'
+            rx = rxt
         f = open(file, mode)
         for l in f.readlines():
             match = rx.search(l)
             if match:
                 f.close()
                 enc = match.group(1).strip()
+                if not isinstance(enc, text_type):
+                    enc = enc.decode('utf-8')
                 if charset_exists(enc):
                     return enc
         f.close()
     return default_encoding
-
 # }}}
 # function escape() {{{
 
+
 def escape(st):
     """
     Escapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
@@ -174,10 +243,10 @@
              .replace('\r', r'\r')\
              .replace('\n', r'\n')\
              .replace('\"', r'\"')
-
 # }}}
 # function unescape() {{{
 
+
 def unescape(st):
     """
     Unescapes the characters ``\\\\``, ``\\t``, ``\\n``, ``\\r`` and ``"`` in
@@ -193,12 +262,12 @@
             return '\r'
         if m == '\\':
             return '\\'
-        return m # handles escaped double quote
+        return m  # handles escaped double quote
     return re.sub(r'\\(\\|n|t|r|")', unescape_repl, st)
-
 # }}}
 # class _BaseFile {{{
 
+
 class _BaseFile(list):
     """
     Common base class for the :class:`~polib.POFile` and :class:`~polib.MOFile`
@@ -227,7 +296,7 @@
         list.__init__(self)
         # the opened file handle
         pofile = kwargs.get('pofile', None)
-        if pofile and os.path.exists(pofile):
+        if pofile and _is_file(pofile):
             self.fpath = pofile
         else:
             self.fpath = kwargs.get('fpath')
@@ -254,38 +323,45 @@
             ret.append(entry.__unicode__(self.wrapwidth))
         for entry in self.obsolete_entries():
             ret.append(entry.__unicode__(self.wrapwidth))
-        ret = '\n'.join(ret)
+        ret = u('\n').join(ret)
 
-        if type(ret) != types.UnicodeType:
-            return unicode(ret, self.encoding)
+        assert isinstance(ret, text_type)
+        #if type(ret) != text_type:
+        #    return unicode(ret, self.encoding)
         return ret
 
-    def __str__(self):
-        """
-        Returns the string representation of the file.
-        """
-        return unicode(self).encode(self.encoding)
+    if PY3:
+        def __str__(self):
+            return self.__unicode__()
+    else:
+        def __str__(self):
+            """
+            Returns the string representation of the file.
+            """
+            return unicode(self).encode(self.encoding)
 
     def __contains__(self, entry):
         """
-        Overriden ``list`` method to implement the membership test (in and
+        Overridden ``list`` method to implement the membership test (in and
         not in).
         The method considers that an entry is in the file if it finds an entry
-        that has the same msgid (the test is **case sensitive**).
+        that has the same msgid (the test is **case sensitive**) and the same
+        msgctxt (or none for both entries).
 
         Argument:
 
         ``entry``
             an instance of :class:`~polib._BaseEntry`.
         """
-        return self.find(entry.msgid, by='msgid') is not None
+        return self.find(entry.msgid, by='msgid', msgctxt=entry.msgctxt) \
+            is not None
 
     def __eq__(self, other):
-        return unicode(self) == unicode(other)
+        return str(self) == str(other)
 
     def append(self, entry):
         """
-        Overriden method to check for duplicates entries, if a user tries to
+        Overridden method to check for duplicate entries: if a user tries to
         add an entry that is already in the file, the method will raise a
         ``ValueError`` exception.
 
@@ -300,7 +376,7 @@
 
     def insert(self, index, entry):
         """
-        Overriden method to check for duplicates entries, if a user tries to
+        Overridden method to check for duplicate entries: if a user tries to
         add an entry that is already in the file, the method will raise a
         ``ValueError`` exception.
 
@@ -332,7 +408,7 @@
             e.flags.append('fuzzy')
         return e
 
-    def save(self, fpath=None, repr_method='__str__'):
+    def save(self, fpath=None, repr_method='__unicode__'):
         """
         Saves the po file to ``fpath``.
         If it is an existing file and no ``fpath`` is provided, then the
@@ -354,8 +430,8 @@
         if repr_method == 'to_binary':
             fhandle = open(fpath, 'wb')
         else:
-            fhandle = codecs.open(fpath, 'w', self.encoding)
-            if type(contents) != types.UnicodeType:
+            fhandle = io.open(fpath, 'w', encoding=self.encoding)
+            if not isinstance(contents, text_type):
                 contents = contents.decode(self.encoding)
         fhandle.write(contents)
         fhandle.close()
@@ -381,7 +457,7 @@
             boolean, whether to also search in entries that are obsolete.
 
         ``msgctxt``
-            string, allows to specify a specific message context for the
+            string, allows specifying a specific message context for the
             search.
         """
         if include_obsolete_entries:
@@ -390,7 +466,7 @@
             entries = [e for e in self if not e.obsolete]
         for e in entries:
             if getattr(e, by) == st:
-                if msgctxt and e.msgctxt != msgctxt:
+                if msgctxt is not False and e.msgctxt != msgctxt:
                     continue
                 return e
         return None
@@ -412,7 +488,9 @@
             'Language-Team',
             'MIME-Version',
             'Content-Type',
-            'Content-Transfer-Encoding'
+            'Content-Transfer-Encoding',
+            'Language',
+            'Plural-Forms'
         ]
         ordered_data = []
         for data in data_order:
@@ -423,9 +501,7 @@
                 pass
         # the rest of the metadata will be alphabetically ordered since there
         # are no specs for this AFAIK
-        keys = metadata.keys()
-        keys.sort()
-        for data in keys:
+        for data in sorted(metadata.keys()):
             value = metadata[data]
             ordered_data.append((data, value))
         return ordered_data
@@ -436,18 +512,12 @@
         """
         offsets = []
         entries = self.translated_entries()
+
         # the keys are sorted in the .mo file
         def cmp(_self, other):
             # msgfmt compares entries with msgctxt if it exists
-            if _self.msgctxt:
-                self_msgid = _self.msgctxt
-            else:
-                self_msgid = _self.msgid
-
-            if other.msgctxt:
-                other_msgid = other.msgctxt
-            else:
-                other_msgid = other.msgid
+            self_msgid = _self.msgctxt and _self.msgctxt or _self.msgid
+            other_msgid = other.msgctxt and other.msgctxt or other.msgid
             if self_msgid > other_msgid:
                 return 1
             elif self_msgid < other_msgid:
@@ -455,25 +525,23 @@
             else:
                 return 0
         # add metadata entry
-        entries.sort(cmp)
+        entries.sort(key=lambda o: o.msgctxt or o.msgid)
         mentry = self.metadata_as_entry()
         #mentry.msgstr = mentry.msgstr.replace('\\n', '').lstrip()
         entries = [mentry] + entries
         entries_len = len(entries)
-        ids, strs = '', ''
+        ids, strs = b(''), b('')
         for e in entries:
             # For each string, we need size and file offset.  Each string is
             # NUL terminated; the NUL does not count into the size.
-            msgid = ''
+            msgid = b('')
             if e.msgctxt:
                 # Contexts are stored by storing the concatenation of the
                 # context, a <EOT> byte, and the original string
                 msgid = self._encode(e.msgctxt + '\4')
             if e.msgid_plural:
-                indexes = e.msgstr_plural.keys()
-                indexes.sort()
                 msgstr = []
-                for index in indexes:
+                for index in sorted(e.msgstr_plural.keys()):
                     msgstr.append(e.msgstr_plural[index])
                 msgid += self._encode(e.msgid + '\0' + e.msgid_plural)
                 msgstr = self._encode('\0'.join(msgstr))
@@ -481,11 +549,11 @@
                 msgid += self._encode(e.msgid)
                 msgstr = self._encode(e.msgstr)
             offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
-            ids  += msgid  + '\0'
-            strs += msgstr + '\0'
+            ids += msgid + b('\0')
+            strs += msgstr + b('\0')
 
         # The header is 7 32-bit unsigned integers.
-        keystart = 7*4+16*entries_len
+        keystart = 7 * 4 + 16 * entries_len
         # and the values start after the keys
         valuestart = keystart + len(ids)
         koffsets = []
@@ -493,26 +561,30 @@
         # The string table first has the list of keys, then the list of values.
         # Each entry has first the size of the string, then the file offset.
         for o1, l1, o2, l2 in offsets:
-            koffsets += [l1, o1+keystart]
-            voffsets += [l2, o2+valuestart]
+            koffsets += [l1, o1 + keystart]
+            voffsets += [l2, o2 + valuestart]
         offsets = koffsets + voffsets
-        # check endianness for magic number
-        if struct.pack('@h', 1) == struct.pack('<h', 1):
-            magic_number = MOFile.LITTLE_ENDIAN
-        else:
-            magic_number = MOFile.BIG_ENDIAN
 
         output = struct.pack(
             "Iiiiiii",
-            magic_number,      # Magic number
-            0,                 # Version
-            entries_len,       # # of entries
-            7*4,               # start of key index
-            7*4+entries_len*8, # start of value index
-            0, keystart        # size and offset of hash table
-                               # Important: we don't use hash tables
+            # Magic number
+            MOFile.MAGIC,
+            # Version
+            0,
+            # number of entries
+            entries_len,
+            # start of key index
+            7 * 4,
+            # start of value index
+            7 * 4 + entries_len * 8,
+            # size and offset of hash table, we don't use hash tables
+            0, keystart
+
         )
-        output += array.array("i", offsets).tostring()
+        if PY3 and sys.version_info.minor > 1:  # Python 3.2 or later
+            output += array.array("i", offsets).tobytes()
+        else:
+            output += array.array("i", offsets).tostring()
         output += ids
         output += strs
         return output
@@ -522,13 +594,13 @@
         Encodes the given ``mixed`` argument with the file encoding if and
         only if it's a unicode string and returns the encoded string.
         """
-        if type(mixed) == types.UnicodeType:
-            return mixed.encode(self.encoding)
+        if isinstance(mixed, text_type):
+            mixed = mixed.encode(self.encoding)
         return mixed
-
 # }}}
 # class POFile {{{
 
+
 class POFile(_BaseFile):
     """
     Po (or Pot) file reader/writer.
@@ -542,13 +614,15 @@
         """
         ret, headers = '', self.header.split('\n')
         for header in headers:
-            if header[:1] in [',', ':']:
+            if not len(header):
+                ret += "#\n"
+            elif header[:1] in [',', ':']:
                 ret += '#%s\n' % header
             else:
                 ret += '# %s\n' % header
 
-        if type(ret) != types.UnicodeType:
-            ret = unicode(ret, self.encoding)
+        if not isinstance(ret, text_type):
+            ret = ret.decode(self.encoding)
 
         return ret + _BaseFile.__unicode__(self)
 
@@ -572,7 +646,7 @@
         if total == 0:
             return 100
         translated = len(self.translated_entries())
-        return int((100.00 / float(total)) * translated)
+        return int(translated * 100 / float(total))
 
     def translated_entries(self):
         """
@@ -584,7 +658,7 @@
         """
         Convenience method that returns the list of untranslated entries.
         """
-        return [e for e in self if not e.translated() and not e.obsolete \
+        return [e for e in self if not e.translated() and not e.obsolete
                 and not 'fuzzy' in e.flags]
 
     def fuzzy_entries(self):
@@ -615,28 +689,32 @@
         ``refpot``
             object POFile, the reference catalog.
         """
+        # Store entries in dict/set for faster access
+        self_entries = dict((entry.msgid, entry) for entry in self)
+        refpot_msgids = set(entry.msgid for entry in refpot)
+        # Merge entries that are in the refpot
         for entry in refpot:
-            e = self.find(entry.msgid, include_obsolete_entries=True)
+            e = self_entries.get(entry.msgid)
             if e is None:
                 e = POEntry()
                 self.append(e)
             e.merge(entry)
         # ok, now we must "obsolete" entries that are not in the refpot anymore
         for entry in self:
-            if refpot.find(entry.msgid) is None:
+            if entry.msgid not in refpot_msgids:
                 entry.obsolete = True
-
 # }}}
 # class MOFile {{{
 
+
 class MOFile(_BaseFile):
     """
     Mo file reader/writer.
     This class inherits the :class:`~polib._BaseFile` class and, by
     extension, the python ``list`` type.
     """
-    BIG_ENDIAN    = 0xde120495
-    LITTLE_ENDIAN = 0x950412de
+    MAGIC = 0x950412de
+    MAGIC_SWAPPED = 0xde120495
 
     def __init__(self, *args, **kwargs):
         """
@@ -698,10 +776,10 @@
         Convenience method to keep the same interface with POFile instances.
         """
         return []
-
 # }}}
 # class _BaseEntry {{{
 
+
 class _BaseEntry(object):
     """
     Base class for :class:`~polib.POEntry` and :class:`~polib.MOEntry` classes.
@@ -753,12 +831,14 @@
         ret = []
         # write the msgctxt if any
         if self.msgctxt is not None:
-            ret += self._str_field("msgctxt", delflag, "", self.msgctxt, wrapwidth)
+            ret += self._str_field("msgctxt", delflag, "", self.msgctxt,
+                                   wrapwidth)
         # write the msgid
         ret += self._str_field("msgid", delflag, "", self.msgid, wrapwidth)
         # write the msgid_plural if any
         if self.msgid_plural:
-            ret += self._str_field("msgid_plural", delflag, "", self.msgid_plural, wrapwidth)
+            ret += self._str_field("msgid_plural", delflag, "",
+                                   self.msgid_plural, wrapwidth)
         if self.msgstr_plural:
             # write the msgstr_plural if any
             msgstrs = self.msgstr_plural
@@ -767,30 +847,34 @@
             for index in keys:
                 msgstr = msgstrs[index]
                 plural_index = '[%s]' % index
-                ret += self._str_field("msgstr", delflag, plural_index, msgstr, wrapwidth)
+                ret += self._str_field("msgstr", delflag, plural_index, msgstr,
+                                       wrapwidth)
         else:
             # otherwise write the msgstr
-            ret += self._str_field("msgstr", delflag, "", self.msgstr, wrapwidth)
+            ret += self._str_field("msgstr", delflag, "", self.msgstr,
+                                   wrapwidth)
         ret.append('')
-        ret = '\n'.join(ret)
-
-        if type(ret) != types.UnicodeType:
-            return unicode(ret, self.encoding)
+        ret = u('\n').join(ret)
         return ret
 
-    def __str__(self):
-        """
-        Returns the string representation of the entry.
-        """
-        return unicode(self).encode(self.encoding)
+    if PY3:
+        def __str__(self):
+            return self.__unicode__()
+    else:
+        def __str__(self):
+            """
+            Returns the string representation of the entry.
+            """
+            return unicode(self).encode(self.encoding)
 
     def __eq__(self, other):
-        return unicode(self) == unicode(other)
+        return str(self) == str(other)
 
-    def _str_field(self, fieldname, delflag, plural_index, field, wrapwidth=78):
+    def _str_field(self, fieldname, delflag, plural_index, field,
+                   wrapwidth=78):
         lines = field.splitlines(True)
         if len(lines) > 1:
-            lines = [''] + lines # start with initial empty line
+            lines = [''] + lines  # start with initial empty line
         else:
             escaped_field = escape(field)
             specialchars_count = 0
@@ -804,9 +888,9 @@
             real_wrapwidth = wrapwidth - flength + specialchars_count
             if wrapwidth > 0 and len(field) > real_wrapwidth:
                 # Wrap the line but take field name into account
-                lines = [''] + [unescape(item) for item in textwrap.wrap(
+                lines = [''] + [unescape(item) for item in wrap(
                     escaped_field,
-                    wrapwidth - 2, # 2 for quotes ""
+                    wrapwidth - 2,  # 2 for quotes ""
                     drop_whitespace=False,
                     break_long_words=False
                 )]
@@ -818,13 +902,13 @@
 
         ret = ['%s%s%s "%s"' % (delflag, fieldname, plural_index,
                                 escape(lines.pop(0)))]
-        for mstr in lines:
-            ret.append('%s"%s"' % (delflag, escape(mstr)))
+        for line in lines:
+            ret.append('%s"%s"' % (delflag, escape(line)))
         return ret
-
 # }}}
 # class POEntry {{{
 
+
 class POEntry(_BaseEntry):
     """
     Represents a po file entry.
@@ -854,6 +938,9 @@
 
         ``previous_msgid_plural``
             string, the entry previous msgid_plural.
+
+        ``linenum``
+            integer, the line number of the entry
         """
         _BaseEntry.__init__(self, *args, **kwargs)
         self.comment = kwargs.get('comment', '')
@@ -863,6 +950,7 @@
         self.previous_msgctxt = kwargs.get('previous_msgctxt', None)
         self.previous_msgid = kwargs.get('previous_msgid', None)
         self.previous_msgid_plural = kwargs.get('previous_msgid_plural', None)
+        self.linenum = kwargs.get('linenum', None)
 
     def __unicode__(self, wrapwidth=78):
         """
@@ -879,7 +967,7 @@
             if val:
                 for comment in val.split('\n'):
                     if wrapwidth > 0 and len(comment) + len(c[1]) > wrapwidth:
-                        ret += textwrap.wrap(
+                        ret += wrap(
                             comment,
                             wrapwidth,
                             initial_indent=c[1],
@@ -903,7 +991,7 @@
                 # what we want for filenames, so the dirty hack is to
                 # temporally replace hyphens with a char that a file cannot
                 # contain, like "*"
-                ret += [l.replace('*', '-') for l in textwrap.wrap(
+                ret += [l.replace('*', '-') for l in wrap(
                     filestr.replace('-', '*'),
                     wrapwidth,
                     initial_indent='#: ',
@@ -918,32 +1006,25 @@
             ret.append('#, %s' % ', '.join(self.flags))
 
         # previous context and previous msgid/msgid_plural
-        fields = ['previous_msgctxt', 'previous_msgid', 'previous_msgid_plural']
+        fields = ['previous_msgctxt', 'previous_msgid',
+                  'previous_msgid_plural']
         for f in fields:
             val = getattr(self, f)
             if val:
                 ret += self._str_field(f, "#| ", "", val, wrapwidth)
 
         ret.append(_BaseEntry.__unicode__(self, wrapwidth))
-        ret = '\n'.join(ret)
+        ret = u('\n').join(ret)
 
-        if type(ret) != types.UnicodeType:
-            return unicode(ret, self.encoding)
+        assert isinstance(ret, text_type)
+        #if type(ret) != types.UnicodeType:
+        #    return unicode(ret, self.encoding)
         return ret
 
     def __cmp__(self, other):
         """
         Called by comparison operations if rich comparison is not defined.
         """
-        def compare_occurrences(a, b):
-            """
-            Compare an entry occurrence with another one.
-            """
-            if a[0] != b[0]:
-                return a[0] < b[0]
-            if a[1] != b[1]:
-                return a[1] < b[1]
-            return 0
 
         # First: Obsolete test
         if self.obsolete != other.obsolete:
@@ -952,12 +1033,8 @@
             else:
                 return 1
         # Work on a copy to protect original
-        occ1 = self.occurrences[:]
-        occ2 = other.occurrences[:]
-        # Sorting using compare method
-        occ1.sort(compare_occurrences)
-        occ2.sort(compare_occurrences)
-        # Comparing sorted occurrences
+        occ1 = sorted(self.occurrences[:])
+        occ2 = sorted(other.occurrences[:])
         pos = 0
         for entry1 in occ1:
             try:
@@ -975,9 +1052,41 @@
                     return 1
                 else:
                     return -1
+        # Compare msgid_plural if set
+        if self.msgid_plural:
+            if not other.msgid_plural:
+                return 1
+            for pos in self.msgid_plural:
+                if pos not in other.msgid_plural:
+                    return 1
+                if self.msgid_plural[pos] > other.msgid_plural[pos]:
+                    return 1
+                if self.msgid_plural[pos] < other.msgid_plural[pos]:
+                    return -1
         # Finally: Compare message ID
-        if self.msgid > other.msgid: return 1
-        else: return -1
+        if self.msgid > other.msgid:
+            return 1
+        elif self.msgid < other.msgid:
+            return -1
+        return 0
+
+    def __gt__(self, other):
+        return self.__cmp__(other) > 0
+
+    def __lt__(self, other):
+        return self.__cmp__(other) < 0
+
+    def __ge__(self, other):
+        return self.__cmp__(other) >= 0
+
+    def __le__(self, other):
+        return self.__cmp__(other) <= 0
+
+    def __eq__(self, other):
+        return self.__cmp__(other) == 0
+
+    def __ne__(self, other):
+        return self.__cmp__(other) != 0
 
     def translated(self):
         """
@@ -1020,18 +1129,49 @@
                 except KeyError:
                     self.msgstr_plural[pos] = ''
 
+    def __hash__(self):
+        return hash((self.msgid, self.msgstr))
 # }}}
 # class MOEntry {{{
 
+
 class MOEntry(_BaseEntry):
     """
     Represents a mo file entry.
     """
-    pass
+    def __init__(self, *args, **kwargs):
+        """
+        Constructor, accepts the following keyword arguments,
+        for consistency with :class:`~polib.POEntry`:
+
+        ``comment``
+        ``tcomment``
+        ``occurrences``
+        ``flags``
+        ``previous_msgctxt``
+        ``previous_msgid``
+        ``previous_msgid_plural``
+
+        Note: even though these keyword arguments are accepted,
+        they hold no real meaning in the context of MO files
+        and are simply ignored.
+        """
+        _BaseEntry.__init__(self, *args, **kwargs)
+        self.comment = ''
+        self.tcomment = ''
+        self.occurrences = []
+        self.flags = []
+        self.previous_msgctxt = None
+        self.previous_msgid = None
+        self.previous_msgid_plural = None
+
+    def __hash__(self):
+        return hash((self.msgid, self.msgstr))
 
 # }}}
 # class _POFileParser {{{
 
+
 class _POFileParser(object):
     """
     A finite state machine to parse efficiently and correctly po
@@ -1056,23 +1196,27 @@
             file (optional, default: ``False``).
         """
         enc = kwargs.get('encoding', default_encoding)
-        if os.path.exists(pofile):
+        if _is_file(pofile):
             try:
-                self.fhandle = codecs.open(pofile, 'rU', enc)
+                self.fhandle = io.open(pofile, 'rt', encoding=enc)
             except LookupError:
                 enc = default_encoding
-                self.fhandle = codecs.open(pofile, 'rU', enc)
+                self.fhandle = io.open(pofile, 'rt', encoding=enc)
         else:
             self.fhandle = pofile.splitlines()
 
-        self.instance = POFile(
+        klass = kwargs.get('klass')
+        if klass is None:
+            klass = POFile
+        self.instance = klass(
             pofile=pofile,
             encoding=enc,
             check_for_duplicates=kwargs.get('check_for_duplicates', False)
         )
         self.transitions = {}
-        self.current_entry = POEntry()
-        self.current_state = 'ST'
+        self.current_line = 0
+        self.current_entry = POEntry(linenum=self.current_line)
+        self.current_state = 'st'
         self.current_token = None
         # two memo flags used in handlers
         self.msgstr_index = 0
@@ -1083,7 +1227,7 @@
         #     * HE: Header
         #     * TC: a translation comment
         #     * GC: a generated comment
-        #     * OC: a file/line occurence
+        #     * OC: a file/line occurrence
         #     * FL: a flags line
         #     * CT: a message context
         #     * PC: a previous msgctxt
@@ -1094,48 +1238,47 @@
         #     * MS: a msgstr
         #     * MX: a msgstr plural
         #     * MC: a msgid or msgstr continuation line
-        all = ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'PC', 'PM', 'PP', 'TC',
-               'MS', 'MP', 'MX', 'MI']
+        all = ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'pc', 'pm', 'pp', 'tc',
+               'ms', 'mp', 'mx', 'mi']
 
-        self.add('TC', ['ST', 'HE'],                                     'HE')
-        self.add('TC', ['GC', 'OC', 'FL', 'TC', 'PC', 'PM', 'PP', 'MS',
-                        'MP', 'MX', 'MI'],                               'TC')
-        self.add('GC', all,                                              'GC')
-        self.add('OC', all,                                              'OC')
-        self.add('FL', all,                                              'FL')
-        self.add('PC', all,                                              'PC')
-        self.add('PM', all,                                              'PM')
-        self.add('PP', all,                                              'PP')
-        self.add('CT', ['ST', 'HE', 'GC', 'OC', 'FL', 'TC', 'PC', 'PM',
-                        'PP', 'MS', 'MX'],                               'CT')
-        self.add('MI', ['ST', 'HE', 'GC', 'OC', 'FL', 'CT', 'TC', 'PC',
-                 'PM', 'PP', 'MS', 'MX'],                                'MI')
-        self.add('MP', ['TC', 'GC', 'PC', 'PM', 'PP', 'MI'],             'MP')
-        self.add('MS', ['MI', 'MP', 'TC'],                               'MS')
-        self.add('MX', ['MI', 'MX', 'MP', 'TC'],                         'MX')
-        self.add('MC', ['CT', 'MI', 'MP', 'MS', 'MX', 'PM', 'PP', 'PC'], 'MC')
+        self.add('tc', ['st', 'he'],                                     'he')
+        self.add('tc', ['gc', 'oc', 'fl', 'tc', 'pc', 'pm', 'pp', 'ms',
+                        'mp', 'mx', 'mi'],                               'tc')
+        self.add('gc', all,                                              'gc')
+        self.add('oc', all,                                              'oc')
+        self.add('fl', all,                                              'fl')
+        self.add('pc', all,                                              'pc')
+        self.add('pm', all,                                              'pm')
+        self.add('pp', all,                                              'pp')
+        self.add('ct', ['st', 'he', 'gc', 'oc', 'fl', 'tc', 'pc', 'pm',
+                        'pp', 'ms', 'mx'],                               'ct')
+        self.add('mi', ['st', 'he', 'gc', 'oc', 'fl', 'ct', 'tc', 'pc',
+                 'pm', 'pp', 'ms', 'mx'],                                'mi')
+        self.add('mp', ['tc', 'gc', 'pc', 'pm', 'pp', 'mi'],             'mp')
+        self.add('ms', ['mi', 'mp', 'tc'],                               'ms')
+        self.add('mx', ['mi', 'mx', 'mp', 'tc'],                         'mx')
+        self.add('mc', ['ct', 'mi', 'mp', 'ms', 'mx', 'pm', 'pp', 'pc'], 'mc')
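+        # For example, a msgid token ('mi') seen while in the header state
+        # ('he') dispatches to handle_mi and, when the handler succeeds,
+        # advances the machine to state 'mi' (see process() below).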
 
     def parse(self):
         """
         Run the state machine, parse the file line by line and call process()
         with the current matched symbol.
         """
-        i = 0
 
         keywords = {
-            'msgctxt': 'CT',
-            'msgid': 'MI',
-            'msgstr': 'MS',
-            'msgid_plural': 'MP',
+            'msgctxt': 'ct',
+            'msgid': 'mi',
+            'msgstr': 'ms',
+            'msgid_plural': 'mp',
         }
         prev_keywords = {
-            'msgid_plural': 'PP',
-            'msgid': 'PM',
-            'msgctxt': 'PC',
+            'msgid_plural': 'pp',
+            'msgid': 'pm',
+            'msgctxt': 'pc',
         }
-
+        tokens = []
         for line in self.fhandle:
-            i += 1
+            self.current_line += 1
             line = line.strip()
             if line == '':
                 continue
@@ -1143,6 +1286,9 @@
             tokens = line.split(None, 2)
             nb_tokens = len(tokens)
 
+            if tokens[0] == '#~|':
+                continue
+
             if tokens[0] == '#~' and nb_tokens > 1:
                 line = line[3:].strip()
                 tokens = tokens[1:]
@@ -1155,41 +1301,56 @@
             # msgid, msgid_plural, msgctxt & msgstr.
             if tokens[0] in keywords and nb_tokens > 1:
                 line = line[len(tokens[0]):].lstrip()
+                if re.search(r'([^\\]|^)"', line[1:-1]):
+                    raise IOError('Syntax error in po file %s (line %s): '
+                                  'unescaped double quote found' %
+                                  (self.instance.fpath, self.current_line))
                 self.current_token = line
-                self.process(keywords[tokens[0]], i)
+                self.process(keywords[tokens[0]])
                 continue
 
             self.current_token = line
 
-            if tokens[0] == '#:' and nb_tokens > 1:
+            if tokens[0] == '#:':
+                if nb_tokens <= 1:
+                    continue
                 # we are on a occurrences line
-                self.process('OC', i)
+                self.process('oc')
 
             elif line[:1] == '"':
                 # we are on a continuation line
-                self.process('MC', i)
+                if re.search(r'([^\\]|^)"', line[1:-1]):
+                    raise IOError('Syntax error in po file %s (line %s): '
+                                  'unescaped double quote found' %
+                                  (self.instance.fpath, self.current_line))
+                self.process('mc')
 
             elif line[:7] == 'msgstr[':
                 # we are on a msgstr plural
-                self.process('MX', i)
+                self.process('mx')
 
-            elif tokens[0] == '#,' and nb_tokens > 1:
+            elif tokens[0] == '#,':
+                if nb_tokens <= 1:
+                    continue
                 # we are on a flags line
-                self.process('FL', i)
+                self.process('fl')
 
-            elif tokens[0] == '#':
-                if line == '#': line += ' '
+            elif tokens[0] == '#' or tokens[0].startswith('##'):
+                if line == '#':
+                    line += ' '
                 # we are on a translator comment line
-                self.process('TC', i)
+                self.process('tc')
 
-            elif tokens[0] == '#.' and nb_tokens > 1:
+            elif tokens[0] == '#.':
+                if nb_tokens <= 1:
+                    continue
                 # we are on a generated comment line
-                self.process('GC', i)
+                self.process('gc')
 
             elif tokens[0] == '#|':
-                if nb_tokens < 2:
-                    self.process('??', i)
-                    continue
+                if nb_tokens <= 1:
+                    raise IOError('Syntax error in po file %s (line %s)' %
+                                  (self.instance.fpath, self.current_line))
 
                 # Remove the marker and any whitespace right after that.
                 line = line[2:].lstrip()
@@ -1197,48 +1358,57 @@
 
                 if tokens[1].startswith('"'):
                     # Continuation of previous metadata.
-                    self.process('MC', i)
+                    self.process('mc')
                     continue
 
                 if nb_tokens == 2:
                     # Invalid continuation line.
-                    self.process('??', i)
+                    raise IOError('Syntax error in po file %s (line %s): '
+                                  'invalid continuation line' %
+                                  (self.instance.fpath, self.current_line))
 
                 # we are on a "previous translation" comment line,
                 if tokens[1] not in prev_keywords:
                     # Unknown keyword in previous translation comment.
-                    self.process('??', i)
+                    raise IOError('Syntax error in po file %s (line %s): '
+                                  'unknown keyword %s' %
+                                  (self.instance.fpath, self.current_line,
+                                   tokens[1]))
 
                 # Remove the keyword and any whitespace
                 # between it and the starting quote.
                 line = line[len(tokens[1]):].lstrip()
                 self.current_token = line
-                self.process(prev_keywords[tokens[1]], i)
+                self.process(prev_keywords[tokens[1]])
 
             else:
-                self.process('??', i)
+                raise IOError('Syntax error in po file %s (line %s)' %
+                              (self.instance.fpath, self.current_line))
 
-        if self.current_entry:
+        if self.current_entry and len(tokens) > 0 and \
+           not tokens[0].startswith('#'):
             # since entries are added when another entry is found, we must add
-            # the last entry here (only if there are lines)
+            # the last entry here (only if there are lines). Trailing comments
+            # are ignored.
             self.instance.append(self.current_entry)
+
         # before returning the instance, check if there's metadata and if
         # so extract it in a dict
-        firstentry = self.instance[0]
-        if firstentry.msgid == '': # metadata found
+        metadataentry = self.instance.find('')
+        if metadataentry:  # metadata found
             # remove the entry
-            firstentry = self.instance.pop(0)
-            self.instance.metadata_is_fuzzy = firstentry.flags
+            self.instance.remove(metadataentry)
+            self.instance.metadata_is_fuzzy = metadataentry.flags
             key = None
-            for msg in firstentry.msgstr.splitlines():
+            for msg in metadataentry.msgstr.splitlines():
                 try:
                     key, val = msg.split(':', 1)
                     self.instance.metadata[key] = val.strip()
-                except:
+                except (ValueError, KeyError):
                     if key is not None:
-                        self.instance.metadata[key] += '\n'+ msg.strip()
+                        self.instance.metadata[key] += '\n' + msg.strip()
         # close opened file
-        if isinstance(self.fhandle, file):
+        if not isinstance(self.fhandle, list):  # must be file
             self.fhandle.close()
         return self.instance
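The metadata handling above reduces to splitting the header entry's msgstr
into "Key: value" pairs, with lines lacking a colon glued onto the previous
key. A minimal standalone sketch of that logic (parse_metadata is a
hypothetical helper for illustration, not part of polib):

    def parse_metadata(msgstr):
        metadata = {}
        key = None
        for msg in msgstr.splitlines():
            try:
                key, val = msg.split(':', 1)
                metadata[key] = val.strip()
            except ValueError:
                # continuation line: append to the previous key
                if key is not None:
                    metadata[key] += '\n' + msg.strip()
        return metadata

    meta = parse_metadata('Project-Id-Version: Mercurial\n'
                          'Content-Type: text/plain; charset=UTF-8')
    assert meta['Project-Id-Version'] == 'Mercurial'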
 
@@ -1258,10 +1428,10 @@
             the next state the fsm will have after the action.
         """
         for state in states:
-            action = getattr(self, 'handle_%s' % next_state.lower())
+            action = getattr(self, 'handle_%s' % next_state)
             self.transitions[(symbol, state)] = (action, next_state)
 
-    def process(self, symbol, linenum):
+    def process(self, symbol):
         """
         Process the transition corresponding to the current state and the
         symbol provided.
@@ -1278,8 +1448,9 @@
             (action, state) = self.transitions[(symbol, self.current_state)]
             if action():
                 self.current_state = state
-        except Exception as exc:
-            raise IOError('Syntax error in po file (line %s)' % linenum)
+        except Exception:
+            raise IOError('Syntax error in po file (line %s)' %
+                          self.current_line)
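For readers unfamiliar with the table-driven design here: transitions maps a
(symbol, state) pair to an (action, next_state) tuple, and the state only
advances when the handler returns True (continuation handlers return False to
stay put). A toy model of the same shape (all names illustrative, not polib's
real table):

    class TinyFSM(object):
        def __init__(self, start_state):
            self.current_state = start_state
            self.transitions = {}

        def add_transition(self, symbol, states, next_state):
            action = getattr(self, 'handle_%s' % next_state)
            for state in states:
                self.transitions[(symbol, state)] = (action, next_state)

        def process(self, symbol):
            action, state = self.transitions[(symbol, self.current_state)]
            if action():
                self.current_state = state

        def handle_mi(self):
            return True  # a real handler would update the current entry

    fsm = TinyFSM('st')
    fsm.add_transition('mi', ['st'], 'mi')
    fsm.process('mi')
    assert fsm.current_state == 'mi'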
 
     # state handlers
 
@@ -1292,90 +1463,94 @@
 
     def handle_tc(self):
         """Handle a translator comment."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         if self.current_entry.tcomment != '':
             self.current_entry.tcomment += '\n'
-        self.current_entry.tcomment += self.current_token[2:]
+        tcomment = self.current_token.lstrip('#')
+        if tcomment.startswith(' '):
+            tcomment = tcomment[1:]
+        self.current_entry.tcomment += tcomment
         return True
 
     def handle_gc(self):
         """Handle a generated comment."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         if self.current_entry.comment != '':
             self.current_entry.comment += '\n'
         self.current_entry.comment += self.current_token[3:]
         return True
 
     def handle_oc(self):
-        """Handle a file:num occurence."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        """Handle a file:num occurrence."""
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         occurrences = self.current_token[3:].split()
         for occurrence in occurrences:
             if occurrence != '':
                 try:
                     fil, line = occurrence.split(':')
                     if not line.isdigit():
-                        fil  = fil + line
+                        fil = fil + line
                         line = ''
                     self.current_entry.occurrences.append((fil, line))
-                except:
+                except (ValueError, AttributeError):
                     self.current_entry.occurrences.append((occurrence, ''))
         return True
 
     def handle_fl(self):
         """Handle a flags line."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
-        self.current_entry.flags += self.current_token[3:].split(', ')
+            self.current_entry = POEntry(linenum=self.current_line)
+        self.current_entry.flags += [c.strip() for c in
+                                     self.current_token[3:].split(',')]
         return True
 
     def handle_pp(self):
         """Handle a previous msgid_plural line."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         self.current_entry.previous_msgid_plural = \
             unescape(self.current_token[1:-1])
         return True
 
     def handle_pm(self):
         """Handle a previous msgid line."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         self.current_entry.previous_msgid = \
             unescape(self.current_token[1:-1])
         return True
 
     def handle_pc(self):
         """Handle a previous msgctxt line."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         self.current_entry.previous_msgctxt = \
             unescape(self.current_token[1:-1])
         return True
 
     def handle_ct(self):
         """Handle a msgctxt."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         self.current_entry.msgctxt = unescape(self.current_token[1:-1])
         return True
 
     def handle_mi(self):
         """Handle a msgid."""
-        if self.current_state in ['MC', 'MS', 'MX']:
+        if self.current_state in ['mc', 'ms', 'mx']:
             self.instance.append(self.current_entry)
-            self.current_entry = POEntry()
+            self.current_entry = POEntry(linenum=self.current_line)
         self.current_entry.obsolete = self.entry_obsolete
         self.current_entry.msgid = unescape(self.current_token[1:-1])
         return True
@@ -1392,47 +1567,37 @@
 
     def handle_mx(self):
         """Handle a msgstr plural."""
-        index, value = self.current_token[7], self.current_token[11:-1]
-        self.current_entry.msgstr_plural[index] = unescape(value)
-        self.msgstr_index = index
+        index = self.current_token[7]
+        value = self.current_token[self.current_token.find('"') + 1:-1]
+        self.current_entry.msgstr_plural[int(index)] = unescape(value)
+        self.msgstr_index = int(index)
         return True
 
     def handle_mc(self):
         """Handle a msgid or msgstr continuation line."""
         token = unescape(self.current_token[1:-1])
-        if self.current_state == 'CT':
-            typ = 'msgctxt'
+        if self.current_state == 'ct':
             self.current_entry.msgctxt += token
-        elif self.current_state == 'MI':
-            typ = 'msgid'
+        elif self.current_state == 'mi':
             self.current_entry.msgid += token
-        elif self.current_state == 'MP':
-            typ = 'msgid_plural'
+        elif self.current_state == 'mp':
             self.current_entry.msgid_plural += token
-        elif self.current_state == 'MS':
-            typ = 'msgstr'
+        elif self.current_state == 'ms':
             self.current_entry.msgstr += token
-        elif self.current_state == 'MX':
-            typ = 'msgstr[%s]' % self.msgstr_index
+        elif self.current_state == 'mx':
             self.current_entry.msgstr_plural[self.msgstr_index] += token
-        elif self.current_state == 'PP':
-            typ = 'previous_msgid_plural'
-            token = token[3:]
+        elif self.current_state == 'pp':
             self.current_entry.previous_msgid_plural += token
-        elif self.current_state == 'PM':
-            typ = 'previous_msgid'
-            token = token[3:]
+        elif self.current_state == 'pm':
             self.current_entry.previous_msgid += token
-        elif self.current_state == 'PC':
-            typ = 'previous_msgctxt'
-            token = token[3:]
+        elif self.current_state == 'pc':
             self.current_entry.previous_msgctxt += token
         # don't change the current state
         return False
-
 # }}}
 # class _MOFileParser {{{
 
+
 class _MOFileParser(object):
     """
     A class to parse binary mo files.
@@ -1456,12 +1621,24 @@
             file (optional, default: ``False``).
         """
         self.fhandle = open(mofile, 'rb')
-        self.instance = MOFile(
+
+        klass = kwargs.get('klass')
+        if klass is None:
+            klass = MOFile
+        self.instance = klass(
             fpath=mofile,
             encoding=kwargs.get('encoding', default_encoding),
             check_for_duplicates=kwargs.get('check_for_duplicates', False)
         )
 
+    def __del__(self):
+        """
+        Make sure the file is closed; this prevents warnings about unclosed
+        files when running tests with python >= 3.2.
+        """
+        if self.fhandle:
+            self.fhandle.close()
+
     def parse(self):
         """
         Build the instance with the file handle provided in the
@@ -1469,15 +1646,20 @@
         """
         # parse magic number
         magic_number = self._readbinary('<I', 4)
-        if magic_number == MOFile.LITTLE_ENDIAN:
+        if magic_number == MOFile.MAGIC:
             ii = '<II'
-        elif magic_number == MOFile.BIG_ENDIAN:
+        elif magic_number == MOFile.MAGIC_SWAPPED:
             ii = '>II'
         else:
+            raise IOError('Invalid mo file, magic number is incorrect!')
         self.instance.magic_number = magic_number
         # parse the version number and the number of strings
-        self.instance.version, numofstrings = self._readbinary(ii, 8)
+        version, numofstrings = self._readbinary(ii, 8)
+        # from MO file format specs: "A program seeing an unexpected major
+        # revision number should stop reading the MO file entirely"
+        if version not in (0, 1):
+            raise IOError('Invalid mo file, unexpected major revision number')
+        self.instance.version = version
         # original strings and translation strings hash table offset
         msgids_hash_offset, msgstrs_hash_offset = self._readbinary(ii, 8)
         # move to msgid hash table and read length and offset of msgids
@@ -1491,29 +1673,34 @@
         for i in range(numofstrings):
             msgstrs_index.append(self._readbinary(ii, 8))
         # build entries
+        encoding = self.instance.encoding
         for i in range(numofstrings):
             self.fhandle.seek(msgids_index[i][1])
             msgid = self.fhandle.read(msgids_index[i][0])
+
             self.fhandle.seek(msgstrs_index[i][1])
             msgstr = self.fhandle.read(msgstrs_index[i][0])
-            if i == 0: # metadata
-                raw_metadata, metadata = msgstr.split('\n'), {}
+            if i == 0 and not msgid:  # metadata
+                raw_metadata, metadata = msgstr.split(b('\n')), {}
                 for line in raw_metadata:
-                    tokens = line.split(':', 1)
-                    if tokens[0] != '':
+                    tokens = line.split(b(':'), 1)
+                    if tokens[0] != b(''):
                         try:
-                            metadata[tokens[0]] = tokens[1].strip()
+                            k = tokens[0].decode(encoding)
+                            v = tokens[1].decode(encoding)
+                            metadata[k] = v.strip()
                         except IndexError:
-                            metadata[tokens[0]] = ''
+                            metadata[k] = u('')
                 self.instance.metadata = metadata
                 continue
             # test if we have a plural entry
-            msgid_tokens = msgid.split('\0')
+            msgid_tokens = msgid.split(b('\0'))
             if len(msgid_tokens) > 1:
                 entry = self._build_entry(
                     msgid=msgid_tokens[0],
                     msgid_plural=msgid_tokens[1],
-                    msgstr_plural=dict((k,v) for k,v in enumerate(msgstr.split('\0')))
+                    msgstr_plural=dict((k, v) for k, v in
+                                       enumerate(msgstr.split(b('\0'))))
                 )
             else:
                 entry = self._build_entry(msgid=msgid, msgstr=msgstr)
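The magic-number dispatch earlier in parse() picks the struct byte order for
every subsequent read: 0x950412de is the GNU gettext magic, and the
byte-swapped value signals the opposite endianness. A self-contained sketch of
that check, using polib's MAGIC constants:

    import struct

    MAGIC = 0x950412de          # polib's MOFile.MAGIC
    MAGIC_SWAPPED = 0xde120495  # polib's MOFile.MAGIC_SWAPPED

    def byteorder(first4):
        magic = struct.unpack('<I', first4)[0]
        if magic == MAGIC:
            return '<'   # little-endian reads from here on
        elif magic == MAGIC_SWAPPED:
            return '>'   # big-endian
        raise IOError('Invalid mo file, magic number is incorrect!')

    assert byteorder(struct.pack('<I', MAGIC)) == '<'
    assert byteorder(struct.pack('>I', MAGIC)) == '>'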
@@ -1524,19 +1711,22 @@
 
     def _build_entry(self, msgid, msgstr=None, msgid_plural=None,
                      msgstr_plural=None):
-        msgctxt_msgid = msgid.split('\x04')
+        msgctxt_msgid = msgid.split(b('\x04'))
+        encoding = self.instance.encoding
         if len(msgctxt_msgid) > 1:
             kwargs = {
-                'msgctxt': msgctxt_msgid[0],
-                'msgid'  : msgctxt_msgid[1],
+                'msgctxt': msgctxt_msgid[0].decode(encoding),
+                'msgid': msgctxt_msgid[1].decode(encoding),
             }
         else:
-            kwargs = {'msgid': msgid}
+            kwargs = {'msgid': msgid.decode(encoding)}
         if msgstr:
-            kwargs['msgstr'] = msgstr
+            kwargs['msgstr'] = msgstr.decode(encoding)
         if msgid_plural:
-            kwargs['msgid_plural'] = msgid_plural
+            kwargs['msgid_plural'] = msgid_plural.decode(encoding)
         if msgstr_plural:
+            for k in msgstr_plural:
+                msgstr_plural[k] = msgstr_plural[k].decode(encoding)
             kwargs['msgstr_plural'] = msgstr_plural
         return MOEntry(**kwargs)
 
@@ -1550,5 +1740,99 @@
         if len(tup) == 1:
             return tup[0]
         return tup
+# }}}
+# class TextWrapper {{{
+
+
+class TextWrapper(textwrap.TextWrapper):
+    """
+    Subclass of textwrap.TextWrapper that backports the
+    drop_whitespace option.
+    """
+    def __init__(self, *args, **kwargs):
+        drop_whitespace = kwargs.pop('drop_whitespace', True)
+        textwrap.TextWrapper.__init__(self, *args, **kwargs)
+        self.drop_whitespace = drop_whitespace
+
+    def _wrap_chunks(self, chunks):
+        """_wrap_chunks(chunks : [string]) -> [string]
+
+        Wrap a sequence of text chunks and return a list of lines of
+        length 'self.width' or less.  (If 'break_long_words' is false,
+        some lines may be longer than this.)  Chunks correspond roughly
+        to words and the whitespace between them: each chunk is
+        indivisible (modulo 'break_long_words'), but a line break can
+        come between any two chunks.  Chunks should not have internal
+        whitespace; i.e. a chunk is either all whitespace or a "word".
+        Whitespace chunks will be removed from the beginning and end of
+        lines, but apart from that whitespace is preserved.
+        """
+        lines = []
+        if self.width <= 0:
+            raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+        # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+        chunks.reverse()
+
+        while chunks:
+
+            # Start the list of chunks that will make up the current line.
+            # cur_len is just the length of all the chunks in cur_line.
+            cur_line = []
+            cur_len = 0
+
+            # Figure out which static string will prefix this line.
+            if lines:
+                indent = self.subsequent_indent
+            else:
+                indent = self.initial_indent
+
+            # Maximum width for this line.
+            width = self.width - len(indent)
+
+            # First chunk on line is whitespace -- drop it, unless this
+            # is the very beginning of the text (i.e. no lines started yet).
+            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+                del chunks[-1]
+
+            while chunks:
+                l = len(chunks[-1])
+
+                # Can at least squeeze this chunk onto the current line.
+                if cur_len + l <= width:
+                    cur_line.append(chunks.pop())
+                    cur_len += l
+
+                # Nope, this line is full.
+                else:
+                    break
+
+            # The current line is full, and the next chunk is too big to
+            # fit on *any* line (not just this one).
+            if chunks and len(chunks[-1]) > width:
+                self._handle_long_word(chunks, cur_line, cur_len, width)
+
+            # If the last chunk on this line is all whitespace, drop it.
+            if self.drop_whitespace and cur_line and not cur_line[-1].strip():
+                del cur_line[-1]
+
+            # Convert current line back to a string and store it in list
+            # of all lines (return value).
+            if cur_line:
+                lines.append(indent + ''.join(cur_line))
+
+        return lines
+# }}}
+# function wrap() {{{
+
+
+def wrap(text, width=70, **kwargs):
+    """
+    Wrap a single paragraph of text, returning a list of wrapped lines.
+    """
+    if sys.version_info < (2, 6):
+        return TextWrapper(width=width, **kwargs).wrap(text)
+    return textwrap.wrap(text, width=width, **kwargs)
 
 # }}}
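The backported class only matters on very old interpreters; since Python 2.6
the standard library accepts drop_whitespace directly, which is exactly what
wrap() falls through to. Equivalent modern calls:

    import textwrap

    # drop_whitespace defaults to True; pass False to keep the
    # leading/trailing whitespace chunks on each wrapped line.
    print(textwrap.wrap('lorem ipsum ' * 8, width=20))
    print(textwrap.wrap('  indented text', width=20, drop_whitespace=False))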
--- a/i18n/posplit	Wed Oct 10 12:25:28 2018 -0400
+++ b/i18n/posplit	Mon Oct 22 14:46:06 2018 -0400
@@ -15,6 +15,14 @@
     e = cache.get(entry.msgid)
     if e:
         e.occurrences.extend(entry.occurrences)
+
+        # merge comments from entry
+        for comment in entry.comment.split('\n'):
+            if comment and comment not in e.comment:
+                if not e.comment:
+                    e.comment = comment
+                else:
+                    e.comment += '\n' + comment
     else:
         po.append(entry)
         cache[entry.msgid] = entry
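The merge loop above folds comments line by line, skipping lines already
present in the cached entry. Reduced to a pure function (merge_comments is a
hypothetical name for illustration):

    def merge_comments(cached, new_comment):
        for comment in new_comment.split('\n'):
            # note: a substring test, matching the loop above, so a line
            # contained inside an existing comment is also skipped
            if comment and comment not in cached:
                cached = comment if not cached else cached + '\n' + comment
        return cached

    assert merge_comments('', 'a\nb') == 'a\nb'
    assert merge_comments('first', 'first\nsecond') == 'first\nsecond'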
--- a/mercurial/__init__.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/__init__.py	Mon Oct 22 14:46:06 2018 -0400
@@ -182,7 +182,7 @@
                     continue
                 r, c = t.start
                 l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, xrange, '
+                     b'delattr, getattr, hasattr, setattr, '
                      b'open, unicode\n')
                 for u in tokenize.tokenize(io.BytesIO(l).readline):
                     if u.type in (tokenize.ENCODING, token.ENDMARKER):
@@ -223,7 +223,7 @@
     # ``replacetoken`` or any mechanism that changes semantics of module
     # loading is changed. Otherwise cached bytecode may get loaded without
     # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x0a'
+    BYTECODEHEADER = b'HG\x00\x0b'
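The bump from \x0a to \x0b follows the rule stated in the comment above: any
change to the source transformer must change the header so stale cached
bytecode is treated as a miss. The idea in isolation, as a minimal sketch:

    HEADER = b'HG\x00\x0b'

    def load_cached(data):
        # reject payloads written by an older (or newer) transformer
        if not data.startswith(HEADER):
            return None
        return data[len(HEADER):]

    assert load_cached(b'HG\x00\x0aOLD') is None
    assert load_cached(b'HG\x00\x0bNEW') == b'NEW'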
 
     class hgloader(importlib.machinery.SourceFileLoader):
         """Custom module loader that transforms source code.
--- a/mercurial/ancestor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/ancestor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,10 +7,15 @@
 
 from __future__ import absolute_import
 
-import collections
 import heapq
 
 from .node import nullrev
+from . import (
+    policy,
+    pycompat,
+)
+
+parsers = policy.importmod(r'parsers')
 
 def commonancestorsheads(pfunc, *nodes):
     """Returns a set with the heads of all common ancestors of all nodes,
@@ -174,7 +179,7 @@
             # no revs to consider
             return
 
-        for curr in xrange(start, min(revs) - 1, -1):
+        for curr in pycompat.xrange(start, min(revs) - 1, -1):
             if curr not in bases:
                 continue
             revs.discard(curr)
@@ -215,7 +220,7 @@
         # exit.
 
         missing = []
-        for curr in xrange(start, nullrev, -1):
+        for curr in pycompat.xrange(start, nullrev, -1):
             if not revsvisit:
                 break
 
@@ -257,6 +262,50 @@
         missing.reverse()
         return missing
 
+# Extracted from lazyancestors.__iter__ to avoid a reference cycle
+def _lazyancestorsiter(parentrevs, initrevs, stoprev, inclusive):
+    seen = {nullrev}
+    heappush = heapq.heappush
+    heappop = heapq.heappop
+    heapreplace = heapq.heapreplace
+    see = seen.add
+
+    if inclusive:
+        visit = [-r for r in initrevs]
+        seen.update(initrevs)
+        heapq.heapify(visit)
+    else:
+        visit = []
+        heapq.heapify(visit)
+        for r in initrevs:
+            p1, p2 = parentrevs(r)
+            if p1 not in seen:
+                heappush(visit, -p1)
+                see(p1)
+            if p2 not in seen:
+                heappush(visit, -p2)
+                see(p2)
+
+    while visit:
+        current = -visit[0]
+        if current < stoprev:
+            break
+        yield current
+        # optimize out heapq operation if p1 is known to be the next highest
+        # revision, which is quite common in linear history.
+        p1, p2 = parentrevs(current)
+        if p1 not in seen:
+            if current - p1 == 1:
+                visit[0] = -p1
+            else:
+                heapreplace(visit, -p1)
+            see(p1)
+        else:
+            heappop(visit)
+        if p2 not in seen:
+            heappush(visit, -p2)
+            see(p2)
+
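The iterator keeps a max-heap of negated revision numbers, so it always yields
the highest pending revision next; the visit[0] assignment is a cheap shortcut
for the common linear-history case where p1 is the next value anyway. A toy
run against a hand-built DAG, assuming the function above is in scope
(parentrevs here is a stand-in for changelog.parentrevs; -1 plays nullrev):

    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2)}

    def parentrevs(rev):
        return parents[rev]

    # children before parents, strictly decreasing revision numbers
    assert list(_lazyancestorsiter(parentrevs, [3], 0, True)) == [3, 2, 1, 0]
    assert list(_lazyancestorsiter(parentrevs, [3], 0, False)) == [2, 1, 0]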
 class lazyancestors(object):
     def __init__(self, pfunc, revs, stoprev=0, inclusive=False):
         """Create a new object generating ancestors for the given revs. Does
@@ -271,22 +320,15 @@
 
         Result does not include the null revision."""
         self._parentrevs = pfunc
-        self._initrevs = revs
+        self._initrevs = revs = [r for r in revs if r >= stoprev]
         self._stoprev = stoprev
         self._inclusive = inclusive
 
-        # Initialize data structures for __contains__.
-        # For __contains__, we use a heap rather than a deque because
-        # (a) it minimizes the number of parentrevs calls made
-        # (b) it makes the loop termination condition obvious
-        # Python's heap is a min-heap. Multiply all values by -1 to convert it
-        # into a max-heap.
-        self._containsvisit = [-rev for rev in revs]
-        heapq.heapify(self._containsvisit)
-        if inclusive:
-            self._containsseen = set(revs)
-        else:
-            self._containsseen = set()
+        self._containsseen = set()
+        self._containsiter = _lazyancestorsiter(self._parentrevs,
+                                                self._initrevs,
+                                                self._stoprev,
+                                                self._inclusive)
 
     def __nonzero__(self):
         """False if the set is empty, True otherwise."""
@@ -302,66 +344,77 @@
         """Generate the ancestors of _initrevs in reverse topological order.
 
         If inclusive is False, yield a sequence of revision numbers starting
-        with the parents of each revision in revs, i.e., each revision is *not*
-        considered an ancestor of itself.  Results are in breadth-first order:
-        parents of each rev in revs, then parents of those, etc.
+        with the parents of each revision in revs, i.e., each revision is
+        *not* considered an ancestor of itself. Results are emitted in reverse
+        revision number order. That order is also topological: a child is
+        always emitted before its parent.
 
-        If inclusive is True, yield all the revs first (ignoring stoprev),
-        then yield all the ancestors of revs as when inclusive is False.
-        If an element in revs is an ancestor of a different rev it is not
-        yielded again."""
-        seen = set()
-        revs = self._initrevs
-        if self._inclusive:
-            for rev in revs:
-                yield rev
-            seen.update(revs)
-
-        parentrevs = self._parentrevs
-        stoprev = self._stoprev
-        visit = collections.deque(revs)
-
-        see = seen.add
-        schedule = visit.append
-
-        while visit:
-            for parent in parentrevs(visit.popleft()):
-                if parent >= stoprev and parent not in seen:
-                    schedule(parent)
-                    see(parent)
-                    yield parent
+        If inclusive is True, the source revisions are also yielded. The
+        reverse revision number order is still enforced."""
+        return _lazyancestorsiter(self._parentrevs, self._initrevs,
+                                  self._stoprev, self._inclusive)
 
     def __contains__(self, target):
         """Test whether target is an ancestor of self._initrevs."""
-        # Trying to do both __iter__ and __contains__ using the same visit
-        # heap and seen set is complex enough that it slows down both. Keep
-        # them separate.
         seen = self._containsseen
         if target in seen:
             return True
+        iter = self._containsiter
+        if iter is None:
+            # Iterator exhausted
+            return False
         # Only integer target is valid, but some callers expect 'None in self'
         # to be False. So we explicitly allow it.
         if target is None:
             return False
 
-        parentrevs = self._parentrevs
-        visit = self._containsvisit
-        stoprev = self._stoprev
-        heappop = heapq.heappop
-        heappush = heapq.heappush
         see = seen.add
+        try:
+            while True:
+                rev = next(iter)
+                see(rev)
+                if rev == target:
+                    return True
+                if rev < target:
+                    return False
+        except StopIteration:
+            # Set to None to indicate fast-path can be used next time, and to
+            # free up memory.
+            self._containsiter = None
+            return False
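Membership now consumes the same ordered iterator, caching every revision it
sees; because the stream is strictly decreasing, falling below the target
proves absence without walking the rest of the graph. The shape of that fast
path, isolated into a sketch:

    def lazy_contains(it, seen, target):
        if target in seen:
            return True
        for rev in it:
            seen.add(rev)
            if rev == target:
                return True
            if rev < target:
                return False  # stream is decreasing: target can't appear
        return False

    seen = set()
    stream = iter([5, 3, 2, 0])
    assert lazy_contains(stream, seen, 3)
    assert lazy_contains(stream, seen, 3)  # cached: stream is not touched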
 
-        targetseen = False
+class rustlazyancestors(object):
 
-        while visit and -visit[0] > target and not targetseen:
-            for parent in parentrevs(-heappop(visit)):
-                if parent < stoprev or parent in seen:
-                    continue
-                # We need to make sure we push all parents into the heap so
-                # that we leave it in a consistent state for future calls.
-                heappush(visit, -parent)
-                see(parent)
-                if parent == target:
-                    targetseen = True
+    def __init__(self, index, revs, stoprev=0, inclusive=False):
+        self._index = index
+        self._stoprev = stoprev
+        self._inclusive = inclusive
+        # no need to prefilter out init revs that are smaller than stoprev,
+        # it's done by the rustlazyancestors constructor.
+        # we need to convert to a list, because our rustlazyancestors
+        # constructor (from C code) doesn't understand anything else yet.
+        self._initrevs = initrevs = list(revs)
+
+        self._containsiter = parsers.rustlazyancestors(
+            index, initrevs, stoprev, inclusive)
+
+    def __nonzero__(self):
+        """False if the set is empty, True otherwise.
 
-        return targetseen
+        It's better to duplicate this essentially trivial method than
+        to subclass lazyancestors.
+        """
+        try:
+            next(iter(self))
+            return True
+        except StopIteration:
+            return False
+
+    def __iter__(self):
+        return parsers.rustlazyancestors(self._index,
+                                         self._initrevs,
+                                         self._stoprev,
+                                         self._inclusive)
+
+    def __contains__(self, target):
+        return target in self._containsiter
--- a/mercurial/archival.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/archival.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,6 +16,9 @@
 import zlib
 
 from .i18n import _
+from .node import (
+    nullrev,
+)
 
 from . import (
     error,
@@ -76,7 +79,7 @@
     # repo[0] may be hidden
     for rev in repo:
         return repo[rev]
-    return repo['null']
+    return repo[nullrev]
 
 # {tags} on ctx includes local tags and 'tip', with no current way to limit
 # that to global tags.  Therefore, use {latesttag} as a substitute when
@@ -200,7 +203,9 @@
     or compressed with deflate.'''
 
     def __init__(self, dest, mtime, compress=True):
-        self.z = zipfile.ZipFile(pycompat.fsdecode(dest), r'w',
+        if isinstance(dest, bytes):
+            dest = pycompat.fsdecode(dest)
+        self.z = zipfile.ZipFile(dest, r'w',
                                  compress and zipfile.ZIP_DEFLATED or
                                  zipfile.ZIP_STORED)
 
--- a/mercurial/bookmarks.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/bookmarks.py	Mon Oct 22 14:46:06 2018 -0400
@@ -240,7 +240,7 @@
             if self.active:
                 return self.active
             else:
-                raise error.Abort(_("no active bookmark"))
+                raise error.RepoLookupError(_("no active bookmark"))
         return bname
 
     def checkconflict(self, mark, force=False, target=None):
@@ -915,22 +915,18 @@
     elif cur != tgt and newact == repo._activebookmark:
         deactivate(repo)
 
-def _printbookmarks(ui, repo, bmarks, **opts):
+def _printbookmarks(ui, repo, fm, bmarks):
     """private method to print bookmarks
 
     Provides a way for extensions to control how bookmarks are printed (e.g.
     prepend or postpend names)
     """
-    opts = pycompat.byteskwargs(opts)
-    fm = ui.formatter('bookmarks', opts)
-    contexthint = fm.contexthint('bookmark rev node active')
     hexfn = fm.hexfunc
     if len(bmarks) == 0 and fm.isplain():
         ui.status(_("no bookmarks set\n"))
     for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
         fm.startitem()
-        if 'ctx' in contexthint:
-            fm.context(ctx=repo[n])
+        fm.context(repo=repo)
         if not ui.quiet:
             fm.plain(' %s ' % prefix, label=label)
         fm.write('bookmark', '%s', bmark, label=label)
@@ -939,24 +935,25 @@
                      repo.changelog.rev(n), hexfn(n), label=label)
         fm.data(active=(activebookmarklabel in label))
         fm.plain('\n')
-    fm.end()
 
-def printbookmarks(ui, repo, **opts):
-    """print bookmarks to a formatter
+def printbookmarks(ui, repo, fm, names=None):
+    """print bookmarks by the given formatter
 
     Provides a way for extensions to control how bookmarks are printed.
     """
     marks = repo._bookmarks
     bmarks = {}
-    for bmark, n in sorted(marks.iteritems()):
+    for bmark in (names or marks):
+        if bmark not in marks:
+            raise error.Abort(_("bookmark '%s' does not exist") % bmark)
         active = repo._activebookmark
         if bmark == active:
             prefix, label = '*', activebookmarklabel
         else:
             prefix, label = ' ', ''
 
-        bmarks[bmark] = (n, prefix, label)
-    _printbookmarks(ui, repo, bmarks, **opts)
+        bmarks[bmark] = (marks[bmark], prefix, label)
+    _printbookmarks(ui, repo, fm, bmarks)
 
 def preparehookargs(name, old, new):
     if new is None:
--- a/mercurial/branchmap.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/branchmap.py	Mon Oct 22 14:46:06 2018 -0400
@@ -38,15 +38,11 @@
     return filename
 
 def read(repo):
+    f = None
     try:
         f = repo.cachevfs(_filename(repo))
-        lines = f.read().split('\n')
-        f.close()
-    except (IOError, OSError):
-        return None
-
-    try:
-        cachekey = lines.pop(0).split(" ", 2)
+        lineiter = iter(f)
+        cachekey = next(lineiter).rstrip('\n').split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
@@ -58,7 +54,8 @@
             # invalidate the cache
             raise ValueError(r'tip differs')
         cl = repo.changelog
-        for l in lines:
+        for l in lineiter:
+            l = l.rstrip('\n')
             if not l:
                 continue
             node, state, label = l.split(" ", 2)
@@ -72,6 +69,10 @@
             partial.setdefault(label, []).append(node)
             if state == 'c':
                 partial._closednodes.add(node)
+
+    except (IOError, OSError):
+        return None
+
     except Exception as inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
@@ -80,6 +81,11 @@
             msg += ': %s\n'
             repo.ui.debug(msg % pycompat.bytestr(inst))
         partial = None
+
+    finally:
+        if f:
+            f.close()
+
     return partial
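The restructuring above streams the cache file line by line instead of
slurping it, and the try/except/finally split gives each failure mode its own
policy: a missing file means no cache, a malformed file is logged and
discarded, and the handle is closed on every path. The same shape in
miniature (opener and parseline are hypothetical stand-ins):

    import io

    def readcache(opener, parseline):
        f = None
        try:
            f = opener()
            return [parseline(l.rstrip('\n')) for l in f]
        except (IOError, OSError):
            return None          # no usable cache on disk
        except Exception:
            return None          # corrupt cache: ignore and rebuild later
        finally:
            if f:
                f.close()

    assert readcache(lambda: io.StringIO(u'a\nb\n'), lambda l: l) == ['a', 'b']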
 
 ### Nearest subset relation
@@ -124,18 +130,21 @@
 
     This is likely only called during clone with a branch map from a remote.
     """
+    cl = repo.changelog
+    clrev = cl.rev
+    clbranchinfo = cl.branchinfo
     rbheads = []
     closed = []
     for bheads in bm.itervalues():
         rbheads.extend(bheads)
         for h in bheads:
-            r = repo.changelog.rev(h)
-            b, c = repo.changelog.branchinfo(r)
+            r = clrev(h)
+            b, c = clbranchinfo(r)
             if c:
                 closed.append(h)
 
     if rbheads:
-        rtiprev = max((int(repo.changelog.rev(node))
+        rtiprev = max((int(clrev(node))
                 for node in rbheads))
         cache = branchcache(bm,
                             repo[rtiprev].node(),
@@ -272,7 +281,7 @@
         newbranches = {}
         getbranchinfo = repo.revbranchcache().branchinfo
         for r in revgen:
-            branch, closesbranch = getbranchinfo(r)
+            branch, closesbranch = getbranchinfo(r, changelog=cl)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))
@@ -290,7 +299,6 @@
             # This has been tested True on all internal usages of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
-            newheadrevs.sort()
             bheadset.update(newheadrevs)
 
             # This prunes out two kinds of heads - heads that are superseded by
@@ -399,10 +407,10 @@
         self._rbcrevslen = len(self._repo.changelog)
         self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
 
-    def branchinfo(self, rev):
+    def branchinfo(self, rev, changelog=None):
         """Return branch name and close flag for rev, using and updating
         persistent cache."""
-        changelog = self._repo.changelog
+        changelog = changelog or self._repo.changelog
         rbcrevidx = rev * _rbcrecsize
 
         # avoid negative index, changelog.read(nullrev) is fast without cache
@@ -411,7 +419,7 @@
 
         # if requested rev isn't allocated, grow and cache the rev info
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
-            return self._branchinfo(rev)
+            return self._branchinfo(rev, changelog=changelog)
 
         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
@@ -439,11 +447,11 @@
             self._rbcrevslen = min(self._rbcrevslen, truncate)
 
         # fall back to slow path and make sure it will be written to disk
-        return self._branchinfo(rev)
+        return self._branchinfo(rev, changelog=changelog)
 
-    def _branchinfo(self, rev):
+    def _branchinfo(self, rev, changelog=None):
         """Retrieve branch info from changelog and update _rbcrevs"""
-        changelog = self._repo.changelog
+        changelog = changelog or self._repo.changelog
         b, close = changelog.branchinfo(rev)
         if b in self._namesreverse:
             branchidx = self._namesreverse[b]
@@ -454,7 +462,7 @@
         reponode = changelog.node(rev)
         if close:
             branchidx |= _rbccloseflag
-        self._setcachedata(rev, reponode, branchidx)
+        self._setcachedata(rev, reponode, branchidx, changelog)
         return b, close
 
     def setdata(self, branch, rev, node, close):
@@ -474,17 +482,19 @@
         #   self.branchinfo = self._branchinfo
         #
         # Since we now have data in the cache, we need to drop this bypassing.
-        if 'branchinfo' in vars(self):
+        if r'branchinfo' in vars(self):
             del self.branchinfo
 
-    def _setcachedata(self, rev, node, branchidx):
+    def _setcachedata(self, rev, node, branchidx, changelog=None):
         """Writes the node's branch data to the in-memory cache data."""
         if rev == nullrev:
             return
+
+        changelog = changelog or self._repo.changelog
         rbcrevidx = rev * _rbcrecsize
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             self._rbcrevs.extend('\0' *
-                                 (len(self._repo.changelog) * _rbcrecsize -
+                                 (len(changelog) * _rbcrecsize -
                                   len(self._rbcrevs)))
         pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)
--- a/mercurial/bundle2.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/bundle2.py	Mon Oct 22 14:46:06 2018 -0400
@@ -839,7 +839,7 @@
             params = self._readexact(paramssize)
             self._processallparams(params)
             yield params
-            assert self._compengine.bundletype == 'UN'
+            assert self._compengine.bundletype()[1] == 'UN'
         # From there, payload might need to be decompressed
         self._fp = self._compengine.decompressorreader(self._fp)
         emptycount = 0
@@ -1532,7 +1532,7 @@
     if role == 'server':
         streamsupported = repo.ui.configbool('server', 'uncompressed',
                                              untrusted=True)
-        featuresupported = repo.ui.configbool('experimental', 'bundle2.stream')
+        featuresupported = repo.ui.configbool('server', 'bundle2.stream')
 
         if not streamsupported or not featuresupported:
             caps.pop('stream')
@@ -1672,7 +1672,7 @@
     return params
 
 def addpartbundlestream2(bundler, repo, **kwargs):
-    if not kwargs.get('stream', False):
+    if not kwargs.get(r'stream', False):
         return
 
     if not streamclone.allowservergeneration(repo):
@@ -1687,7 +1687,28 @@
     # to avoid compression to consumers of the bundle.
     bundler.prefercompressed = False
 
-    filecount, bytecount, it = streamclone.generatev2(repo)
+    # get the includes and excludes
+    includepats = kwargs.get(r'includepats')
+    excludepats = kwargs.get(r'excludepats')
+
+    narrowstream = repo.ui.configbool('experimental.server',
+                                      'stream-narrow-clones')
+
+    if (includepats or excludepats) and not narrowstream:
+        raise error.Abort(_('server does not support narrow stream clones'))
+
+    includeobsmarkers = False
+    if repo.obsstore:
+        remoteversions = obsmarkersversion(bundler.capabilities)
+        if not remoteversions:
+            raise error.Abort(_('server has obsolescence markers, but client '
+                                'cannot receive them via stream clone'))
+        elif repo.obsstore._version in remoteversions:
+            includeobsmarkers = True
+
+    filecount, bytecount, it = streamclone.generatev2(repo, includepats,
+                                                      excludepats,
+                                                      includeobsmarkers)
     requirements = _formatrequirementsspec(repo.requirements)
     part = bundler.newpart('stream2', data=it)
     part.addparam('bytecount', '%d' % bytecount, mandatory=True)
@@ -1779,6 +1800,8 @@
     This is a very early implementation that will see massive rework before
     being inflicted on any end-user.
     """
+    from . import localrepo
+
     tr = op.gettransaction()
     unpackerversion = inpart.params.get('version', '01')
     # We should raise an appropriate exception here
@@ -1795,7 +1818,8 @@
                 "bundle contains tree manifests, but local repo is "
                 "non-empty and does not use tree manifests"))
         op.repo.requirements.add('treemanifest')
-        op.repo._applyopenerreqs()
+        op.repo.svfs.options = localrepo.resolvestorevfsoptions(
+            op.repo.ui, op.repo.requirements, op.repo.features)
         op.repo._writerequirements()
     extrakwargs = {}
     targetphase = inpart.params.get('targetphase')
@@ -1897,11 +1921,11 @@
     """
     bookdata = bookmarks.binarydecode(inpart)
 
-    msgstandard = ('repository changed while pushing - please try again '
+    msgstandard = ('remote repository changed while pushing - please try again '
                    '(bookmark "%s" move from %s to %s)')
-    msgmissing = ('repository changed while pushing - please try again '
+    msgmissing = ('remote repository changed while pushing - please try again '
                   '(bookmark "%s" is missing, expected %s)')
-    msgexist = ('repository changed while pushing - please try again '
+    msgexist = ('remote repository changed while pushing - please try again '
                 '(bookmark "%s" set on %s, expected missing)')
     for book, node in bookdata:
         currentnode = op.repo._bookmarks.get(book)
@@ -1931,7 +1955,7 @@
     if op.ui.configbool('experimental', 'bundle2lazylocking'):
         op.gettransaction()
     if sorted(heads) != sorted(op.repo.heads()):
-        raise error.PushRaced('repository changed while pushing - '
+        raise error.PushRaced('remote repository changed while pushing - '
                               'please try again')
 
 @parthandler('check:updated-heads')
@@ -1960,7 +1984,7 @@
 
     for h in heads:
         if h not in currentheads:
-            raise error.PushRaced('repository changed while pushing - '
+            raise error.PushRaced('remote repository changed while pushing - '
                                   'please try again')
 
 @parthandler('check:phases')
@@ -1973,7 +1997,7 @@
     unfi = op.repo.unfiltered()
     cl = unfi.changelog
     phasecache = unfi._phasecache
-    msg = ('repository changed while pushing - please try again '
+    msg = ('remote repository changed while pushing - please try again '
            '(%s is %s expected %s)')
     for expectedphase, nodes in enumerate(phasetonodes):
         for n in nodes:
@@ -2223,11 +2247,11 @@
         total += header[1] + header[2]
         utf8branch = inpart.read(header[0])
         branch = encoding.tolocal(utf8branch)
-        for x in xrange(header[1]):
+        for x in pycompat.xrange(header[1]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, False)
-        for x in xrange(header[2]):
+        for x in pycompat.xrange(header[2]):
             node = inpart.read(20)
             rev = cl.rev(node)
             cache.setdata(branch, rev, node, True)
@@ -2263,3 +2287,39 @@
     repo.ui.debug('applying stream bundle\n')
     streamclone.applybundlev2(repo, part, filecount, bytecount,
                               requirements)
+
+def widen_bundle(repo, oldmatcher, newmatcher, common, known, cgversion,
+                 ellipses):
+    """generates bundle2 for widening a narrow clone
+
+    repo is the localrepository instance
+    oldmatcher matches what the client already has
+    newmatcher matches what the client needs (including what it already has)
+    common is set of common heads between server and client
+    known is a set of revs known on the client side (used in ellipses)
+    cgversion is the changegroup version to send
+    ellipses is a boolean telling whether to send ellipsis data or not
+
+    returns a bundle2 of the data required for widening the narrow clone
+    """
+    bundler = bundle20(repo.ui)
+    commonnodes = set()
+    cl = repo.changelog
+    for r in repo.revs("::%ln", common):
+        commonnodes.add(cl.node(r))
+    if commonnodes:
+        # XXX: we should only send the filelogs (and treemanifest). user
+        # already has the changelog and manifest
+        packer = changegroup.getbundler(cgversion, repo,
+                                        oldmatcher=oldmatcher,
+                                        matcher=newmatcher,
+                                        fullnodes=commonnodes)
+        cgdata = packer.generate(set([nodemod.nullid]), list(commonnodes),
+                                 False, 'narrow_widen', changelog=False)
+
+        part = bundler.newpart('changegroup', data=cgdata)
+        part.addparam('version', cgversion)
+        if 'treemanifest' in repo.requirements:
+            part.addparam('treemanifest', '1')
+
+    return bundler
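The '::%ln' revset above expands the common heads into the full set of nodes
the client is assumed to already have. A plain-Python equivalent over a toy
parent map (a hypothetical stand-in for the changelog, for illustration only):

    def ancestorsof(heads, parents):
        seen = set()
        stack = list(heads)
        while stack:
            n = stack.pop()
            if n in seen:
                continue
            seen.add(n)
            stack.extend(parents.get(n, ()))
        return seen

    # '::x' includes x itself, so the heads appear in the result
    assert ancestorsof(['c'], {'c': ['b'], 'b': ['a'], 'a': []}) == {'a', 'b', 'c'}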
--- a/mercurial/bundlerepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/bundlerepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -25,6 +25,7 @@
     changelog,
     cmdutil,
     discovery,
+    encoding,
     error,
     exchange,
     filelog,
@@ -80,7 +81,7 @@
             # start, size, full unc. size, base (unused), link, p1, p2, node
             e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                  self.rev(p1), self.rev(p2), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
@@ -126,8 +127,8 @@
         iterrev = rev
         # reconstruct the revision if it is from a changegroup
         while iterrev > self.repotiprev:
-            if self._cache and self._cache[1] == iterrev:
-                rawtext = self._cache[2]
+            if self._revisioncache and self._revisioncache[1] == iterrev:
+                rawtext = self._revisioncache[2]
                 break
             chain.append(iterrev)
             iterrev = self.index[iterrev][3]
@@ -142,7 +143,7 @@
                                                 'read', raw=raw)
         if validatehash:
             self.checkhash(text, node, rev=rev)
-        self._cache = (node, rev, rawtext)
+        self._revisioncache = (node, rev, rawtext)
         return text
 
     def baserevision(self, nodeorrev):
@@ -187,7 +188,7 @@
 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
     def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                  dir=''):
-        manifest.manifestrevlog.__init__(self, opener, dir=dir)
+        manifest.manifestrevlog.__init__(self, opener, tree=dir)
         bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                               linkmapper)
         if dirlogstarts is None:
@@ -255,7 +256,7 @@
             pass
     return filespos
 
-class bundlerepository(localrepo.localrepository):
+class bundlerepository(object):
     """A repository instance that is a union of a local repo and a bundle.
 
     Instances represent a read-only repository composed of a local repository
@@ -263,25 +264,19 @@
     conceptually similar to the state of a repository after an
     ``hg unbundle`` operation. However, the contents of the bundle are never
     applied to the actual base repository.
+
+    Instances constructed directly are not usable as repository objects.
+    Use instance() or makebundlerepository() to create instances.
     """
-    def __init__(self, ui, repopath, bundlepath):
-        self._tempparent = None
-        try:
-            localrepo.localrepository.__init__(self, ui, repopath)
-        except error.RepoError:
-            self._tempparent = pycompat.mkdtemp()
-            localrepo.instance(ui, self._tempparent, 1)
-            localrepo.localrepository.__init__(self, ui, self._tempparent)
+    def __init__(self, bundlepath, url, tempparent):
+        self._tempparent = tempparent
+        self._url = url
+
         self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
 
-        if repopath:
-            self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath
-        else:
-            self._url = 'bundle:' + bundlepath
-
         self.tempfile = None
         f = util.posixfile(bundlepath, "rb")
-        bundle = exchange.readbundle(ui, f, bundlepath)
+        bundle = exchange.readbundle(self.ui, f, bundlepath)
 
         if isinstance(bundle, bundle2.unbundle20):
             self._bundlefile = bundle
@@ -311,7 +306,7 @@
             if bundle.compressed():
                 f = self._writetempbundle(bundle.read, '.hg10un',
                                           header='HG10UN')
-                bundle = exchange.readbundle(ui, f, bundlepath, self.vfs)
+                bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
 
             self._bundlefile = bundle
             self._cgunpacker = bundle
@@ -370,14 +365,16 @@
         self.manstart = self._cgunpacker.tell()
         return c
 
-    def _constructmanifest(self):
+    @localrepo.unfilteredpropertycache
+    def manifestlog(self):
         self._cgunpacker.seek(self.manstart)
         # consume the header if it exists
         self._cgunpacker.manifestheader()
         linkmapper = self.unfiltered().changelog.rev
-        m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
+        rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
         self.filestart = self._cgunpacker.tell()
-        return m
+
+        return manifest.manifestlog(self.svfs, self, rootstore)
 
     def _consumemanifest(self):
         """Consumes the manifest portion of the bundle, setting filestart so the
@@ -436,7 +433,7 @@
         return bundlepeer(self)
 
     def getcwd(self):
-        return pycompat.getcwd() # always outside the repo
+        return encoding.getcwd() # always outside the repo
 
     # Check if parents exist in localrepo before setting
     def setparents(self, p1, p2=nullid):
@@ -449,20 +446,20 @@
             self.ui.warn(msg % nodemod.hex(p2))
         return super(bundlerepository, self).setparents(p1, p2)
 
-def instance(ui, path, create, intents=None):
+def instance(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_('cannot create new bundle repository'))
     # internal config: bundle.mainreporoot
     parentpath = ui.config("bundle", "mainreporoot")
     if not parentpath:
         # try to find the correct path to the working directory repo
-        parentpath = cmdutil.findrepo(pycompat.getcwd())
+        parentpath = cmdutil.findrepo(encoding.getcwd())
         if parentpath is None:
             parentpath = ''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
-        cwd = pycompat.getcwd()
+        cwd = encoding.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
@@ -479,7 +476,45 @@
             repopath, bundlename = s
     else:
         repopath, bundlename = parentpath, path
-    return bundlerepository(ui, repopath, bundlename)
+
+    return makebundlerepository(ui, repopath, bundlename)
+
+def makebundlerepository(ui, repopath, bundlepath):
+    """Make a bundle repository object based on repo and bundle paths."""
+    if repopath:
+        url = 'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
+    else:
+        url = 'bundle:%s' % bundlepath
+
+    # Because we can't make any guarantees about the type of the base
+    # repository, we can't have a static class representing the bundle
+    # repository. We also can't make any guarantees about how to even
+    # call the base repository's constructor!
+    #
+    # So, our strategy is to go through ``localrepo.instance()`` to construct
+    # a repo instance. Then, we dynamically create a new type derived from
+    # both it and our ``bundlerepository`` class which overrides some
+    # functionality. We then change the type of the constructed repository
+    # to this new type and initialize the bundle-specific bits of it.
+
+    try:
+        repo = localrepo.instance(ui, repopath, create=False)
+        tempparent = None
+    except error.RepoError:
+        tempparent = pycompat.mkdtemp()
+        try:
+            repo = localrepo.instance(ui, tempparent, create=True)
+        except Exception:
+            shutil.rmtree(tempparent)
+            raise
+
+    class derivedbundlerepository(bundlerepository, repo.__class__):
+        pass
+
+    repo.__class__ = derivedbundlerepository
+    bundlerepository.__init__(repo, bundlepath, url, tempparent)
+
+    return repo
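The __class__ rebinding trick described in the comment block is legal on any
plain Python object: build a type deriving from the instance's concrete class
plus the mixin, swap it in, then run the mixin's initializer on the
already-constructed object. The pattern in isolation (toy names):

    class Mixin(object):
        def __init__(self, extra):
            self.extra = extra

        def hello(self):
            return 'mixin sees %s' % self.extra

    class Base(object):
        pass

    obj = Base()

    class Derived(Mixin, Base):
        pass

    obj.__class__ = Derived
    Mixin.__init__(obj, 'a bundle path')
    assert obj.hello() == 'mixin sees a bundle path'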
 
 class bundletransactionmanager(object):
     def transaction(self):
@@ -588,8 +623,10 @@
             bundle = None
         if not localrepo:
             # use the created uncompressed bundlerepo
-            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
-                                                      fname)
+            localrepo = bundlerepo = makebundlerepository(repo.baseui,
+                                                          repo.root,
+                                                          fname)
+
             # this repo contains local and peer now, so filter out local again
             common = repo.heads()
     if localrepo:
--- a/mercurial/cext/mpatch.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/cext/mpatch.c	Mon Oct 22 14:46:06 2018 -0400
@@ -50,21 +50,22 @@
 
 struct mpatch_flist *cpygetitem(void *bins, ssize_t pos)
 {
-	const char *buffer;
-	struct mpatch_flist *res;
-	ssize_t blen;
+	Py_buffer buffer;
+	struct mpatch_flist *res = NULL;
 	int r;
 
 	PyObject *tmp = PyList_GetItem((PyObject *)bins, pos);
 	if (!tmp)
 		return NULL;
-	if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t *)&blen))
+	if (PyObject_GetBuffer(tmp, &buffer, PyBUF_CONTIG_RO))
 		return NULL;
-	if ((r = mpatch_decode(buffer, blen, &res)) < 0) {
+	if ((r = mpatch_decode(buffer.buf, buffer.len, &res)) < 0) {
 		if (!PyErr_Occurred())
 			setpyerr(r);
-		return NULL;
+		res = NULL;
 	}
+
+	PyBuffer_Release(&buffer);
 	return res;
 }
 
@@ -72,10 +73,10 @@
 {
 	PyObject *text, *bins, *result;
 	struct mpatch_flist *patch;
-	const char *in;
+	Py_buffer buffer;
 	int r = 0;
 	char *out;
-	Py_ssize_t len, outlen, inlen;
+	Py_ssize_t len, outlen;
 
 	if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
 		return NULL;
@@ -87,17 +88,19 @@
 		return text;
 	}
 
-	if (PyObject_AsCharBuffer(text, &in, &inlen))
+	if (PyObject_GetBuffer(text, &buffer, PyBUF_CONTIG_RO)) {
 		return NULL;
+	}
 
 	patch = mpatch_fold(bins, cpygetitem, 0, len);
 	if (!patch) { /* error already set or memory error */
 		if (!PyErr_Occurred())
 			PyErr_NoMemory();
-		return NULL;
+		result = NULL;
+		goto cleanup;
 	}
 
-	outlen = mpatch_calcsize(inlen, patch);
+	outlen = mpatch_calcsize(buffer.len, patch);
 	if (outlen < 0) {
 		r = (int)outlen;
 		result = NULL;
@@ -112,7 +115,7 @@
 	/* clang-format off */
 	{
 		Py_BEGIN_ALLOW_THREADS
-		r = mpatch_apply(out, in, inlen, patch);
+		r = mpatch_apply(out, buffer.buf, buffer.len, patch);
 		Py_END_ALLOW_THREADS
 	}
 	/* clang-format on */
@@ -122,6 +125,7 @@
 	}
 cleanup:
 	mpatch_lfree(patch);
+	PyBuffer_Release(&buffer);
 	if (!result && !PyErr_Occurred())
 		setpyerr(r);
 	return result;
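The mpatch hunks above migrate from the deprecated old-style
PyObject_AsCharBuffer() to the Py_buffer protocol, whose contract is
that every successful PyObject_GetBuffer() is paired with exactly one
PyBuffer_Release() on every exit path. A rough Python 3 analogue of
that acquire/release discipline (illustration only) is memoryview:

data = bytearray(b'abc')
with memoryview(data) as view:   # acquire a buffer over the object
    assert len(view) == 3        # the C code uses buffer.buf/buffer.len
data.extend(b'def')              # resizing is legal again after release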
--- a/mercurial/cext/osutil.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/cext/osutil.c	Mon Oct 22 14:46:06 2018 -0400
@@ -1217,7 +1217,9 @@
 	char fpmode[4];
 	int fppos = 0;
 	int plus;
+#ifndef IS_PY3K
 	FILE *fp;
+#endif
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwds, PY23("et|si:posixfile",
 							  "et|yi:posixfile"),
--- a/mercurial/cext/parsers.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/cext/parsers.c	Mon Oct 22 14:46:06 2018 -0400
@@ -713,7 +713,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 10;
+static const int version = 11;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/revlog.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/cext/revlog.c	Mon Oct 22 14:46:06 2018 -0400
@@ -28,17 +28,33 @@
 #define PyInt_AsLong PyLong_AsLong
 #endif
 
+typedef struct indexObjectStruct indexObject;
+
+typedef struct {
+	int children[16];
+} nodetreenode;
+
 /*
  * A base-16 trie for fast node->rev mapping.
  *
  * Positive value is index of the next node in the trie
- * Negative value is a leaf: -(rev + 1)
+ * Negative value is a leaf: -(rev + 2)
  * Zero is empty
  */
 typedef struct {
-	int children[16];
+	indexObject *index;
+	nodetreenode *nodes;
+	unsigned length;     /* # nodes in use */
+	unsigned capacity;   /* # nodes allocated */
+	int depth;           /* maximum depth of tree */
+	int splits;          /* # splits performed */
 } nodetree;
 
+typedef struct {
+	PyObject_HEAD
+	nodetree nt;
+} nodetreeObject;
+
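The leaf encoding documented above moved from -(rev + 1) to -(rev + 2)
so that the nullid's revision (-1) gets a real, non-zero slot and 0 can
keep meaning "empty". A quick Python illustration of the arithmetic
(not part of the patch):

encode = lambda rev: -rev - 2   # value stored in children[]
decode = lambda v: -(v + 2)     # revision recovered from a leaf
assert encode(0) == -2 and decode(-2) == 0
assert encode(-1) == -1         # nullid now distinguishable from empty
assert encode(-2) == 0          # the deletion sentinel lands on "empty"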
 /*
  * This class has two behaviors.
  *
@@ -51,7 +67,7 @@
  * With string keys, we lazily perform a reverse mapping from node to
  * rev, using a base-16 trie.
  */
-typedef struct {
+struct indexObjectStruct {
 	PyObject_HEAD
 	/* Type-specific fields go here. */
 	PyObject *data;        /* raw bytes of index */
@@ -63,16 +79,13 @@
 	PyObject *added;       /* populated on demand */
 	PyObject *headrevs;    /* cache, invalidated on changes */
 	PyObject *filteredrevs;/* filtered revs set */
-	nodetree *nt;          /* base-16 trie */
-	unsigned ntlength;          /* # nodes in use */
-	unsigned ntcapacity;        /* # nodes allocated */
-	int ntdepth;           /* maximum depth of tree */
-	int ntsplits;          /* # splits performed */
+	nodetree nt;           /* base-16 trie */
+	int ntinitialized;     /* 0 or 1 */
 	int ntrev;             /* last rev scanned */
 	int ntlookups;         /* # lookups */
 	int ntmisses;          /* # lookups that miss the cache */
 	int inlined;
-} indexObject;
+};
 
 static Py_ssize_t index_length(const indexObject *self)
 {
@@ -81,8 +94,8 @@
 	return self->length + PyList_GET_SIZE(self->added);
 }
 
-static PyObject *nullentry;
-static const char nullid[20];
+static PyObject *nullentry = NULL;
+static const char nullid[20] = {0};
 
 static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
 
@@ -95,6 +108,36 @@
 /* A RevlogNG v1 index entry is 64 bytes long. */
 static const long v1_hdrsize = 64;
 
+static void raise_revlog_error(void)
+{
+	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
+
+	mod = PyImport_ImportModule("mercurial.error");
+	if (mod == NULL) {
+		goto cleanup;
+	}
+
+	dict = PyModule_GetDict(mod);
+	if (dict == NULL) {
+		goto cleanup;
+	}
+	Py_INCREF(dict);
+
+	errclass = PyDict_GetItemString(dict, "RevlogError");
+	if (errclass == NULL) {
+		PyErr_SetString(PyExc_SystemError,
+				"could not find RevlogError");
+		goto cleanup;
+	}
+
+	/* value of exception is ignored by callers */
+	PyErr_SetString(errclass, "RevlogError");
+
+cleanup:
+	Py_XDECREF(dict);
+	Py_XDECREF(mod);
+}
+
 /*
  * Return a pointer to the beginning of a RevlogNG record.
  */
@@ -117,9 +160,8 @@
 static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
 				    int *ps, int maxrev)
 {
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-						  rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
 		ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
 	} else {
@@ -158,22 +200,19 @@
 	Py_ssize_t length = index_length(self);
 	PyObject *entry;
 
-	if (pos < 0)
-		pos += length;
+	if (pos == -1) {
+		Py_INCREF(nullentry);
+		return nullentry;
+	}
 
 	if (pos < 0 || pos >= length) {
 		PyErr_SetString(PyExc_IndexError, "revlog index out of range");
 		return NULL;
 	}
 
-	if (pos == length - 1) {
-		Py_INCREF(nullentry);
-		return nullentry;
-	}
-
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *obj;
-		obj = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		obj = PyList_GET_ITEM(self->added, pos - self->length);
 		Py_INCREF(obj);
 		return obj;
 	}
@@ -231,15 +270,15 @@
 	Py_ssize_t length = index_length(self);
 	const char *data;
 
-	if (pos == length - 1 || pos == INT_MAX)
+	if (pos == -1)
 		return nullid;
 
 	if (pos >= length)
 		return NULL;
 
-	if (pos >= self->length - 1) {
+	if (pos >= self->length) {
 		PyObject *tuple, *str;
-		tuple = PyList_GET_ITEM(self->added, pos - self->length + 1);
+		tuple = PyList_GET_ITEM(self->added, pos - self->length);
 		str = PyTuple_GetItem(tuple, 7);
 		return str ? PyBytes_AS_STRING(str) : NULL;
 	}
@@ -262,47 +301,34 @@
 	return node;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev);
+static int nt_insert(nodetree *self, const char *node, int rev);
 
-static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
+static int node_check(PyObject *obj, char **node)
 {
-	if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1)
+	Py_ssize_t nodelen;
+	if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1)
 		return -1;
-	if (*nodelen == 20)
+	if (nodelen == 20)
 		return 0;
 	PyErr_SetString(PyExc_ValueError, "20-byte hash required");
 	return -1;
 }
 
-static PyObject *index_insert(indexObject *self, PyObject *args)
+static PyObject *index_append(indexObject *self, PyObject *obj)
 {
-	PyObject *obj;
 	char *node;
-	int index;
-	Py_ssize_t len, nodelen;
-
-	if (!PyArg_ParseTuple(args, "iO", &index, &obj))
-		return NULL;
+	Py_ssize_t len;
 
 	if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) {
 		PyErr_SetString(PyExc_TypeError, "8-tuple required");
 		return NULL;
 	}
 
-	if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1)
+	if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1)
 		return NULL;
 
 	len = index_length(self);
 
-	if (index < 0)
-		index += len;
-
-	if (index != len - 1) {
-		PyErr_SetString(PyExc_IndexError,
-				"insert only supported at index -1");
-		return NULL;
-	}
-
 	if (self->added == NULL) {
 		self->added = PyList_New(0);
 		if (self->added == NULL)
@@ -312,42 +338,13 @@
 	if (PyList_Append(self->added, obj) == -1)
 		return NULL;
 
-	if (self->nt)
-		nt_insert(self, node, index);
+	if (self->ntinitialized)
+		nt_insert(&self->nt, node, (int)len);
 
 	Py_CLEAR(self->headrevs);
 	Py_RETURN_NONE;
 }
 
-static void _index_clearcaches(indexObject *self)
-{
-	if (self->cache) {
-		Py_ssize_t i;
-
-		for (i = 0; i < self->raw_length; i++)
-			Py_CLEAR(self->cache[i]);
-		free(self->cache);
-		self->cache = NULL;
-	}
-	if (self->offsets) {
-		PyMem_Free(self->offsets);
-		self->offsets = NULL;
-	}
-	free(self->nt);
-	self->nt = NULL;
-	Py_CLEAR(self->headrevs);
-}
-
-static PyObject *index_clearcaches(indexObject *self)
-{
-	_index_clearcaches(self);
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
-	self->ntrev = -1;
-	self->ntlookups = self->ntmisses = 0;
-	Py_RETURN_NONE;
-}
-
 static PyObject *index_stats(indexObject *self)
 {
 	PyObject *obj = PyDict_New();
@@ -376,16 +373,18 @@
 		Py_DECREF(t);
 	}
 
-	if (self->raw_length != self->length - 1)
+	if (self->raw_length != self->length)
 		istat(raw_length, "revs on disk");
 	istat(length, "revs in memory");
-	istat(ntcapacity, "node trie capacity");
-	istat(ntdepth, "node trie depth");
-	istat(ntlength, "node trie count");
 	istat(ntlookups, "node trie lookups");
 	istat(ntmisses, "node trie misses");
 	istat(ntrev, "node trie last rev scanned");
-	istat(ntsplits, "node trie splits");
+	if (self->ntinitialized) {
+		istat(nt.capacity, "node trie capacity");
+		istat(nt.depth, "node trie depth");
+		istat(nt.length, "node trie count");
+		istat(nt.splits, "node trie splits");
+	}
 
 #undef istat
 
@@ -451,7 +450,7 @@
 {
 	PyObject *iter = NULL;
 	PyObject *iter_item = NULL;
-	Py_ssize_t min_idx = index_length(self) + 1;
+	Py_ssize_t min_idx = index_length(self) + 2;
 	long iter_item_long;
 
 	if (PyList_GET_SIZE(list) != 0) {
@@ -463,7 +462,7 @@
 			Py_DECREF(iter_item);
 			if (iter_item_long < min_idx)
 				min_idx = iter_item_long;
-			phases[iter_item_long] = marker;
+			phases[iter_item_long] = (char)marker;
 		}
 		Py_DECREF(iter);
 	}
@@ -493,7 +492,7 @@
 	PyObject *reachable = NULL;
 
 	PyObject *val;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	long revnum;
 	Py_ssize_t k;
 	Py_ssize_t i;
@@ -615,7 +614,7 @@
 			      revstates[parents[1] + 1]) & RS_REACHABLE)
 			    && !(revstates[i + 1] & RS_REACHABLE)) {
 				revstates[i + 1] |= RS_REACHABLE;
-				val = PyInt_FromLong(i);
+				val = PyInt_FromSsize_t(i);
 				if (val == NULL)
 					goto bail;
 				r = PyList_Append(reachable, val);
@@ -645,7 +644,7 @@
 	PyObject *phaseset = NULL;
 	PyObject *phasessetlist = NULL;
 	PyObject *rev = NULL;
-	Py_ssize_t len = index_length(self) - 1;
+	Py_ssize_t len = index_length(self);
 	Py_ssize_t numphase = 0;
 	Py_ssize_t minrevallphases = 0;
 	Py_ssize_t minrevphase = 0;
@@ -702,7 +701,7 @@
 		}
 	}
 	/* Transform phase list to a python list */
-	phasessize = PyInt_FromLong(len);
+	phasessize = PyInt_FromSsize_t(len);
 	if (phasessize == NULL)
 		goto release;
 	for (i = 0; i < len; i++) {
@@ -711,7 +710,7 @@
 		 * is computed as a difference */
 		if (phase != 0) {
 			phaseset = PyList_GET_ITEM(phasessetlist, phase);
-			rev = PyInt_FromLong(i);
+			rev = PyInt_FromSsize_t(i);
 			if (rev == NULL)
 				goto release;
 			PySet_Add(phaseset, rev);
@@ -756,7 +755,7 @@
 		}
 	}
 
-	len = index_length(self) - 1;
+	len = index_length(self);
 	heads = PyList_New(0);
 	if (heads == NULL)
 		goto bail;
@@ -838,9 +837,8 @@
 {
 	const char *data;
 
-	if (rev >= self->length - 1) {
-		PyObject *tuple = PyList_GET_ITEM(self->added,
-			rev - self->length + 1);
+	if (rev >= self->length) {
+		PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
 		return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
 	}
 	else {
@@ -881,7 +879,7 @@
 		return NULL;
 	}
 
-	if (rev < 0 || rev >= length - 1) {
+	if (rev < 0 || rev >= length) {
 		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
 		return NULL;
 	}
@@ -924,7 +922,7 @@
 			break;
 		}
 
-		if (iterrev >= length - 1) {
+		if (iterrev >= length) {
 			PyErr_SetString(PyExc_IndexError, "revision outside index");
 			return NULL;
 		}
@@ -984,7 +982,7 @@
  *   -2: not found
  * rest: valid rev
  */
-static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen,
+static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
 		   int hex)
 {
 	int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
@@ -993,9 +991,6 @@
 	if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0)
 		return -1;
 
-	if (self->nt == NULL)
-		return -2;
-
 	if (hex)
 		maxlevel = nodelen > 40 ? 40 : (int)nodelen;
 	else
@@ -1003,15 +998,15 @@
 
 	for (level = off = 0; level < maxlevel; level++) {
 		int k = getnybble(node, level);
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		int v = n->children[k];
 
 		if (v < 0) {
 			const char *n;
 			Py_ssize_t i;
 
-			v = -(v + 1);
-			n = index_node(self, v);
+			v = -(v + 2);
+			n = index_node(self->index, v);
 			if (n == NULL)
 				return -2;
 			for (i = level; i < maxlevel; i++)
@@ -1027,65 +1022,67 @@
 	return -4;
 }
 
-static int nt_new(indexObject *self)
+static int nt_new(nodetree *self)
 {
-	if (self->ntlength == self->ntcapacity) {
-		if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) {
-			PyErr_SetString(PyExc_MemoryError,
-					"overflow in nt_new");
+	if (self->length == self->capacity) {
+		unsigned newcapacity;
+		nodetreenode *newnodes;
+		newcapacity = self->capacity * 2;
+		if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
+			PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
 			return -1;
 		}
-		self->ntcapacity *= 2;
-		self->nt = realloc(self->nt,
-				   self->ntcapacity * sizeof(nodetree));
-		if (self->nt == NULL) {
+		newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
+		if (newnodes == NULL) {
 			PyErr_SetString(PyExc_MemoryError, "out of memory");
 			return -1;
 		}
-		memset(&self->nt[self->ntlength], 0,
-		       sizeof(nodetree) * (self->ntcapacity - self->ntlength));
+		self->capacity = newcapacity;
+		self->nodes = newnodes;
+		memset(&self->nodes[self->length], 0,
+		       sizeof(nodetreenode) * (self->capacity - self->length));
 	}
-	return self->ntlength++;
+	return self->length++;
 }
 
-static int nt_insert(indexObject *self, const char *node, int rev)
+static int nt_insert(nodetree *self, const char *node, int rev)
 {
 	int level = 0;
 	int off = 0;
 
 	while (level < 40) {
 		int k = nt_level(node, level);
-		nodetree *n;
+		nodetreenode *n;
 		int v;
 
-		n = &self->nt[off];
+		n = &self->nodes[off];
 		v = n->children[k];
 
 		if (v == 0) {
-			n->children[k] = -rev - 1;
+			n->children[k] = -rev - 2;
 			return 0;
 		}
 		if (v < 0) {
-			const char *oldnode = index_node_existing(self, -(v + 1));
+			const char *oldnode = index_node_existing(self->index, -(v + 2));
 			int noff;
 
 			if (oldnode == NULL)
 				return -1;
 			if (!memcmp(oldnode, node, 20)) {
-				n->children[k] = -rev - 1;
+				n->children[k] = -rev - 2;
 				return 0;
 			}
 			noff = nt_new(self);
 			if (noff == -1)
 				return -1;
-			/* self->nt may have been changed by realloc */
-			self->nt[off].children[k] = noff;
+			/* self->nodes may have been changed by realloc */
+			self->nodes[off].children[k] = noff;
 			off = noff;
-			n = &self->nt[off];
+			n = &self->nodes[off];
 			n->children[nt_level(oldnode, ++level)] = v;
-			if (level > self->ntdepth)
-				self->ntdepth = level;
-			self->ntsplits += 1;
+			if (level > self->depth)
+				self->depth = level;
+			self->splits += 1;
 		} else {
 			level += 1;
 			off = v;
@@ -1095,167 +1092,69 @@
 	return -1;
 }
 
-static int nt_init(indexObject *self)
+static PyObject *ntobj_insert(nodetreeObject *self, PyObject *args)
 {
-	if (self->nt == NULL) {
-		if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) {
-			PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
-			return -1;
-		}
-		self->ntcapacity = self->raw_length < 4
-			? 4 : (int)self->raw_length / 2;
+	Py_ssize_t rev;
+	const char *node;
+	Py_ssize_t length;
+	if (!PyArg_ParseTuple(args, "n", &rev))
+		return NULL;
+	length = index_length(self->nt.index);
+	if (rev < 0 || rev >= length) {
+		PyErr_SetString(PyExc_ValueError, "revlog index out of range");
+		return NULL;
+	}
+	node = index_node_existing(self->nt.index, rev);
+	if (node == NULL)
+		return NULL;
+	if (nt_insert(&self->nt, node, (int)rev) == -1)
+		return NULL;
+	Py_RETURN_NONE;
+}
 
-		self->nt = calloc(self->ntcapacity, sizeof(nodetree));
-		if (self->nt == NULL) {
-			PyErr_NoMemory();
-			return -1;
-		}
-		self->ntlength = 1;
-		self->ntrev = (int)index_length(self) - 1;
-		self->ntlookups = 1;
-		self->ntmisses = 0;
-		if (nt_insert(self, nullid, INT_MAX) == -1)
-			return -1;
+static int nt_delete_node(nodetree *self, const char *node)
+{
+	/* rev==-2 happens to get encoded as 0, which is interpreted as not set */
+	return nt_insert(self, node, -2);
+}
+
+static int nt_init(nodetree *self, indexObject *index, unsigned capacity)
+{
+	/* Initialize before overflow-checking to avoid nt_dealloc() crash. */
+	self->nodes = NULL;
+
+	self->index = index;
+	/* The input capacity is in terms of revisions, while the field is in
+	 * terms of nodetree nodes. */
+	self->capacity = (capacity < 4 ? 4 : capacity / 2);
+	self->depth = 0;
+	self->splits = 0;
+	if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) {
+		PyErr_SetString(PyExc_ValueError, "overflow in nt_init");
+		return -1;
 	}
+	self->nodes = calloc(self->capacity, sizeof(nodetreenode));
+	if (self->nodes == NULL) {
+		PyErr_NoMemory();
+		return -1;
+	}
+	self->length = 1;
 	return 0;
 }
 
-/*
- * Return values:
- *
- *   -3: error (exception set)
- *   -2: not found (no exception set)
- * rest: valid rev
- */
-static int index_find_node(indexObject *self,
-			   const char *node, Py_ssize_t nodelen)
-{
-	int rev;
-
-	self->ntlookups++;
-	rev = nt_find(self, node, nodelen, 0);
-	if (rev >= -1)
-		return rev;
-
-	if (nt_init(self) == -1)
-		return -3;
+static PyTypeObject indexType;
 
-	/*
-	 * For the first handful of lookups, we scan the entire index,
-	 * and cache only the matching nodes. This optimizes for cases
-	 * like "hg tip", where only a few nodes are accessed.
-	 *
-	 * After that, we cache every node we visit, using a single
-	 * scan amortized over multiple lookups.  This gives the best
-	 * bulk performance, e.g. for "hg log".
-	 */
-	if (self->ntmisses++ < 4) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				if (nt_insert(self, n, rev) == -1)
-					return -3;
-				break;
-			}
-		}
-	} else {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -3;
-			if (nt_insert(self, n, rev) == -1) {
-				self->ntrev = rev + 1;
-				return -3;
-			}
-			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
-				break;
-			}
-		}
-		self->ntrev = rev;
-	}
-
-	if (rev >= 0)
-		return rev;
-	return -2;
+static int ntobj_init(nodetreeObject *self, PyObject *args)
+{
+	PyObject *index;
+	unsigned capacity;
+	if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
+		return -1;
+	Py_INCREF(index);
+	return nt_init(&self->nt, (indexObject*)index, capacity);
 }
 
-static void raise_revlog_error(void)
-{
-	PyObject *mod = NULL, *dict = NULL, *errclass = NULL;
-
-	mod = PyImport_ImportModule("mercurial.error");
-	if (mod == NULL) {
-		goto cleanup;
-	}
-
-	dict = PyModule_GetDict(mod);
-	if (dict == NULL) {
-		goto cleanup;
-	}
-	Py_INCREF(dict);
-
-	errclass = PyDict_GetItemString(dict, "RevlogError");
-	if (errclass == NULL) {
-		PyErr_SetString(PyExc_SystemError,
-				"could not find RevlogError");
-		goto cleanup;
-	}
-
-	/* value of exception is ignored by callers */
-	PyErr_SetString(errclass, "RevlogError");
-
-cleanup:
-	Py_XDECREF(dict);
-	Py_XDECREF(mod);
-}
-
-static PyObject *index_getitem(indexObject *self, PyObject *value)
-{
-	char *node;
-	Py_ssize_t nodelen;
-	int rev;
-
-	if (PyInt_Check(value))
-		return index_get(self, PyInt_AS_LONG(value));
-
-	if (node_check(value, &node, &nodelen) == -1)
-		return NULL;
-	rev = index_find_node(self, node, nodelen);
-	if (rev >= -1)
-		return PyInt_FromLong(rev);
-	if (rev == -2)
-		raise_revlog_error();
-	return NULL;
-}
-
-/*
- * Fully populate the radix tree.
- */
-static int nt_populate(indexObject *self) {
-	int rev;
-	if (self->ntrev > 0) {
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node_existing(self, rev);
-			if (n == NULL)
-				return -1;
-			if (nt_insert(self, n, rev) == -1)
-				return -1;
-		}
-		self->ntrev = -1;
-	}
-	return 0;
-}
-
-static int nt_partialmatch(indexObject *self, const char *node,
+static int nt_partialmatch(nodetree *self, const char *node,
 			   Py_ssize_t nodelen)
 {
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	return nt_find(self, node, nodelen, 1);
 }
 
@@ -1268,24 +1167,19 @@
  *   -2: not found (no exception set)
  * rest: length of shortest prefix
  */
-static int nt_shortest(indexObject *self, const char *node)
+static int nt_shortest(nodetree *self, const char *node)
 {
 	int level, off;
 
-	if (nt_init(self) == -1)
-		return -3;
-	if (nt_populate(self) == -1)
-		return -3;
-
 	for (level = off = 0; level < 40; level++) {
 		int k, v;
-		nodetree *n = &self->nt[off];
+		nodetreenode *n = &self->nodes[off];
 		k = nt_level(node, level);
 		v = n->children[k];
 		if (v < 0) {
 			const char *n;
-			v = -(v + 1);
-			n = index_node_existing(self, v);
+			v = -(v + 2);
+			n = index_node_existing(self->index, v);
 			if (n == NULL)
 				return -3;
 			if (memcmp(node, n, 20) != 0)
@@ -1310,6 +1204,204 @@
 	return -3;
 }
 
+static PyObject *ntobj_shortest(nodetreeObject *self, PyObject *args)
+{
+	PyObject *val;
+	char *node;
+	int length;
+
+	if (!PyArg_ParseTuple(args, "O", &val))
+		return NULL;
+	if (node_check(val, &node) == -1)
+		return NULL;
+
+	length = nt_shortest(&self->nt, node);
+	if (length == -3)
+		return NULL;
+	if (length == -2) {
+		raise_revlog_error();
+		return NULL;
+	}
+	return PyInt_FromLong(length);
+}
+
+static void nt_dealloc(nodetree *self)
+{
+	free(self->nodes);
+	self->nodes = NULL;
+}
+
+static void ntobj_dealloc(nodetreeObject *self)
+{
+	Py_XDECREF(self->nt.index);
+	nt_dealloc(&self->nt);
+	PyObject_Del(self);
+}
+
+static PyMethodDef ntobj_methods[] = {
+	{"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
+	 "insert an index entry"},
+	{"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
+	 "find length of shortest hex nodeid of a binary ID"},
+	{NULL} /* Sentinel */
+};
+
+static PyTypeObject nodetreeType = {
+	PyVarObject_HEAD_INIT(NULL, 0) /* header */
+	"parsers.nodetree",        /* tp_name */
+	sizeof(nodetreeObject),    /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)ntobj_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	"nodetree",                /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	ntobj_methods,             /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)ntobj_init,      /* tp_init */
+	0,                         /* tp_alloc */
+};
+
+static int index_init_nt(indexObject *self)
+{
+	if (!self->ntinitialized) {
+		if (nt_init(&self->nt, self, (int)self->raw_length) == -1) {
+			nt_dealloc(&self->nt);
+			return -1;
+		}
+		if (nt_insert(&self->nt, nullid, -1) == -1) {
+			nt_dealloc(&self->nt);
+			return -1;
+		}
+		self->ntinitialized = 1;
+		self->ntrev = (int)index_length(self);
+		self->ntlookups = 1;
+		self->ntmisses = 0;
+	}
+	return 0;
+}
+
+/*
+ * Return values:
+ *
+ *   -3: error (exception set)
+ *   -2: not found (no exception set)
+ * rest: valid rev
+ */
+static int index_find_node(indexObject *self,
+			   const char *node, Py_ssize_t nodelen)
+{
+	int rev;
+
+	if (index_init_nt(self) == -1)
+		return -3;
+
+	self->ntlookups++;
+	rev = nt_find(&self->nt, node, nodelen, 0);
+	if (rev >= -1)
+		return rev;
+
+	/*
+	 * For the first handful of lookups, we scan the entire index,
+	 * and cache only the matching nodes. This optimizes for cases
+	 * like "hg tip", where only a few nodes are accessed.
+	 *
+	 * After that, we cache every node we visit, using a single
+	 * scan amortized over multiple lookups.  This gives the best
+	 * bulk performance, e.g. for "hg log".
+	 */
+	if (self->ntmisses++ < 4) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				if (nt_insert(&self->nt, n, rev) == -1)
+					return -3;
+				break;
+			}
+		}
+	} else {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
+			if (nt_insert(&self->nt, n, rev) == -1) {
+				self->ntrev = rev + 1;
+				return -3;
+			}
+			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
+				break;
+			}
+		}
+		self->ntrev = rev;
+	}
+
+	if (rev >= 0)
+		return rev;
+	return -2;
+}
+
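The two-phase caching policy that index_find_node() implements above
(scan without caching for the first few misses, then cache every node
visited so one scan is amortized over many lookups) can be sketched in
Python; revmap below is a hypothetical illustration, not a Mercurial
API:

class revmap(object):
    def __init__(self, nodes):
        self._nodes = nodes        # rev -> node, like the revlog index
        self._cache = {}           # node -> rev, like the trie
        self._ntrev = len(nodes)   # revs >= _ntrev are already cached
        self._misses = 0

    def rev(self, node):
        if node in self._cache:
            return self._cache[node]
        self._misses += 1
        for r in range(self._ntrev - 1, -1, -1):
            if self._misses > 4:
                self._cache[self._nodes[r]] = r   # amortized full scan
            if self._nodes[r] == node:
                self._cache[node] = r
                if self._misses > 4:
                    self._ntrev = r   # later scans resume below here
                return r
        if self._misses > 4:
            self._ntrev = 0
        raise LookupError(node)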
+static PyObject *index_getitem(indexObject *self, PyObject *value)
+{
+	char *node;
+	int rev;
+
+	if (PyInt_Check(value))
+		return index_get(self, PyInt_AS_LONG(value));
+
+	if (node_check(value, &node) == -1)
+		return NULL;
+	rev = index_find_node(self, node, 20);
+	if (rev >= -1)
+		return PyInt_FromLong(rev);
+	if (rev == -2)
+		raise_revlog_error();
+	return NULL;
+}
+
+/*
+ * Fully populate the radix tree.
+ */
+static int index_populate_nt(indexObject *self) {
+	int rev;
+	if (self->ntrev > 0) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -1;
+			if (nt_insert(&self->nt, n, rev) == -1)
+				return -1;
+		}
+		self->ntrev = -1;
+	}
+	return 0;
+}
+
 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
 {
 	const char *fullnode;
@@ -1338,12 +1430,15 @@
 		Py_RETURN_NONE;
 	}
 
-	rev = nt_partialmatch(self, node, nodelen);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	rev = nt_partialmatch(&self->nt, node, nodelen);
 
 	switch (rev) {
 	case -4:
 		raise_revlog_error();
-	case -3:
 		return NULL;
 	case -2:
 		Py_RETURN_NONE;
@@ -1360,18 +1455,21 @@
 
 static PyObject *index_shortest(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int length;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
 
 	self->ntlookups++;
-	length = nt_shortest(self, node);
+	if (index_init_nt(self) == -1)
+		return NULL;
+	if (index_populate_nt(self) == -1)
+		return NULL;
+	length = nt_shortest(&self->nt, node);
 	if (length == -3)
 		return NULL;
 	if (length == -2) {
@@ -1383,16 +1481,15 @@
 
 static PyObject *index_m_get(indexObject *self, PyObject *args)
 {
-	Py_ssize_t nodelen;
 	PyObject *val;
 	char *node;
 	int rev;
 
 	if (!PyArg_ParseTuple(args, "O", &val))
 		return NULL;
-	if (node_check(val, &node, &nodelen) == -1)
+	if (node_check(val, &node) == -1)
 		return NULL;
-	rev = index_find_node(self, node, nodelen);
+	rev = index_find_node(self, node, 20);
 	if (rev == -3)
 		return NULL;
 	if (rev == -2)
@@ -1403,17 +1500,16 @@
 static int index_contains(indexObject *self, PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 
 	if (PyInt_Check(value)) {
 		long rev = PyInt_AS_LONG(value);
 		return rev >= -1 && rev < index_length(self);
 	}
 
-	if (node_check(value, &node, &nodelen) == -1)
+	if (node_check(value, &node) == -1)
 		return -1;
 
-	switch (index_find_node(self, node, nodelen)) {
+	switch (index_find_node(self, node, 20)) {
 	case -3:
 		return -1;
 	case -2:
@@ -1554,7 +1650,7 @@
 		goto bail;
 	}
 
-	interesting = calloc(sizeof(*interesting), 1 << revcount);
+	interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount);
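+	/* Widening to size_t before the shift matters: with a 32-bit int,
+	 * 1 << revcount would overflow once revcount approached 31, while
+	 * the size_t shift stays well-defined. */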
 	if (interesting == NULL) {
 		PyErr_NoMemory();
 		goto bail;
@@ -1687,7 +1783,7 @@
 	revs = PyMem_Malloc(argcount * sizeof(*revs));
 	if (argcount > 0 && revs == NULL)
 		return PyErr_NoMemory();
-	len = index_length(self) - 1;
+	len = index_length(self);
 
 	for (i = 0; i < argcount; i++) {
 		static const int capacity = 24;
@@ -1787,7 +1883,7 @@
 /*
  * Invalidate any trie entries introduced by added revs.
  */
-static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
+static void index_invalidate_added(indexObject *self, Py_ssize_t start)
 {
 	Py_ssize_t i, len = PyList_GET_SIZE(self->added);
 
@@ -1795,7 +1891,7 @@
 		PyObject *tuple = PyList_GET_ITEM(self->added, i);
 		PyObject *node = PyTuple_GET_ITEM(tuple, 7);
 
-		nt_insert(self, PyBytes_AS_STRING(node), -1);
+		nt_delete_node(&self->nt, PyBytes_AS_STRING(node));
 	}
 
 	if (start == 0)
@@ -1809,16 +1905,17 @@
 static int index_slice_del(indexObject *self, PyObject *item)
 {
 	Py_ssize_t start, stop, step, slicelength;
-	Py_ssize_t length = index_length(self);
+	Py_ssize_t length = index_length(self) + 1;
 	int ret = 0;
 
 /* Argument changed from PySliceObject* to PyObject* in Python 3. */
 #ifdef IS_PY3K
 	if (PySlice_GetIndicesEx(item, length,
+				 &start, &stop, &step, &slicelength) < 0)
 #else
 	if (PySlice_GetIndicesEx((PySliceObject*)item, length,
+				 &start, &stop, &step, &slicelength) < 0)
 #endif
-				 &start, &stop, &step, &slicelength) < 0)
 		return -1;
 
 	if (slicelength <= 0)
@@ -1845,23 +1942,23 @@
 		return -1;
 	}
 
-	if (start < self->length - 1) {
-		if (self->nt) {
+	if (start < self->length) {
+		if (self->ntinitialized) {
 			Py_ssize_t i;
 
-			for (i = start + 1; i < self->length - 1; i++) {
+			for (i = start + 1; i < self->length; i++) {
 				const char *node = index_node_existing(self, i);
 				if (node == NULL)
 					return -1;
 
-				nt_insert(self, node, -1);
+				nt_delete_node(&self->nt, node);
 			}
 			if (self->added)
-				nt_invalidate_added(self, 0);
+				index_invalidate_added(self, 0);
 			if (self->ntrev > start)
 				self->ntrev = (int)start;
 		}
-		self->length = start + 1;
+		self->length = start;
 		if (start < self->raw_length) {
 			if (self->cache) {
 				Py_ssize_t i;
@@ -1873,13 +1970,13 @@
 		goto done;
 	}
 
-	if (self->nt) {
-		nt_invalidate_added(self, start - self->length + 1);
+	if (self->ntinitialized) {
+		index_invalidate_added(self, start - self->length);
 		if (self->ntrev > start)
 			self->ntrev = (int)start;
 	}
 	if (self->added)
-		ret = PyList_SetSlice(self->added, start - self->length + 1,
+		ret = PyList_SetSlice(self->added, start - self->length,
 				      PyList_GET_SIZE(self->added), NULL);
 done:
 	Py_CLEAR(self->headrevs);
@@ -1897,17 +1994,16 @@
 				  PyObject *value)
 {
 	char *node;
-	Py_ssize_t nodelen;
 	long rev;
 
 	if (PySlice_Check(item) && value == NULL)
 		return index_slice_del(self, item);
 
-	if (node_check(item, &node, &nodelen) == -1)
+	if (node_check(item, &node) == -1)
 		return -1;
 
 	if (value == NULL)
-		return self->nt ? nt_insert(self, node, -1) : 0;
+		return self->ntinitialized ? nt_delete_node(&self->nt, node) : 0;
 	rev = PyInt_AsLong(value);
 	if (rev > INT_MAX || rev < 0) {
 		if (!PyErr_Occurred())
@@ -1915,9 +2011,9 @@
 		return -1;
 	}
 
-	if (nt_init(self) == -1)
+	if (index_init_nt(self) == -1)
 		return -1;
-	return nt_insert(self, node, (int)rev);
+	return nt_insert(&self->nt, node, (int)rev);
 }
 
 /*
@@ -1966,7 +2062,7 @@
 	self->headrevs = NULL;
 	self->filteredrevs = Py_None;
 	Py_INCREF(Py_None);
-	self->nt = NULL;
+	self->ntinitialized = 0;
 	self->offsets = NULL;
 
 	if (!PyArg_ParseTuple(args, "OO", &data_obj, &inlined_obj))
@@ -1984,8 +2080,6 @@
 	self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
 	self->data = data_obj;
 
-	self->ntlength = self->ntcapacity = 0;
-	self->ntdepth = self->ntsplits = 0;
 	self->ntlookups = self->ntmisses = 0;
 	self->ntrev = -1;
 	Py_INCREF(self->data);
@@ -1995,14 +2089,14 @@
 		if (len == -1)
 			goto bail;
 		self->raw_length = len;
-		self->length = len + 1;
+		self->length = len;
 	} else {
 		if (size % v1_hdrsize) {
 			PyErr_SetString(PyExc_ValueError, "corrupt index file");
 			goto bail;
 		}
 		self->raw_length = size / v1_hdrsize;
-		self->length = self->raw_length + 1;
+		self->length = self->raw_length;
 	}
 
 	return 0;
@@ -2016,6 +2110,35 @@
 	return (PyObject *)self;
 }
 
+static void _index_clearcaches(indexObject *self)
+{
+	if (self->cache) {
+		Py_ssize_t i;
+
+		for (i = 0; i < self->raw_length; i++)
+			Py_CLEAR(self->cache[i]);
+		free(self->cache);
+		self->cache = NULL;
+	}
+	if (self->offsets) {
+		PyMem_Free((void *)self->offsets);
+		self->offsets = NULL;
+	}
+	if (self->ntinitialized) {
+		nt_dealloc(&self->nt);
+	}
+	self->ntinitialized = 0;
+	Py_CLEAR(self->headrevs);
+}
+
+static PyObject *index_clearcaches(indexObject *self)
+{
+	_index_clearcaches(self);
+	self->ntrev = -1;
+	self->ntlookups = self->ntmisses = 0;
+	Py_RETURN_NONE;
+}
+
 static void index_dealloc(indexObject *self)
 {
 	_index_clearcaches(self);
@@ -2066,8 +2189,8 @@
 	 "get filtered head revisions"}, /* Can always do filtering */
 	{"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
 	 "determine revisions with deltas to reconstruct fulltext"},
-	{"insert", (PyCFunction)index_insert, METH_VARARGS,
-	 "insert an index entry"},
+	{"append", (PyCFunction)index_append, METH_O,
+	 "append an index entry"},
 	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
 	 "match a potentially ambiguous node ID"},
 	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
@@ -2167,6 +2290,171 @@
 	return NULL;
 }
 
+#ifdef WITH_RUST
+
+/* rustlazyancestors: iteration over ancestors implemented in Rust
+ *
+ * This class holds a reference to an index and to the Rust iterator.
+ */
+typedef struct rustlazyancestorsObjectStruct rustlazyancestorsObject;
+
+struct rustlazyancestorsObjectStruct {
+	PyObject_HEAD
+	/* Type-specific fields go here. */
+	indexObject *index;    /* Ref kept to avoid GC'ing the index */
+	void *iter;        /* Rust iterator */
+};
+
+/* FFI exposed from Rust code */
+rustlazyancestorsObject *rustlazyancestors_init(
+	indexObject *index,
+	/* to pass index_get_parents() */
+	int (*)(indexObject *, Py_ssize_t, int*, int),
+	/* initrevs vector */
+	int initrevslen, long *initrevs,
+	long stoprev,
+	int inclusive);
+void rustlazyancestors_drop(rustlazyancestorsObject *self);
+int rustlazyancestors_next(rustlazyancestorsObject *self);
+int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
+
+/* CPython instance methods */
+static int rustla_init(rustlazyancestorsObject *self,
+                       PyObject *args) {
+	PyObject *initrevsarg = NULL;
+	PyObject *inclusivearg = NULL;
+	long stoprev = 0;
+	long *initrevs = NULL;
+	int inclusive = 0;
+	Py_ssize_t i;
+
+	indexObject *index;
+	if (!PyArg_ParseTuple(args, "O!O!lO!",
+			      &indexType, &index,
+			      &PyList_Type, &initrevsarg,
+			      &stoprev,
+			      &PyBool_Type, &inclusivearg))
+		return -1;
+
+	Py_INCREF(index);
+	self->index = index;
+
+	if (inclusivearg == Py_True)
+		inclusive = 1;
+
+	Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
+
+	initrevs = (long*)calloc(linit, sizeof(long));
+
+	if (initrevs == NULL) {
+		PyErr_NoMemory();
+		goto bail;
+	}
+
+	for (i = 0; i < linit; i++) {
+		initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
+	}
+	if (PyErr_Occurred())
+		goto bail;
+
+	self->iter = rustlazyancestors_init(index,
+		                            index_get_parents,
+		                            linit, initrevs,
+		                            stoprev, inclusive);
+	if (self->iter == NULL) {
+		/* if this is because of GraphError::ParentOutOfRange,
+		 * index_get_parents() has already set the proper ValueError */
+		goto bail;
+	}
+
+	free(initrevs);
+	return 0;
+
+bail:
+	free(initrevs);
+	return -1;
+}
+
+static void rustla_dealloc(rustlazyancestorsObject *self)
+{
+	Py_XDECREF(self->index);
+	if (self->iter != NULL) { /* can happen if rustla_init failed */
+		rustlazyancestors_drop(self->iter);
+	}
+	PyObject_Del(self);
+}
+
+static PyObject *rustla_next(rustlazyancestorsObject *self) {
+	int res = rustlazyancestors_next(self->iter);
+	if (res == -1) {
+		/* Returning NULL without setting an exception is how
+		 * tp_iternext signals exhaustion, as Python's own iterators
+		 * (Objects/rangeobject.c, Modules/_io/stringio.c) demonstrate.
+		 */
+		return NULL;
+	}
+	return PyInt_FromLong(res);
+}
+
+static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev) {
+	if (!(PyInt_Check(rev))) {
+		return 0;
+	}
+	return rustlazyancestors_contains(self->iter, PyInt_AS_LONG(rev));
+}
+
+static PySequenceMethods rustla_sequence_methods = {
+	0,                       /* sq_length */
+	0,                       /* sq_concat */
+	0,                       /* sq_repeat */
+	0,                       /* sq_item */
+	0,                       /* sq_slice */
+	0,                       /* sq_ass_item */
+	0,                       /* sq_ass_slice */
+	(objobjproc)rustla_contains, /* sq_contains */
+};
+
+static PyTypeObject rustlazyancestorsType = {
+	PyVarObject_HEAD_INIT(NULL, 0) /* header */
+	"parsers.rustlazyancestors",           /* tp_name */
+	sizeof(rustlazyancestorsObject),       /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)rustla_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&rustla_sequence_methods,  /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	"Iterator over ancestors, implemented in Rust", /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	(iternextfunc)rustla_next, /* tp_iternext */
+	0,                         /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)rustla_init,     /* tp_init */
+	0,                         /* tp_alloc */
+};
+#endif /* WITH_RUST */
+
 void revlog_module_init(PyObject *mod)
 {
 	indexType.tp_new = PyType_GenericNew;
@@ -2175,8 +2463,26 @@
 	Py_INCREF(&indexType);
 	PyModule_AddObject(mod, "index", (PyObject *)&indexType);
 
-	nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
-				  -1, -1, -1, -1, nullid, 20);
+	nodetreeType.tp_new = PyType_GenericNew;
+	if (PyType_Ready(&nodetreeType) < 0)
+		return;
+	Py_INCREF(&nodetreeType);
+	PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
+
+	if (!nullentry) {
+		nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
+					  -1, -1, -1, -1, nullid, 20);
+	}
 	if (nullentry)
 		PyObject_GC_UnTrack(nullentry);
+
+#ifdef WITH_RUST
+	rustlazyancestorsType.tp_new = PyType_GenericNew;
+	if (PyType_Ready(&rustlazyancestorsType) < 0)
+		return;
+	Py_INCREF(&rustlazyancestorsType);
+	PyModule_AddObject(mod, "rustlazyancestors",
+		(PyObject *)&rustlazyancestorsType);
+#endif
+
 }
--- a/mercurial/changegroup.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/changegroup.py	Mon Oct 22 14:46:06 2018 -0400
@@ -14,33 +14,27 @@
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
     short,
 )
 
 from . import (
-    dagutil,
     error,
+    match as matchmod,
     mdiff,
     phases,
     pycompat,
+    repository,
     util,
 )
 
-from .utils import (
-    stringutil,
-)
-
-_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
-_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
-_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
+_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
+_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
+_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")
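Compiling the delta-header formats into struct.Struct objects parses
each format string once at import time; readers then reuse the compiled
object per chunk. A micro-sketch of the API in use:

import struct

hdr = struct.Struct(">20s20s20s20s")               # compiled once
assert hdr.size == struct.calcsize(">20s20s20s20s")
node, p1, p2, cs = hdr.unpack(b"\x00" * hdr.size)  # reused per header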
 
 LFS_REQUIREMENT = 'lfs'
 
-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = 'narrowhg-experimental'
-
 readexactly = util.readexactly
 
 def getchunk(stream):
@@ -61,6 +55,10 @@
     """return a changegroup chunk header (string) for a zero-length chunk"""
     return struct.pack(">l", 0)
 
+def _fileheader(path):
+    """Obtain a changegroup chunk header for a named path."""
+    return chunkheader(len(path)) + path
+
 def writechunks(ui, chunks, filename, vfs=None):
     """Write chunks to a file and return its filename.
 
@@ -114,7 +112,7 @@
     bundlerepo and some debug commands - their use is discouraged.
     """
     deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '01'
     _grouplistcount = 1 # One list of files after the manifests
 
@@ -187,7 +185,7 @@
         if not l:
             return {}
         headerdata = readexactly(self._stream, self.deltaheadersize)
-        header = struct.unpack(self.deltaheader, headerdata)
+        header = self.deltaheader.unpack(headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
         return (node, p1, p2, cs, deltabase, delta, flags)
@@ -245,7 +243,7 @@
         # be empty during the pull
         self.manifestheader()
         deltas = self.deltaiter()
-        repo.manifestlog.addgroup(deltas, revmap, trp)
+        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
         prog.complete()
         self.callback = None
 
@@ -305,7 +303,7 @@
             efiles = len(efiles)
 
             if not cgnodes:
-                repo.ui.develwarn('applied empty changegroup',
+                repo.ui.develwarn('applied empty changelog from changegroup',
                                   config='warn-empty-changegroup')
             clend = len(cl)
             changesets = clend - clstart
@@ -325,7 +323,7 @@
                 cl = repo.changelog
                 ml = repo.manifestlog
                 # validate incoming csets have their manifests
-                for cset in xrange(clstart, clend):
+                for cset in pycompat.xrange(clstart, clend):
                     mfnode = cl.changelogrevision(cset).manifest
                     mfest = ml[mfnode].readdelta()
                     # store file cgnodes we must see
@@ -367,7 +365,7 @@
                 repo.hook('pretxnchangegroup',
                           throw=True, **pycompat.strkwargs(hookargs))
 
-            added = [cl.node(r) for r in xrange(clstart, clend)]
+            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
             phaseall = None
             if srctype in ('push', 'serve'):
                 # Old servers can not push the boundary themselves.
@@ -446,7 +444,7 @@
     remain the same.
     """
     deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '02'
 
     def _deltaheader(self, headertuple, prevnode):
@@ -462,7 +460,7 @@
     separating manifests and files.
     """
     deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-    deltaheadersize = struct.calcsize(deltaheader)
+    deltaheadersize = deltaheader.size
     version = '03'
     _grouplistcount = 2 # One list of manifests and one list of files
 
@@ -476,9 +474,8 @@
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata["filename"]
             repo.ui.debug("adding %s revisions\n" % d)
-            dirlog = repo.manifestlog._revlog.dirlog(d)
             deltas = self.deltaiter()
-            if not dirlog.addgroup(deltas, revmap, trp):
+            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                 raise error.Abort(_("received dir revlog group is empty"))
 
 class headerlessfixup(object):
@@ -493,189 +490,493 @@
             return d
         return readexactly(self._fh, n)
 
-class cg1packer(object):
-    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
-    version = '01'
-    def __init__(self, repo, bundlecaps=None):
+def _revisiondeltatochunks(delta, headerfn):
+    """Serialize a revisiondelta to changegroup chunks."""
+
+    # The captured revision delta may be encoded as a delta against
+    # a base revision or as a full revision. The changegroup format
+    # requires that everything on the wire be deltas. So for full
+    # revisions, we need to invent a header that says to rewrite
+    # data.
+
+    if delta.delta is not None:
+        prefix, data = b'', delta.delta
+    elif delta.basenode == nullid:
+        data = delta.revision
+        prefix = mdiff.trivialdiffheader(len(data))
+    else:
+        data = delta.revision
+        prefix = mdiff.replacediffheader(delta.baserevisionsize,
+                                         len(data))
+
+    meta = headerfn(delta)
+
+    yield chunkheader(len(meta) + len(prefix) + len(data))
+    yield meta
+    if prefix:
+        yield prefix
+    yield data
+
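A hedged sketch of what the "invented header" above amounts to: a
single bdiff hunk (start, end, newlength) that replaces the whole base
reproduces the full text on the receiving side. fulltextasdelta is
hypothetical; the real helpers are mdiff.trivialdiffheader() and
mdiff.replacediffheader():

import struct

def fulltextasdelta(basetext, newtext):
    # one hunk replacing base[0:len(basetext)] with newtext; with an
    # empty base this degenerates to the trivialdiffheader() form
    return struct.pack(">lll", 0, len(basetext), len(newtext)) + newtext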
+def _sortnodesellipsis(store, nodes, cl, lookup):
+    """Sort nodes for changegroup generation."""
+    # Ellipses serving mode.
+    #
+    # In a perfect world, we'd generate better ellipsis-ified graphs
+    # for non-changelog revlogs. In practice, we haven't started doing
+    # that yet, so the resulting DAGs for the manifestlog and filelogs
+    # are actually full of bogus parentage on all the ellipsis
+    # nodes. This has the side effect that, while the contents are
+    # correct, the individual DAGs might be completely out of whack in
+    # a case like 882681bc3166 and its ancestors (back about 10
+    # revisions or so) in the main hg repo.
+    #
+    # The one invariant we *know* holds is that the new (potentially
+    # bogus) DAG shape will be valid if we order the nodes in the
+    # order that they're introduced in dramatis personae by the
+    # changelog, so what we do is we sort the non-changelog histories
+    # by the order in which they are used by the changelog.
+    key = lambda n: cl.rev(lookup(n))
+    return sorted(nodes, key=key)
+
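A toy illustration of the ordering above (hypothetical data): nodes
come out in the order their linked changesets entered the changelog,
regardless of their order in the local store:

clrev = {'fa': 2, 'fb': 0, 'fc': 1}   # changelog rev of each linknode
lookup = lambda n: n                   # identity lookup for the toy
assert sorted(['fa', 'fb', 'fc'],
              key=lambda n: clrev[lookup(n)]) == ['fb', 'fc', 'fa']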
+def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
+                               linknode, clrevtolocalrev, fullclnodes,
+                               precomputedellipsis):
+    linkparents = precomputedellipsis[linkrev]
+    def local(clrev):
+        """Turn a changelog revnum into a local revnum.
+
+        The ellipsis dag is stored as revnums on the changelog,
+        but when we're producing ellipsis entries for
+        non-changelog revlogs, we need to turn those numbers into
+        something local. This does that for us, and during the
+        changelog sending phase will also expand the stored
+        mappings as needed.
+        """
+        if clrev == nullrev:
+            return nullrev
+
+        if ischangelog:
+            return clrev
+
+        # Walk the ellipsis-ized changelog breadth-first looking for a
+        # change that has been linked from the current revlog.
+        #
+        # For a flat manifest revlog only a single step should be necessary
+        # as all relevant changelog entries are relevant to the flat
+        # manifest.
+        #
+        # For a filelog or tree manifest dirlog however not every changelog
+        # entry will have been relevant, so we need to skip some changelog
+        # nodes even after ellipsis-izing.
+        walk = [clrev]
+        while walk:
+            p = walk[0]
+            walk = walk[1:]
+            if p in clrevtolocalrev:
+                return clrevtolocalrev[p]
+            elif p in fullclnodes:
+                walk.extend([pp for pp in cl.parentrevs(p)
+                                if pp != nullrev])
+            elif p in precomputedellipsis:
+                walk.extend([pp for pp in precomputedellipsis[p]
+                                if pp != nullrev])
+            else:
+                # In this case, we've got an ellipsis with parents
+                # outside the current bundle (likely an
+                # incremental pull). We "know" that we can use the
+                # value of this same revlog at whatever revision
+                # is pointed to by linknode. "Know" is in scare
+                # quotes because I haven't done enough examination
+                # of edge cases to convince myself this is really
+                # a fact - it works for all the (admittedly
+                # thorough) cases in our testsuite, but I would be
+                # somewhat unsurprised to find a case in the wild
+                # where this breaks down a bit. That said, I don't
+                # know if it would hurt anything.
+                for i in pycompat.xrange(rev, 0, -1):
+                    if store.linkrev(i) == clrev:
+                        return i
+                # We failed to resolve a parent for this node, so
+                # we crash the changegroup construction.
+                raise error.Abort(
+                    'unable to resolve parent while packing %r %r'
+                    ' for changeset %r' % (store.indexfile, rev, clrev))
+
+        return nullrev
+
+    if not linkparents or (
+        store.parentrevs(rev) == (nullrev, nullrev)):
+        p1, p2 = nullrev, nullrev
+    elif len(linkparents) == 1:
+        p1, = sorted(local(p) for p in linkparents)
+        p2 = nullrev
+    else:
+        p1, p2 = sorted(local(p) for p in linkparents)
+
+    p1node, p2node = store.node(p1), store.node(p2)
+
+    return p1node, p2node, linknode
+
+def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
+               topic=None,
+               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
+               precomputedellipsis=None):
+    """Calculate deltas for a set of revisions.
+
+    Is a generator of ``revisiondelta`` instances.
+
+    If topic is not None, progress detail will be generated using this
+    topic name (e.g. changesets, manifests, etc).
+    """
+    if not nodes:
+        return
+
+    cl = repo.changelog
+
+    if ischangelog:
+        # `hg log` shows changesets in storage order. To preserve order
+        # across clones, send out changesets in storage order.
+        nodesorder = 'storage'
+    elif ellipses:
+        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
+        nodesorder = 'nodes'
+    else:
+        nodesorder = None
+
+    # Perform ellipses filtering and revision massaging. We do this before
+    # emitrevisions() because a) filtering out revisions creates less work
+    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
+    # assumptions about delta choices and we would possibly send a delta
+    # referencing a missing base revision.
+    #
+    # Also, calling lookup() has side-effects with regards to populating
+    # data structures. If we don't call lookup() for each node or if we call
+    # lookup() after the first pass through each node, things can break -
+    # possibly intermittently depending on the python hash seed! For that
+    # reason, we store a mapping of all linknodes during the initial node
+    # pass rather than use lookup() on the output side.
+    if ellipses:
+        filtered = []
+        adjustedparents = {}
+        linknodes = {}
+
+        for node in nodes:
+            rev = store.rev(node)
+            linknode = lookup(node)
+            linkrev = cl.rev(linknode)
+            clrevtolocalrev[linkrev] = rev
+
+            # If linknode is in fullclnodes, it means the corresponding
+            # changeset was a full changeset and is being sent unaltered.
+            if linknode in fullclnodes:
+                linknodes[node] = linknode
+
+            # If the corresponding changeset wasn't in the set computed
+            # as relevant to us, it should be dropped outright.
+            elif linkrev not in precomputedellipsis:
+                continue
+
+            else:
+                # We could probably do this later and avoid the dict
+                # holding state. But it likely doesn't matter.
+                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
+                    cl, store, ischangelog, rev, linkrev, linknode,
+                    clrevtolocalrev, fullclnodes, precomputedellipsis)
+
+                adjustedparents[node] = (p1node, p2node)
+                linknodes[node] = linknode
+
+            filtered.append(node)
+
+        nodes = filtered
+
+    # We expect the first pass to be fast, so we only engage the progress
+    # meter for constructing the revision deltas.
+    progress = None
+    if topic is not None:
+        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
+                                        total=len(nodes))
+
+    revisions = store.emitrevisions(
+        nodes,
+        nodesorder=nodesorder,
+        revisiondata=True,
+        assumehaveparentrevisions=not ellipses,
+        deltaprevious=forcedeltaparentprev)
+
+    for i, revision in enumerate(revisions):
+        if progress:
+            progress.update(i + 1)
+
+        if ellipses:
+            linknode = linknodes[revision.node]
+
+            if revision.node in adjustedparents:
+                p1node, p2node = adjustedparents[revision.node]
+                revision.p1node = p1node
+                revision.p2node = p2node
+                revision.flags |= repository.REVISION_FLAG_ELLIPSIS
+
+        else:
+            linknode = lookup(revision.node)
+
+        revision.linknode = linknode
+        yield revision
+
+    if progress:
+        progress.complete()
+
+class cgpacker(object):
+    def __init__(self, repo, oldmatcher, matcher, version,
+                 builddeltaheader, manifestsend,
+                 forcedeltaparentprev=False,
+                 bundlecaps=None, ellipses=False,
+                 shallow=False, ellipsisroots=None, fullnodes=None):
         """Given a source repo, construct a bundler.
 
+        oldmatcher is a matcher that matches on files the client already has.
+        These will not be included in the changegroup.
+
+        matcher is a matcher that matches on files to include in the
+        changegroup. Used to facilitate sparse changegroups.
+
+        forcedeltaparentprev indicates whether delta parents must be against
+        the previous revision in a delta group. This should only be used for
+        compatibility with changegroup version 1.
+
+        builddeltaheader is a callable that constructs the header for a group
+        delta.
+
+        manifestsend is a chunk to send after manifests have been fully emitted.
+
+        ellipses indicates whether ellipsis serving mode is enabled.
+
         bundlecaps is optional and can be used to specify the set of
         capabilities which can be used to build the bundle. While bundlecaps is
         unused in core Mercurial, extensions rely on this feature to communicate
         capabilities to customize the changegroup packer.
+
+        shallow indicates whether shallow data might be sent. The packer may
+        need to pack file contents not introduced by the changes being packed.
+
+        fullnodes is the set of changelog nodes which should not be ellipsis
+        nodes. We store this rather than the set of nodes that should be
+        ellipsis because for very large histories we expect this to be
+        significantly smaller.
         """
+        assert oldmatcher
+        assert matcher
+        self._oldmatcher = oldmatcher
+        self._matcher = matcher
+
+        self.version = version
+        self._forcedeltaparentprev = forcedeltaparentprev
+        self._builddeltaheader = builddeltaheader
+        self._manifestsend = manifestsend
+        self._ellipses = ellipses
+
         # Set of capabilities we can use to build the bundle.
         if bundlecaps is None:
             bundlecaps = set()
         self._bundlecaps = bundlecaps
-        # experimental config: bundle.reorder
-        reorder = repo.ui.config('bundle', 'reorder')
-        if reorder == 'auto':
-            reorder = None
-        else:
-            reorder = stringutil.parsebool(reorder)
+        self._isshallow = shallow
+        self._fullclnodes = fullnodes
+
+        # Maps ellipsis revs to their roots at the changelog level.
+        self._precomputedellipsis = ellipsisroots
+
         self._repo = repo
-        self._reorder = reorder
+
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
             self._verbosenote = lambda s: None
 
-    def close(self):
-        return closechunk()
-
-    def fileheader(self, fname):
-        return chunkheader(len(fname)) + fname
-
-    # Extracted both for clarity and for overriding in extensions.
-    def _sortgroup(self, revlog, nodelist, lookup):
-        """Sort nodes for change group and turn them into revnums."""
-        # for generaldelta revlogs, we linearize the revs; this will both be
-        # much quicker and generate a much smaller bundle
-        if (revlog._generaldelta and self._reorder is None) or self._reorder:
-            dag = dagutil.revlogdag(revlog)
-            return dag.linearize(set(revlog.rev(n) for n in nodelist))
-        else:
-            return sorted([revlog.rev(n) for n in nodelist])
-
-    def group(self, nodelist, revlog, lookup, units=None):
-        """Calculate a delta group, yielding a sequence of changegroup chunks
-        (strings).
-
-        Given a list of changeset revs, return a set of deltas and
-        metadata corresponding to nodes. The first delta is
-        first parent(nodelist[0]) -> nodelist[0], the receiver is
-        guaranteed to have this parent as it has all history before
-        these changesets. In the case firstparent is nullrev the
-        changegroup starts with a full revision.
-
-        If units is not None, progress detail will be generated, units specifies
-        the type of revlog that is touched (changelog, manifest, etc.).
+    def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
+                 changelog=True):
+        """Yield a sequence of changegroup byte chunks.
+        If changelog is False, changelog data won't be added to changegroup
         """
-        # if we don't have any revisions touched by these changesets, bail
-        if len(nodelist) == 0:
-            yield self.close()
-            return
-
-        revs = self._sortgroup(revlog, nodelist, lookup)
 
-        # add the parent of the first rev
-        p = revlog.parentrevs(revs[0])[0]
-        revs.insert(0, p)
-
-        # build deltas
-        progress = None
-        if units is not None:
-            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
-                                                  total=(len(revs) - 1))
-        for r in xrange(len(revs) - 1):
-            if progress:
-                progress.update(r + 1)
-            prev, curr = revs[r], revs[r + 1]
-            linknode = lookup(revlog.node(curr))
-            for c in self.revchunk(revlog, curr, prev, linknode):
-                yield c
-
-        if progress:
-            progress.complete()
-        yield self.close()
-
-    # filter any nodes that claim to be part of the known set
-    def prune(self, revlog, missing, commonrevs):
-        rr, rl = revlog.rev, revlog.linkrev
-        return [n for n in missing if rl(rr(n)) not in commonrevs]
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        """Pack flat manifests into a changegroup stream."""
-        assert not dir
-        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
-                                lookuplinknode, units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return ''
-
-    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
-        '''yield a sequence of changegroup chunks (strings)'''
         repo = self._repo
         cl = repo.changelog
 
-        clrevorder = {}
-        mfs = {} # needed manifests
-        fnodes = {} # needed file nodes
-        changedfiles = set()
-
-        # Callback for the changelog, used to collect changed files and manifest
-        # nodes.
-        # Returns the linkrev node (identity in the changelog case).
-        def lookupcl(x):
-            c = cl.read(x)
-            clrevorder[x] = len(clrevorder)
-            n = c[0]
-            # record the first changeset introducing this manifest version
-            mfs.setdefault(n, x)
-            # Record a complete list of potentially-changed files in
-            # this manifest.
-            changedfiles.update(c[3])
-            return x
-
         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
-        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
-            size += len(chunk)
-            yield chunk
+
+        clstate, deltas = self._generatechangelog(cl, clnodes)
+        for delta in deltas:
+            if changelog:
+                for chunk in _revisiondeltatochunks(delta,
+                                                    self._builddeltaheader):
+                    size += len(chunk)
+                    yield chunk
+
+        close = closechunk()
+        size += len(close)
+        yield close
+
         self._verbosenote(_('%8.i (changelog)\n') % size)
 
+        clrevorder = clstate['clrevorder']
+        manifests = clstate['manifests']
+        changedfiles = clstate['changedfiles']
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
         # are walked in revlog order.
         #
-        # When taking the slowpath with reorder=None and the manifest revlog
-        # uses generaldelta, the manifest may be walked in the "wrong" order.
-        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
-        # cc0ff93d0c0c).
+        # When taking the slowpath when the manifest revlog uses generaldelta,
+        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
+        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
         #
         # When taking the fastpath, we are only vulnerable to reordering
-        # of the changelog itself. The changelog never uses generaldelta, so
-        # it is only reordered when reorder=True. To handle this case, we
-        # simply take the slowpath, which already has the 'clrevorder' logic.
-        # This was also fixed in cc0ff93d0c0c.
-        fastpathlinkrev = fastpathlinkrev and not self._reorder
+        # of the changelog itself. The changelog never uses generaldelta and is
+        # never reordered. To handle this case, we simply take the slowpath,
+        # which already has the 'clrevorder' logic. This was also fixed in
+        # cc0ff93d0c0c.
+
         # Treemanifests don't work correctly with fastpathlinkrev
         # either, because we don't discover which directory nodes to
         # send along with files. This could probably be fixed.
         fastpathlinkrev = fastpathlinkrev and (
             'treemanifest' not in repo.requirements)
 
-        for chunk in self.generatemanifests(commonrevs, clrevorder,
-                fastpathlinkrev, mfs, fnodes, source):
-            yield chunk
-        mfs.clear()
+        fnodes = {}  # needed file nodes
+
+        size = 0
+        it = self.generatemanifests(
+            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
+            clstate['clrevtomanifestrev'])
+
+        for tree, deltas in it:
+            if tree:
+                assert self.version == b'03'
+                chunk = _fileheader(tree)
+                size += len(chunk)
+                yield chunk
+
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
+
+            close = closechunk()
+            size += len(close)
+            yield close
+
+        self._verbosenote(_('%8.i (manifests)\n') % size)
+        yield self._manifestsend
+
+        mfdicts = None
+        if self._ellipses and self._isshallow:
+            mfdicts = [(self._repo.manifestlog[n].read(), lr)
+                       for (n, lr) in manifests.iteritems()]
+
+        manifests.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
-        if not fastpathlinkrev:
-            def linknodes(unused, fname):
-                return fnodes.get(fname, {})
-        else:
-            cln = cl.node
-            def linknodes(filerevlog, fname):
-                llr = filerevlog.linkrev
-                fln = filerevlog.node
-                revs = ((r, llr(r)) for r in filerevlog)
-                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
+        it = self.generatefiles(changedfiles, commonrevs,
+                                source, mfdicts, fastpathlinkrev,
+                                fnodes, clrevs)
+
+        for path, deltas in it:
+            h = _fileheader(path)
+            size = len(h)
+            yield h
 
-        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
-                                        source):
-            yield chunk
+            for delta in deltas:
+                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
+                for chunk in chunks:
+                    size += len(chunk)
+                    yield chunk
 
-        yield self.close()
+            close = closechunk()
+            size += len(close)
+            yield close
+
+            self._verbosenote(_('%8.i  %s\n') % (size, path))
+
+        yield closechunk()
 
         if clnodes:
             repo.hook('outgoing', node=hex(clnodes[0]), source=source)
 
-    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
-                          fnodes, source):
+    def _generatechangelog(self, cl, nodes):
+        """Generate data for changelog chunks.
+
+        Returns a 2-tuple of a dict containing state and an iterable of
+        revisiondelta objects. The state will not be fully populated until
+        the delta stream has been fully consumed.
+        """
+        clrevorder = {}
+        manifests = {}
+        mfl = self._repo.manifestlog
+        changedfiles = set()
+        clrevtomanifestrev = {}
+
+        # Callback for the changelog, used to collect changed files and
+        # manifest nodes.
+        # Returns the linkrev node (identity in the changelog case).
+        def lookupcl(x):
+            c = cl.changelogrevision(x)
+            clrevorder[x] = len(clrevorder)
+
+            if self._ellipses:
+                # Only update manifests if x is going to be sent. Otherwise we
+                # end up with bogus linkrevs specified for manifests and
+                # we skip some manifest nodes that we should otherwise
+                # have sent.
+                if (x in self._fullclnodes
+                    or cl.rev(x) in self._precomputedellipsis):
+
+                    manifestnode = c.manifest
+                    # Record the first changeset introducing this manifest
+                    # version.
+                    manifests.setdefault(manifestnode, x)
+                    # Set this narrow-specific dict so we have the lowest
+                    # manifest revnum to look up for this cl revnum. (Part of
+                    # mapping changelog ellipsis parents to manifest ellipsis
+                    # parents)
+                    clrevtomanifestrev.setdefault(
+                        cl.rev(x), mfl.rev(manifestnode))
+                # We can't trust the changed files list in the changeset if the
+                # client requested a shallow clone.
+                if self._isshallow:
+                    changedfiles.update(mfl[c.manifest].read().keys())
+                else:
+                    changedfiles.update(c.files)
+            else:
+                # record the first changeset introducing this manifest version
+                manifests.setdefault(c.manifest, x)
+                # Record a complete list of potentially-changed files in
+                # this manifest.
+                changedfiles.update(c.files)
+
+            return x
+
+        state = {
+            'clrevorder': clrevorder,
+            'manifests': manifests,
+            'changedfiles': changedfiles,
+            'clrevtomanifestrev': clrevtomanifestrev,
+        }
+
+        gen = deltagroup(
+            self._repo, cl, nodes, True, lookupcl,
+            self._forcedeltaparentprev,
+            ellipses=self._ellipses,
+            topic=_('changesets'),
+            clrevtolocalrev={},
+            fullclnodes=self._fullclnodes,
+            precomputedellipsis=self._precomputedellipsis)
+
+        return state, gen
+
+    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
+                          manifests, fnodes, source, clrevtolocalrev):
         """Returns an iterator of changegroup chunks containing manifests.
 
         `source` is unused here, but is used by extensions like remotefilelog to
@@ -683,16 +984,15 @@
         """
         repo = self._repo
         mfl = repo.manifestlog
-        dirlog = mfl._revlog.dirlog
-        tmfnodes = {'': mfs}
+        tmfnodes = {'': manifests}
 
         # Callback for the manifest, used to collect linkrevs for filelog
         # revisions.
         # Returns the linkrev node (collected in lookupcl).
-        def makelookupmflinknode(dir, nodes):
+        def makelookupmflinknode(tree, nodes):
             if fastpathlinkrev:
-                assert not dir
-                return mfs.__getitem__
+                assert not tree
+                return manifests.__getitem__
 
             def lookupmflinknode(x):
                 """Callback for looking up the linknode for manifests.
@@ -711,16 +1011,16 @@
                 treemanifests to send.
                 """
                 clnode = nodes[x]
-                mdata = mfl.get(dir, x).readfast(shallow=True)
+                mdata = mfl.get(tree, x).readfast(shallow=True)
                 for p, n, fl in mdata.iterentries():
                     if fl == 't': # subdirectory manifest
-                        subdir = dir + p + '/'
-                        tmfclnodes = tmfnodes.setdefault(subdir, {})
+                        subtree = tree + p + '/'
+                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                         tmfclnode = tmfclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[tmfclnode]:
                             tmfclnodes[n] = clnode
                     else:
-                        f = dir + p
+                        f = tree + p
                         fclnodes = fnodes.setdefault(f, {})
                         fclnode = fclnodes.setdefault(n, clnode)
                         if clrevorder[clnode] < clrevorder[fclnode]:
@@ -728,22 +1028,104 @@
                 return clnode
             return lookupmflinknode
 
-        size = 0
         while tmfnodes:
-            dir, nodes = tmfnodes.popitem()
-            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
-            if not dir or prunednodes:
-                for x in self._packmanifests(dir, prunednodes,
-                                             makelookupmflinknode(dir, nodes)):
-                    size += len(x)
-                    yield x
-        self._verbosenote(_('%8.i (manifests)\n') % size)
-        yield self._manifestsdone()
+            tree, nodes = tmfnodes.popitem()
+            store = mfl.getstorage(tree)
+
+            if not self._matcher.visitdir(store.tree[:-1] or '.'):
+                # No nodes to send because this directory is out of
+                # the client's view of the repository (probably
+                # because of narrow clones).
+                prunednodes = []
+            else:
+                # Avoid sending any manifest nodes we can prove the
+                # client already has by checking linkrevs. See the
+                # related comment in generatefiles().
+                prunednodes = self._prunemanifests(store, nodes, commonrevs)
+            if tree and not prunednodes:
+                continue
+
+            lookupfn = makelookupmflinknode(tree, nodes)
+
+            deltas = deltagroup(
+                self._repo, store, prunednodes, False, lookupfn,
+                self._forcedeltaparentprev,
+                ellipses=self._ellipses,
+                topic=_('manifests'),
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
+                yield tree, deltas
+            else:
+                # 'deltas' is a generator and we need to consume it even if
+                # we are not going to send it because a side-effect is that
+                # it updates tmfnodes (via lookupfn)
+                for d in deltas:
+                    pass
+                if not tree:
+                    yield tree, []
+
+    def _prunemanifests(self, store, nodes, commonrevs):
+        # This is split out as a separate method to allow filtering
+        # commonrevs in extension code.
+        #
+        # TODO(augie): this shouldn't be required, instead we should
+        # make filtering of revisions to send delegated to the store
+        # layer.
+        frev, flr = store.rev, store.linkrev
+        return [n for n in nodes if flr(frev(n)) not in commonrevs]
 
     # The 'source' parameter is useful for extensions
-    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
+    def generatefiles(self, changedfiles, commonrevs, source,
+                      mfdicts, fastpathlinkrev, fnodes, clrevs):
+        changedfiles = [f for f in changedfiles
+                        if self._matcher(f) and not self._oldmatcher(f)]
+
+        if not fastpathlinkrev:
+            def normallinknodes(unused, fname):
+                return fnodes.get(fname, {})
+        else:
+            cln = self._repo.changelog.node
+
+            def normallinknodes(store, fname):
+                flinkrev = store.linkrev
+                fnode = store.node
+                revs = ((r, flinkrev(r)) for r in store)
+                return dict((fnode(r), cln(lr))
+                            for r, lr in revs if lr in clrevs)
+
+        clrevtolocalrev = {}
+
+        if self._isshallow:
+            # In a shallow clone, the linknodes callback needs to also include
+            # those file nodes that are in the manifests we sent but weren't
+            # introduced by those manifests.
+            commonctxs = [self._repo[c] for c in commonrevs]
+            clrev = self._repo.changelog.rev
+
+            def linknodes(flog, fname):
+                for c in commonctxs:
+                    try:
+                        fnode = c.filenode(fname)
+                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
+                    except error.ManifestLookupError:
+                        pass
+                links = normallinknodes(flog, fname)
+                if len(links) != len(mfdicts):
+                    for mf, lr in mfdicts:
+                        fnode = mf.get(fname, None)
+                        if fnode in links:
+                            links[fnode] = min(links[fnode], lr, key=clrev)
+                        elif fnode:
+                            links[fnode] = lr
+                return links
+        else:
+            linknodes = normallinknodes
+
         repo = self._repo
-        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
+        progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                         total=len(changedfiles))
         for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
@@ -751,129 +1133,90 @@
                 raise error.Abort(_("empty or missing file data for %s") %
                                   fname)
 
+            clrevtolocalrev.clear()
+
             linkrevnodes = linknodes(filerevlog, fname)
             # Lookup for filenodes, we collected the linkrev nodes above in the
             # fastpath case and with lookupmf in the slowpath case.
             def lookupfilelog(x):
                 return linkrevnodes[x]
 
-            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
-            if filenodes:
-                progress.update(i + 1, item=fname)
-                h = self.fileheader(fname)
-                size = len(h)
-                yield h
-                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
-                    size += len(chunk)
-                    yield chunk
-                self._verbosenote(_('%8.i  %s\n') % (size, fname))
+            frev, flr = filerevlog.rev, filerevlog.linkrev
+            # Skip sending any filenode we know the client already
+            # has. This check is relatively inexpensive and avoids
+            # over-sending files, so it's not a problem if we
+            # under-filter here.
+            filenodes = [n for n in linkrevnodes
+                         if flr(frev(n)) not in commonrevs]
+
+            if not filenodes:
+                continue
+
+            progress.update(i + 1, item=fname)
+
+            deltas = deltagroup(
+                self._repo, filerevlog, filenodes, False, lookupfilelog,
+                self._forcedeltaparentprev,
+                ellipses=self._ellipses,
+                clrevtolocalrev=clrevtolocalrev,
+                fullclnodes=self._fullclnodes,
+                precomputedellipsis=self._precomputedellipsis)
+
+            yield fname, deltas
+
         progress.complete()
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        if not revlog.candelta(prev, rev):
-            raise error.ProgrammingError('cg1 should not be used in this case')
-        return prev
-
-    def revchunk(self, revlog, rev, prev, linknode):
-        node = revlog.node(rev)
-        p1, p2 = revlog.parentrevs(rev)
-        base = self.deltaparent(revlog, rev, p1, p2, prev)
+def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
+                   ellipses=False, shallow=False, ellipsisroots=None,
+                   fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.linknode)
 
-        prefix = ''
-        if revlog.iscensored(base) or revlog.iscensored(rev):
-            try:
-                delta = revlog.revision(node, raw=True)
-            except error.CensoredNodeError as e:
-                delta = e.tombstone
-            if base == nullrev:
-                prefix = mdiff.trivialdiffheader(len(delta))
-            else:
-                baselen = revlog.rawsize(base)
-                prefix = mdiff.replacediffheader(baselen, len(delta))
-        elif base == nullrev:
-            delta = revlog.revision(node, raw=True)
-            prefix = mdiff.trivialdiffheader(len(delta))
-        else:
-            delta = revlog.revdiff(base, rev)
-        p1n, p2n = revlog.parents(node)
-        basenode = revlog.node(base)
-        flags = revlog.flags(rev)
-        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
-        meta += prefix
-        l = len(meta) + len(delta)
-        yield chunkheader(l)
-        yield meta
-        yield delta
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # do nothing with basenode, it is implicitly the previous one in HG10
-        # do nothing with flags, it is implicitly 0 for cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+    return cgpacker(repo, oldmatcher, matcher, b'01',
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    forcedeltaparentprev=True,
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-class cg2packer(cg1packer):
-    version = '02'
-    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
-
-    def __init__(self, repo, bundlecaps=None):
-        super(cg2packer, self).__init__(repo, bundlecaps)
-        if self._reorder is None:
-            # Since generaldelta is directly supported by cg2, reordering
-            # generally doesn't help, so we disable it by default (treating
-            # bundle.reorder=auto just like bundle.reorder=False).
-            self._reorder = False
+def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
+                   ellipses=False, shallow=False, ellipsisroots=None,
+                   fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode)
 
-    def deltaparent(self, revlog, rev, p1, p2, prev):
-        dp = revlog.deltaparent(rev)
-        if dp == nullrev and revlog.storedeltachains:
-            # Avoid sending full revisions when delta parent is null. Pick prev
-            # in that case. It's tempting to pick p1 in this case, as p1 will
-            # be smaller in the common case. However, computing a delta against
-            # p1 may require resolving the raw text of p1, which could be
-            # expensive. The revlog caches should have prev cached, meaning
-            # less CPU for changegroup generation. There is likely room to add
-            # a flag and/or config option to control this behavior.
-            base = prev
-        elif dp == nullrev:
-            # revlog is configured to use full snapshot for a reason,
-            # stick to full snapshot.
-            base = nullrev
-        elif dp not in (p1, p2, prev):
-            # Pick prev when we can't be sure remote has the base revision.
-            return prev
-        else:
-            base = dp
-        if base != nullrev and not revlog.candelta(base, rev):
-            base = nullrev
-        return base
+    return cgpacker(repo, oldmatcher, matcher, b'02',
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=b'',
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
+
+def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
+                   ellipses=False, shallow=False, ellipsisroots=None,
+                   fullnodes=None):
+    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
+        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
 
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
-        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
-
-class cg3packer(cg2packer):
-    version = '03'
-    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
-
-    def _packmanifests(self, dir, mfnodes, lookuplinknode):
-        if dir:
-            yield self.fileheader(dir)
+    return cgpacker(repo, oldmatcher, matcher, b'03',
+                    builddeltaheader=builddeltaheader,
+                    manifestsend=closechunk(),
+                    bundlecaps=bundlecaps,
+                    ellipses=ellipses,
+                    shallow=shallow,
+                    ellipsisroots=ellipsisroots,
+                    fullnodes=fullnodes)
 
-        dirlog = self._repo.manifestlog._revlog.dirlog(dir)
-        for chunk in self.group(mfnodes, dirlog, lookuplinknode,
-                                units=_('manifests')):
-            yield chunk
-
-    def _manifestsdone(self):
-        return self.close()
-
-    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
-        return struct.pack(
-            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
-
-_packermap = {'01': (cg1packer, cg1unpacker),
+_packermap = {'01': (_makecg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
-             '02': (cg2packer, cg2unpacker),
+             '02': (_makecg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
-             '03': (cg3packer, cg3unpacker),
+             '03': (_makecg3packer, cg3unpacker),
 }
 
 def allsupportedversions(repo):
@@ -899,7 +1242,7 @@
         # support versions 01 and 02.
         versions.discard('01')
         versions.discard('02')
-    if NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         # Versions 01 and 02 don't support revlog flags, and we need to
         # support that for stripping and unbundling to work.
         versions.discard('01')
@@ -927,9 +1270,33 @@
     assert versions
     return min(versions)
 
-def getbundler(version, repo, bundlecaps=None):
+def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
+               matcher=None, ellipses=False, shallow=False,
+               ellipsisroots=None, fullnodes=None):
     assert version in supportedoutgoingversions(repo)
-    return _packermap[version][0](repo, bundlecaps)
+
+    if matcher is None:
+        matcher = matchmod.alwaysmatcher(repo.root, '')
+    if oldmatcher is None:
+        oldmatcher = matchmod.nevermatcher(repo.root, '')
+
+    if version == '01' and not matcher.always():
+        raise error.ProgrammingError('version 01 changegroups do not support '
+                                     'sparse file matchers')
+
+    if ellipses and version in (b'01', b'02'):
+        raise error.Abort(
+            _('ellipsis nodes require at least cg3 on client and server, '
+              'but negotiated version %s') % version)
+
+    # Requested files could include files not in the local store. So
+    # filter those out.
+    matcher = repo.narrowmatch(matcher)
+
+    fn = _packermap[version][0]
+    return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
+              shallow=shallow, ellipsisroots=ellipsisroots,
+              fullnodes=fullnodes)
 
 def getunbundler(version, fh, alg, extras=None):
     return _packermap[version][1](fh, alg, extras=extras)
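
With the cg1packer/cg2packer/cg3packer classes gone, _packermap pairs each
wire version with a factory function and an unpacker class. A hedged sketch
of how a caller ends up at one of the factories (assuming exchange code
supplies `repo`; illustrative, not part of the patch):

    from mercurial import changegroup

    def makebundler(repo):
        # Pick the newest version this repo can produce; getbundler()
        # then dispatches through _packermap to a _makecg*packer factory,
        # which bakes the delta header struct and manifest terminator
        # into a single cgpacker instance.
        version = max(changegroup.supportedoutgoingversions(repo))
        return changegroup.getbundler(version, repo)

Note the guards in getbundler(): version '01' refuses sparse file matchers,
and ellipsis mode aborts on anything older than cg3, since only cg3 carries
the revlog flags that ellipsis nodes need.
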
@@ -950,8 +1317,9 @@
                         {'clcount': len(outgoing.missing) })
 
 def makestream(repo, outgoing, version, source, fastpath=False,
-               bundlecaps=None):
-    bundler = getbundler(version, repo, bundlecaps=bundlecaps)
+               bundlecaps=None, matcher=None):
+    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
+                         matcher=matcher)
 
     repo = repo.unfiltered()
     commonrevs = outgoing.common
@@ -989,7 +1357,7 @@
         revisions += len(fl) - o
         if f in needfiles:
             needs = needfiles[f]
-            for new in xrange(o, len(fl)):
+            for new in pycompat.xrange(o, len(fl)):
                 n = fl.node(new)
                 if n in needs:
                     needs.remove(n)
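
One behavior of the rewritten cgpacker worth spelling out: the state dict
returned by _generatechangelog() is populated lazily, as a side effect of
lookupcl() running while the delta generator is consumed. A minimal sketch
(assuming a constructed cgpacker instance `packer`; illustrative only):

    def consumechangelog(packer, cl, clnodes):
        clstate, deltas = packer._generatechangelog(cl, clnodes)
        for delta in deltas:
            # Draining the generator drives lookupcl(), which fills in
            # clrevorder, manifests, changedfiles and clrevtomanifestrev.
            pass
        # Only now is the state complete and safe to read.
        return clstate['manifests'], clstate['changedfiles']

This is why generate() streams every changelog chunk (and the closing chunk)
before it reads clstate['clrevorder'] and friends.
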
--- a/mercurial/changelog.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/changelog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,6 @@
     error,
     pycompat,
     revlog,
-    util,
 )
 from .utils import (
     dateutil,
@@ -304,7 +303,7 @@
         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each. So disable delta
         # chains.
-        self.storedeltachains = False
+        self._storedeltachains = False
 
         self._realopener = opener
         self._delayed = False
@@ -313,7 +312,7 @@
         self.filteredrevs = frozenset()
 
     def tiprev(self):
-        for i in xrange(len(self) -1, -2, -1):
+        for i in pycompat.xrange(len(self) -1, -2, -1):
             if i not in self.filteredrevs:
                 return i
 
@@ -332,7 +331,7 @@
             return revlog.revlog.__iter__(self)
 
         def filterediter():
-            for i in xrange(len(self)):
+            for i in pycompat.xrange(len(self)):
                 if i not in self.filteredrevs:
                     yield i
 
@@ -344,12 +343,6 @@
             if i not in self.filteredrevs:
                 yield i
 
-    @util.propertycache
-    def nodemap(self):
-        # XXX need filtering too
-        self.rev(self.node(0))
-        return self._nodecache
-
     def reachableroots(self, minroot, heads, roots, includepath=False):
         return self.index.reachableroots2(minroot, heads, roots, includepath)
 
@@ -520,10 +513,10 @@
         # revision text contain two "\n\n" sequences -> corrupt
         # repository since read cannot unpack the revision.
         if not user:
-            raise error.RevlogError(_("empty username"))
+            raise error.StorageError(_("empty username"))
         if "\n" in user:
-            raise error.RevlogError(_("username %r contains a newline")
-                                    % pycompat.bytestr(user))
+            raise error.StorageError(_("username %r contains a newline")
+                                     % pycompat.bytestr(user))
 
         desc = stripdesc(desc)
 
@@ -536,8 +529,8 @@
             if branch in ("default", ""):
                 del extra["branch"]
             elif branch in (".", "null", "tip"):
-                raise error.RevlogError(_('the name \'%s\' is reserved')
-                                        % branch)
+                raise error.StorageError(_('the name \'%s\' is reserved')
+                                         % branch)
         if extra:
             extra = encodeextra(extra)
             parseddate = "%s %s" % (parseddate, extra)
@@ -553,18 +546,9 @@
         extra = self.read(rev)[5]
         return encoding.tolocal(extra.get("branch")), 'close' in extra
 
-    def _addrevision(self, node, rawtext, transaction, *args, **kwargs):
-        # overlay over the standard revlog._addrevision to track the new
-        # revision on the transaction.
-        rev = len(self)
-        node = super(changelog, self)._addrevision(node, rawtext, transaction,
-                                                   *args, **kwargs)
-        revs = transaction.changes.get('revs')
-        if revs is not None:
-            if revs:
-                assert revs[-1] + 1 == rev
-                revs = xrange(revs[0], rev + 1)
-            else:
-                revs = xrange(rev, rev + 1)
-            transaction.changes['revs'] = revs
-        return node
+    def _nodeduplicatecallback(self, transaction, node):
+        # keep track of revisions that got "re-added", e.g. an unbundle of a known rev.
+        #
+        # We track them in a list to preserve their order from the source bundle
+        duplicates = transaction.changes.setdefault('revduplicates', [])
+        duplicates.append(self.rev(node))
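
The changelog side of this merge also replaces the _addrevision() override
with the narrower _nodeduplicatecallback() hook: rather than rebuilding
transaction.changes['revs'] on every append, the changelog now only records
revisions that were "re-added". A hedged sketch of a consumer (assumed hook
context with a transaction `tr`; illustrative only):

    def reportduplicates(repo, tr):
        # 'revduplicates' preserves the order in which already-known
        # revisions were re-encountered in the source bundle.
        for rev in tr.changes.get('revduplicates', []):
            repo.ui.note(b'revision %d was already present\n' % rev)
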
--- a/mercurial/chgserver.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/chgserver.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,8 @@
 'setenv' command
     replace os.environ completely
 
-'setumask' command
+'setumask' command (DEPRECATED)
+'setumask2' command
     set umask
 
 'validate' command
@@ -313,6 +314,7 @@
             _newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
             repo, fin, fout)
         self.clientsock = sock
+        self._ioattached = False
         self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
         self.hashstate = hashstate
         self.baseaddress = baseaddress
@@ -326,6 +328,7 @@
         # handled by dispatch._dispatch()
         self.ui.flush()
         self._restoreio()
+        self._ioattached = False
 
     def attachio(self):
         """Attach to client's stdio passed via unix domain socket; all
@@ -339,13 +342,13 @@
 
         ui = self.ui
         ui.flush()
-        first = self._saveio()
+        self._saveio()
         for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
             assert fd > 0
             fp = getattr(ui, fn)
             os.dup2(fd, fp.fileno())
             os.close(fd)
-            if not first:
+            if self._ioattached:
                 continue
             # reset buffering mode when client is first attached. as we want
             # to see output immediately on pager, the mode stays unchanged
@@ -364,18 +367,18 @@
                 setattr(ui, fn, newfp)
             setattr(self, cn, newfp)
 
+        self._ioattached = True
         self.cresult.write(struct.pack('>i', len(clientfds)))
 
     def _saveio(self):
         if self._oldios:
-            return False
+            return
         ui = self.ui
         for cn, fn, _mode in _iochannels:
             ch = getattr(self, cn)
             fp = getattr(ui, fn)
             fd = os.dup(fp.fileno())
             self._oldios.append((ch, fp, fd))
-        return True
 
     def _restoreio(self):
         ui = self.ui
@@ -422,6 +425,13 @@
             self.ui.flush()
             self.cresult.write('exit 255')
             return
+        except error.Abort as inst:
+            self.ui.error(_("abort: %s\n") % inst)
+            if inst.hint:
+                self.ui.error(_("(%s)\n") % inst.hint)
+            self.ui.flush()
+            self.cresult.write('exit 255')
+            return
         newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
         insts = []
         if newhash.mtimehash != self.hashstate.mtimehash:
@@ -450,13 +460,34 @@
         os.chdir(path)
 
     def setumask(self):
+        """Change umask (DEPRECATED)"""
+        # BUG: this does not follow the message frame structure, but is kept
+        # for backward compatibility with old chg clients for some time
+        self._setumask(self._read(4))
+
+    def setumask2(self):
         """Change umask"""
-        mask = struct.unpack('>I', self._read(4))[0]
+        data = self._readstr()
+        if len(data) != 4:
+            raise ValueError('invalid mask length in setumask2 request')
+        self._setumask(data)
+
+    def _setumask(self, data):
+        mask = struct.unpack('>I', data)[0]
         _log('setumask %r\n' % mask)
         os.umask(mask)
 
     def runcommand(self):
-        return super(chgcmdserver, self).runcommand()
+        # A pager may be attached within the runcommand session; it should
+        # be detached at the end of the session, as otherwise the pager
+        # wouldn't receive EOF.
+        globaloldios = self._oldios
+        self._oldios = []
+        try:
+            return super(chgcmdserver, self).runcommand()
+        finally:
+            self._restoreio()
+            self._oldios = globaloldios
 
     def setenv(self):
         """Clear and update os.environ
@@ -477,7 +508,8 @@
                          'chdir': chdir,
                          'runcommand': runcommand,
                          'setenv': setenv,
-                         'setumask': setumask})
+                         'setumask': setumask,
+                         'setumask2': setumask2})
 
     if util.safehasattr(procutil, 'setprocname'):
         def setprocname(self):
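
For context on the setumask/setumask2 split above: the old command read a
bare 4-byte mask straight off the pipe, which is why it is flagged as not
following the message frame structure. A hedged sketch of the two payload
shapes (assuming the command server's usual uint32 length-prefixed string
frames; illustrative only):

    import struct

    def umaskpayloads(mask):
        data = struct.pack('>I', mask)  # the 4-byte big-endian mask
        legacy = data                   # old 'setumask': bare bytes
        framed = struct.pack('>I', len(data)) + data  # new 'setumask2'
        return legacy, framed

On the server, _readstr() strips the length prefix, and setumask2() rejects
any payload that is not exactly four bytes.
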
--- a/mercurial/cmdutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/cmdutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -97,10 +97,9 @@
      _('record the specified user as committer'), _('USER')),
 ]
 
-# hidden for now
 formatteropts = [
     ('T', 'template', '',
-     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
+     _('display with template'), _('TEMPLATE')),
 ]
 
 templateopts = [
@@ -150,7 +149,7 @@
 ]
 
 mergetoolopts = [
-    ('t', 'tool', '', _('specify merge tool')),
+    ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
 ]
 
 similarityopts = [
@@ -186,7 +185,7 @@
     return newlyaddedandmodifiedfiles
 
 def parsealiases(cmd):
-    return cmd.lstrip("^").split("|")
+    return cmd.split("|")
 
 def setupwrapcolorwrite(ui):
     # wrap ui.write so diff output can be labeled/colorized
@@ -366,8 +365,8 @@
             if backups:
                 # Equivalent to hg.revert
                 m = scmutil.matchfiles(repo, backups.keys())
-                mergemod.update(repo, repo.dirstate.p1(),
-                        False, True, matcher=m)
+                mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
+                                force=True, matcher=m)
 
             # 3b. (apply)
             if dopatch:
@@ -581,8 +580,8 @@
     unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
     if unresolvedlist:
         mergeliststr = '\n'.join(
-            ['    %s' % util.pathto(repo.root, pycompat.getcwd(), path)
-             for path in unresolvedlist])
+            ['    %s' % util.pathto(repo.root, encoding.getcwd(), path)
+             for path in sorted(unresolvedlist)])
         msg = _('''Unresolved merge conflicts:
 
 %s
@@ -607,17 +606,13 @@
 def _unshelvemsg():
     return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
 
-def _updatecleanmsg(dest=None):
-    warning = _('warning: this will discard uncommitted changes')
-    return 'hg update --clean %s (%s)' % (dest or '.', warning)
-
 def _graftmsg():
     # tweakdefaults requires `update` to have a rev hence the `.`
-    return _helpmessage('hg graft --continue', _updatecleanmsg())
+    return _helpmessage('hg graft --continue', 'hg graft --abort')
 
 def _mergemsg():
     # tweakdefaults requires `update` to have a rev hence the `.`
-     return _helpmessage('hg commit', _updatecleanmsg())
+    return _helpmessage('hg commit', 'hg merge --abort')
 
 def _bisectmsg():
     msg = _('To mark the changeset good:    hg bisect --good\n'
@@ -658,16 +653,15 @@
     statetuple = _getrepostate(repo)
     label = 'status.morestatus'
     if statetuple:
-        fm.startitem()
         state, statedetectionpredicate, helpfulmsg = statetuple
         statemsg = _('The repository is in an unfinished *%s* state.') % state
-        fm.write('statemsg', '%s\n',  _commentlines(statemsg), label=label)
+        fm.plain('%s\n' % _commentlines(statemsg), label=label)
         conmsg = _conflictsmsg(repo)
         if conmsg:
-            fm.write('conflictsmsg', '%s\n', conmsg, label=label)
+            fm.plain('%s\n' % conmsg, label=label)
         if helpfulmsg:
             helpmsg = helpfulmsg()
-            fm.write('helpmsg', '%s\n', helpmsg, label=label)
+            fm.plain('%s\n' % helpmsg, label=label)
 
 def findpossible(cmd, table, strict=False):
     """
@@ -679,7 +673,7 @@
     debugchoice = {}
 
     if cmd in table:
-        # short-circuit exact matches, "log" alias beats "^log|history"
+        # short-circuit exact matches, "log" alias beats "log|history"
         keys = [cmd]
     else:
         keys = table.keys()
@@ -867,7 +861,7 @@
     if isinstance(ctxorbool, bool):
         if ctxorbool:
             return baseformname + ".merge"
-    elif 1 < len(ctxorbool.parents()):
+    elif len(ctxorbool.parents()) > 1:
         return baseformname + ".merge"
 
     return baseformname + ".normal"
@@ -1058,7 +1052,7 @@
     fn = makefilename(ctx, pat, **props)
     return open(fn, mode)
 
-def openrevlog(repo, cmd, file_, opts):
+def openstorage(repo, cmd, file_, opts, returnrevlog=False):
     """opens the changelog, manifest, a filelog or a given revlog"""
     cl = opts['changelog']
     mf = opts['manifest']
@@ -1087,24 +1081,50 @@
                                    "treemanifest enabled"))
             if not dir.endswith('/'):
                 dir = dir + '/'
-            dirlog = repo.manifestlog._revlog.dirlog(dir)
+            dirlog = repo.manifestlog.getstorage(dir)
             if len(dirlog):
                 r = dirlog
         elif mf:
-            r = repo.manifestlog._revlog
+            r = repo.manifestlog.getstorage(b'')
         elif file_:
             filelog = repo.file(file_)
             if len(filelog):
                 r = filelog
+
+        # Not all storage may be revlogs. If requested, try to return an actual
+        # revlog instance.
+        if returnrevlog:
+            if isinstance(r, revlog.revlog):
+                pass
+            elif util.safehasattr(r, '_revlog'):
+                r = r._revlog
+            elif r is not None:
+                raise error.Abort(_('%r does not appear to be a revlog') % r)
+
     if not r:
+        if not returnrevlog:
+            raise error.Abort(_('cannot give path to non-revlog'))
+
         if not file_:
             raise error.CommandError(cmd, _('invalid arguments'))
         if not os.path.isfile(file_):
             raise error.Abort(_("revlog '%s' not found") % file_)
-        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
+        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                           file_[:-2] + ".i")
     return r
 
+def openrevlog(repo, cmd, file_, opts):
+    """Obtain a revlog backing storage of an item.
+
+    This is similar to ``openstorage()`` except it always returns a revlog.
+
+    In most cases, a caller cares about the main storage object - not the
+    revlog backing it. Therefore, this function should only be used by code
+    that needs to examine low-level revlog implementation details. e.g. debug
+    commands.
+    """
+    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
+
 def copy(ui, repo, pats, opts, rename=False):
     # called with the repo lock held
     #
@@ -1162,7 +1182,7 @@
             ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                     (reltarget, repo.pathto(abssrc, cwd),
                      repo.pathto(prevsrc, cwd)))
-            return
+            return True # report a failure
 
         # check for overwrites
         exists = os.path.lexists(target)
@@ -1172,7 +1192,7 @@
                 repo.dirstate.normalize(abstarget)):
                 if not rename:
                     ui.warn(_("%s: can't copy - same file\n") % reltarget)
-                    return
+                    return True # report a failure
                 exists = False
                 samefile = True
 
@@ -1185,20 +1205,20 @@
                     else:
                         flags = '--force'
                     if rename:
-                        hint = _('(hg rename %s to replace the file by '
+                        hint = _("('hg rename %s' to replace the file by "
                                  'recording a rename)\n') % flags
                     else:
-                        hint = _('(hg copy %s to replace the file by '
+                        hint = _("('hg copy %s' to replace the file by "
                                  'recording a copy)\n') % flags
                 else:
                     msg = _('%s: not overwriting - file exists\n')
                     if rename:
-                        hint = _('(hg rename --after to record the rename)\n')
+                        hint = _("('hg rename --after' to record the rename)\n")
                     else:
-                        hint = _('(hg copy --after to record the copy)\n')
+                        hint = _("('hg copy --after' to record the copy)\n")
                 ui.warn(msg % reltarget)
                 ui.warn(hint)
-                return
+                return True # report a failure
 
         if after:
             if not exists:
@@ -1208,7 +1228,7 @@
                 else:
                     ui.warn(_('%s: not recording copy - %s does not exist\n') %
                             (relsrc, reltarget))
-                return
+                return True # report a failure
         elif not dryrun:
             try:
                 if exists:
@@ -1232,6 +1252,10 @@
                 else:
                     ui.warn(_('%s: cannot copy - %s\n') %
                             (relsrc, encoding.strtolocal(inst.strerror)))
+                    if rename:
+                        hint = _("('hg rename --after' to record the rename)\n")
+                    else:
+                        hint = _("('hg copy --after' to record the copy)\n")
                     return True # report a failure
 
         if ui.verbose or not exact:
@@ -1349,9 +1373,6 @@
             if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                 errors += 1
 
-    if errors:
-        ui.warn(_('(consider using --after)\n'))
-
     return errors != 0
 
 ## facility to let extension process additional data into an import patch
@@ -1755,7 +1776,7 @@
         """
         cl_count = len(repo)
         revs = []
-        for j in xrange(0, last + 1):
+        for j in pycompat.xrange(0, last + 1):
             linkrev = filelog.linkrev(j)
             if linkrev < minrev:
                 continue
@@ -1889,9 +1910,6 @@
     revs = _walkrevs(repo, opts)
     if not revs:
         return []
-    if allfiles and len(revs) > 1:
-        raise error.Abort(_("multiple revisions not supported with "
-                            "--all-files"))
     wanted = set()
     slowpath = match.anypats() or (not match.always() and opts.get('removed'))
     fncache = {}
@@ -1902,7 +1920,7 @@
     # wanted: a cache of filenames that were changed (ctx.files()) and that
     # match the file filtering conditions.
 
-    if match.always():
+    if match.always() or allfiles:
         # No files, no patterns.  Display all revs.
         wanted = revs
     elif not slowpath:
@@ -1966,7 +1984,7 @@
         rev = repo[rev].rev()
         ff = _followfilter(repo)
         stop = min(revs[0], revs[-1])
-        for x in xrange(rev, stop - 1, -1):
+        for x in pycompat.xrange(rev, stop - 1, -1):
             if ff.match(x):
                 wanted = wanted - [x]
 
@@ -1985,7 +2003,7 @@
         stopiteration = False
         for windowsize in increasingwindows():
             nrevs = []
-            for i in xrange(windowsize):
+            for i in pycompat.xrange(windowsize):
                 rev = next(it, None)
                 if rev is None:
                     stopiteration = True
@@ -2026,6 +2044,7 @@
     if abort or warn:
         cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
 
+    match = repo.narrowmatch(match, includeexact=True)
     badmatch = matchmod.badmatch(match, badfn)
     dirstate = repo.dirstate
     # We don't want to just call wctx.walk here, since it would return a lot of
@@ -2038,7 +2057,8 @@
                 cca(f)
             names.append(f)
             if ui.verbose or not exact:
-                ui.status(_('adding %s\n') % match.rel(f))
+                ui.status(_('adding %s\n') % match.rel(f),
+                          label='ui.addremove.added')
 
     for subpath in sorted(wctx.substate):
         sub = wctx.sub(subpath)
@@ -2136,7 +2156,8 @@
 
     for f in forget:
         if ui.verbose or not match.exact(f) or interactive:
-            ui.status(_('removing %s\n') % match.rel(f))
+            ui.status(_('removing %s\n') % match.rel(f),
+                      label='ui.addremove.removed')
 
     if not dryrun:
         rejected = wctx.forget(forget, prefix)
@@ -2154,8 +2175,8 @@
         if needsfctx:
             fc = ctx[f]
             fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
-        fm.data(abspath=f)
-        fm.write('path', fmt, m.rel(f))
+        fm.data(path=f)
+        fm.plain(fmt % m.rel(f))
         ret = 0
 
     for subpath in sorted(ctx.substate):
@@ -2269,7 +2290,8 @@
     for f in list:
         if ui.verbose or not m.exact(f):
             progress.increment()
-            ui.status(_('removing %s\n') % m.rel(f))
+            ui.status(_('removing %s\n') % m.rel(f),
+                      label='ui.addremove.removed')
     progress.complete()
 
     if not dryrun:
@@ -2300,7 +2322,7 @@
     fm.startitem()
     fm.context(ctx=ctx)
     fm.write('data', '%s', data)
-    fm.data(abspath=path, path=matcher.rel(path))
+    fm.data(path=path)
 
 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
     err = 1
@@ -2428,7 +2450,7 @@
         if len(old.parents()) > 1:
             # ctx.files() isn't reliable for merges, so fall back to the
             # slower repo.status() method
-            files = set([fn for st in repo.status(base, old)[:3]
+            files = set([fn for st in base.status(old)[:3]
                          for fn in st])
         else:
             files = set(old.files())
@@ -2556,8 +2578,10 @@
         obsmetadata = None
         if opts.get('note'):
             obsmetadata = {'note': encoding.fromlocal(opts['note'])}
+        backup = ui.configbool('ui', 'history-editing-backup')
         scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
-                             fixphase=True, targetphase=commitphase)
+                             fixphase=True, targetphase=commitphase,
+                             backup=backup)
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -2605,7 +2629,7 @@
         committext = buildcommittext(repo, ctx, subs, extramsg)
 
     # run editor in the repository root
-    olddir = pycompat.getcwd()
+    olddir = encoding.getcwd()
     os.chdir(repo.root)
 
     # make in-memory changes visible to external process
@@ -2749,7 +2773,7 @@
 
     # `names` is a mapping for all elements in working copy and target revision
     # The mapping is in the form:
-    #   <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
+    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
     names = {}
 
     with repo.wlock():
@@ -2994,10 +3018,9 @@
                                     util.copyfile(target, bakname)
                                 else:
                                     util.rename(target, bakname)
-                    if ui.verbose or not exact:
-                        if not isinstance(msg, bytes):
-                            msg = msg(abs)
-                        ui.status(msg % rel)
+                    if opts.get('dry_run'):
+                        if ui.verbose or not exact:
+                            ui.status(msg % rel)
                 elif exact:
                     ui.warn(msg % rel)
                 break
@@ -3010,7 +3033,8 @@
             prefetch(repo, [ctx.rev()],
                      matchfiles(repo,
                                 [f for sublist in oplist for f in sublist]))
-            _performrevert(repo, parents, ctx, actions, interactive, tobackup)
+            _performrevert(repo, parents, ctx, names, actions, interactive,
+                           tobackup)
 
         if targetsubs:
             # Revert the subrepos on the revert list
@@ -3022,7 +3046,7 @@
                     raise error.Abort("subrepository '%s' does not exist in %s!"
                                       % (sub, short(ctx.node())))
 
-def _performrevert(repo, parents, ctx, actions, interactive=False,
+def _performrevert(repo, parents, ctx, names, actions, interactive=False,
                    tobackup=None):
     """function that actually perform all the actions computed for revert
 
@@ -3047,16 +3071,23 @@
             pass
         repo.dirstate.remove(f)
 
+    def prntstatusmsg(action, f):
+        rel, exact = names[f]
+        if repo.ui.verbose or not exact:
+            repo.ui.status(actions[action][1] % rel)
+
     audit_path = pathutil.pathauditor(repo.root, cached=True)
     for f in actions['forget'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
                 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
             if choice == 0:
+                prntstatusmsg('forget', f)
                 repo.dirstate.drop(f)
             else:
                 excluded_files.append(f)
         else:
+            prntstatusmsg('forget', f)
             repo.dirstate.drop(f)
     for f in actions['remove'][0]:
         audit_path(f)
@@ -3064,13 +3095,16 @@
             choice = repo.ui.promptchoice(
                 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
             if choice == 0:
+                prntstatusmsg('remove', f)
                 doremove(f)
             else:
                 excluded_files.append(f)
         else:
+            prntstatusmsg('remove', f)
             doremove(f)
     for f in actions['drop'][0]:
         audit_path(f)
+        prntstatusmsg('drop', f)
         repo.dirstate.remove(f)
 
     normal = None
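
``prntstatusmsg`` centralizes the "print unless the file was named exactly
and we are not verbose" rule that used to live at the call sites. A
standalone sketch, assuming the ``names`` mapping of file -> (relative path,
exact match?) built earlier in ``revert``::

    def prntstatusmsg(names, actions, action, f, verbose=False):
        # Emit the per-action message unless the file was matched exactly
        # and the user did not ask for verbose output.
        rel, exact = names[f]
        if verbose or not exact:
            print(actions[action][1] % rel, end='')

    names = {'a.txt': ('a.txt', True), 'b.txt': ('b.txt', False)}
    actions = {'remove': ([], 'removing %s\n')}
    prntstatusmsg(names, actions, 'remove', 'b.txt')   # printed
    prntstatusmsg(names, actions, 'remove', 'a.txt')   # silent: exact match
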
@@ -3117,14 +3151,18 @@
             tobackup = set()
         # Apply changes
         fp = stringio()
+        # chunks are serialized per file, but files aren't sorted
+        for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
+            prntstatusmsg('revert', f)
         for c in chunks:
-            # Create a backup file only if this hunk should be backed up
-            if ishunk(c) and c.header.filename() in tobackup:
+            if ishunk(c):
                 abs = c.header.filename()
-                target = repo.wjoin(abs)
-                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
-                util.copyfile(target, bakname)
-                tobackup.remove(abs)
+                # Create a backup file only if this hunk should be backed up
+                if c.header.filename() in tobackup:
+                    target = repo.wjoin(abs)
+                    bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
+                    util.copyfile(target, bakname)
+                    tobackup.remove(abs)
             c.write(fp)
         dopatch = fp.tell()
         fp.seek(0)
@@ -3136,6 +3174,7 @@
         del fp
     else:
         for f in actions['revert'][0]:
+            prntstatusmsg('revert', f)
             checkout(f)
             if normal:
                 normal(f)
@@ -3143,6 +3182,7 @@
     for f in actions['add'][0]:
         # Don't checkout modified files, they are already created by the diff
         if f not in newlyaddedandmodifiedfiles:
+            prntstatusmsg('add', f)
             checkout(f)
             repo.dirstate.add(f)
 
@@ -3150,6 +3190,7 @@
     if node == parent and p2 == nullid:
         normal = repo.dirstate.normal
     for f in actions['undelete'][0]:
+        prntstatusmsg('undelete', f)
         checkout(f)
         normal(f)
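
The interactive branch above now announces each reverted file exactly once
by deduplicating hunk filenames before the chunk loop. The core of that
move, with toy objects standing in for real patch hunks (the real loop also
filters with ``ishunk(c)``)::

    class FakeHeader(object):
        def __init__(self, name):
            self._name = name
        def filename(self):
            return self._name

    class FakeHunk(object):
        def __init__(self, name):
            self.header = FakeHeader(name)

    chunks = [FakeHunk('b'), FakeHunk('a'), FakeHunk('b')]
    # chunks are serialized per file, but files aren't sorted
    for f in sorted(set(c.header.filename() for c in chunks)):
        print('reverting %s' % f)   # prints 'a' then 'b', each once
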
 
--- a/mercurial/color.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/color.py	Mon Oct 22 14:46:06 2018 -0400
@@ -117,6 +117,9 @@
     'formatvariant.config.default': 'green',
     'formatvariant.default': '',
     'histedit.remaining': 'red bold',
+    'ui.addremove.added': 'green',
+    'ui.addremove.removed': 'red',
+    'ui.error': 'red',
     'ui.prompt': 'yellow',
     'log.changeset': 'yellow',
     'patchbomb.finalsummary': '',
@@ -293,9 +296,9 @@
                 if valideffect(ui, e):
                     good.append(e)
                 else:
-                    ui.warn(_("ignoring unknown color/effect %r "
+                    ui.warn(_("ignoring unknown color/effect %s "
                               "(configured in color.%s)\n")
-                            % (e, status))
+                            % (stringutil.pprint(e), status))
             ui._styles[status] = ' '.join(good)
 
 def _activeeffects(ui):
@@ -405,21 +408,21 @@
     _INVALID_HANDLE_VALUE = -1
 
     class _COORD(ctypes.Structure):
-        _fields_ = [('X', ctypes.c_short),
-                    ('Y', ctypes.c_short)]
+        _fields_ = [(r'X', ctypes.c_short),
+                    (r'Y', ctypes.c_short)]
 
     class _SMALL_RECT(ctypes.Structure):
-        _fields_ = [('Left', ctypes.c_short),
-                    ('Top', ctypes.c_short),
-                    ('Right', ctypes.c_short),
-                    ('Bottom', ctypes.c_short)]
+        _fields_ = [(r'Left', ctypes.c_short),
+                    (r'Top', ctypes.c_short),
+                    (r'Right', ctypes.c_short),
+                    (r'Bottom', ctypes.c_short)]
 
     class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-        _fields_ = [('dwSize', _COORD),
-                    ('dwCursorPosition', _COORD),
-                    ('wAttributes', _WORD),
-                    ('srWindow', _SMALL_RECT),
-                    ('dwMaximumWindowSize', _COORD)]
+        _fields_ = [(r'dwSize', _COORD),
+                    (r'dwCursorPosition', _COORD),
+                    (r'wAttributes', _WORD),
+                    (r'srWindow', _SMALL_RECT),
+                    (r'dwMaximumWindowSize', _COORD)]
 
     _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
     _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
@@ -481,7 +484,7 @@
             w32effects = None
         else:
             origattr = csbi.wAttributes
-            ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
+            ansire = re.compile(b'\033\[([^m]*)m([^\033]*)(.*)',
                                 re.MULTILINE | re.DOTALL)
 
     def win32print(ui, writefunc, *msgs, **opts):
@@ -513,15 +516,15 @@
                     # them if not found
                     pass
         # hack to ensure regexp finds data
-        if not text.startswith('\033['):
-            text = '\033[m' + text
+        if not text.startswith(b'\033['):
+            text = b'\033[m' + text
 
         # Look for ANSI-like codes embedded in text
         m = re.match(ansire, text)
 
         try:
             while m:
-                for sattr in m.group(1).split(';'):
+                for sattr in m.group(1).split(b';'):
                     if sattr:
                         attr = mapcolor(int(sattr), attr)
                 ui.flush()
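
On Python 3 the text reaching ``win32print`` is bytes, so the ANSI regexp
and the ``split`` separator must be bytes as well. A runnable sketch of the
same scan loop over a byte string::

    import re

    ansire = re.compile(b'\033\\[([^m]*)m([^\033]*)(.*)',
                        re.MULTILINE | re.DOTALL)

    text = b'plain \033[31mred\033[0m done'
    if not text.startswith(b'\033['):
        text = b'\033[m' + text          # ensure the regexp finds data
    m = re.match(ansire, text)
    while m:
        attrs = [int(s) for s in m.group(1).split(b';') if s]
        print(attrs, m.group(2))         # SGR codes, then the run of text
        m = re.match(ansire, m.group(3))
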
--- a/mercurial/commands.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/commands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,6 +19,8 @@
     nullid,
     nullrev,
     short,
+    wdirhex,
+    wdirrev,
 )
 from . import (
     archival,
@@ -35,6 +37,7 @@
     error,
     exchange,
     extensions,
+    filemerge,
     formatter,
     graphmod,
     hbisect,
@@ -42,6 +45,7 @@
     hg,
     logcmdutil,
     merge as mergemod,
+    narrowspec,
     obsolete,
     obsutil,
     patch,
@@ -127,10 +131,11 @@
 
 # Commands start here, listed alphabetically
 
-@command('^add',
+@command('add',
     walkopts + subrepoopts + dryrunopts,
     _('[OPTION]... [FILE]...'),
-    inferrepo=True)
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True, inferrepo=True)
 def add(ui, repo, *pats, **opts):
     """add the specified files on the next commit
 
@@ -181,6 +186,7 @@
 @command('addremove',
     similarityopts + subrepoopts + walkopts + dryrunopts,
     _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
     inferrepo=True)
 def addremove(ui, repo, *pats, **opts):
     """add all new files, delete all missing files
@@ -250,7 +256,7 @@
     matcher = scmutil.match(repo[None], pats, opts)
     return scmutil.addremove(repo, matcher, "", opts)
 
-@command('^annotate|blame',
+@command('annotate|blame',
     [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
     ('', 'follow', None,
      _('follow copies/renames and list the filename (DEPRECATED)')),
@@ -265,7 +271,8 @@
     ('', 'skip', [], _('revision to not display (EXPERIMENTAL)'), _('REV')),
     ] + diffwsopts + walkopts + formatteropts,
     _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
-    inferrepo=True)
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
+    helpbasic=True, inferrepo=True)
 def annotate(ui, repo, *pats, **opts):
     """show changeset information by line for each file
 
@@ -283,6 +290,25 @@
     anyway, although the results will probably be neither useful
     nor desirable.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :lines:   List of lines with annotation data.
+      :path:    String. Repository-absolute path of the specified file.
+
+      And each entry of ``{lines}`` provides the following sub-keywords in
+      addition to ``{date}``, ``{node}``, ``{rev}``, ``{user}``, etc.
+
+      :line:    String. Line content.
+      :lineno:  Integer. Line number at that revision.
+      :path:    String. Repository-absolute path of the file at that revision.
+
+      See :hg:`help templates.operators` for the list expansion syntax.
+
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
@@ -300,46 +326,47 @@
     ctx = scmutil.revsingle(repo, rev)
 
     rootfm = ui.formatter('annotate', opts)
+    if ui.debugflag:
+        shorthex = pycompat.identity
+    else:
+        def shorthex(h):
+            return h[:12]
     if ui.quiet:
         datefunc = dateutil.shortdate
     else:
         datefunc = dateutil.datestr
     if ctx.rev() is None:
-        def hexfn(node):
-            if node is None:
-                return None
-            else:
-                return rootfm.hexfunc(node)
         if opts.get('changeset'):
             # omit "+" suffix which is appended to node hex
             def formatrev(rev):
-                if rev is None:
+                if rev == wdirrev:
                     return '%d' % ctx.p1().rev()
                 else:
                     return '%d' % rev
         else:
             def formatrev(rev):
-                if rev is None:
+                if rev == wdirrev:
                     return '%d+' % ctx.p1().rev()
                 else:
                     return '%d ' % rev
-        def formathex(hex):
-            if hex is None:
-                return '%s+' % rootfm.hexfunc(ctx.p1().node())
+        def formathex(h):
+            if h == wdirhex:
+                return '%s+' % shorthex(hex(ctx.p1().node()))
             else:
-                return '%s ' % hex
+                return '%s ' % shorthex(h)
     else:
-        hexfn = rootfm.hexfunc
-        formatrev = formathex = pycompat.bytestr
+        formatrev = b'%d'.__mod__
+        formathex = shorthex
 
     opmap = [('user', ' ', lambda x: x.fctx.user(), ui.shortuser),
-             ('rev', ' ', lambda x: x.fctx.rev(), formatrev),
-             ('node', ' ', lambda x: hexfn(x.fctx.node()), formathex),
+             ('rev', ' ', lambda x: scmutil.intrev(x.fctx), formatrev),
+             ('node', ' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
              ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
-             ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
-             ('line_number', ':', lambda x: x.lineno, pycompat.bytestr),
+             ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
+             ('lineno', ':', lambda x: x.lineno, pycompat.bytestr),
             ]
-    opnamemap = {'rev': 'number', 'node': 'changeset'}
+    opnamemap = {'rev': 'number', 'node': 'changeset', 'path': 'file',
+                 'lineno': 'line_number'}
 
     if (not opts.get('user') and not opts.get('changeset')
         and not opts.get('date') and not opts.get('file')):
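
The annotate rewrite drops the ``None`` checks in favour of the explicit
``wdirrev``/``wdirhex`` sentinels and picks the hex width once up front. Both
choices in miniature (0x7fffffff mirrors ``node.wdirrev``)::

    WDIRREV = 0x7fffffff           # stand-in for mercurial.node.wdirrev

    def shorthex(h, debugflag=False):
        # --debug keeps the full 40-char hash, otherwise 12 chars.
        return h if debugflag else h[:12]

    def formatrev(rev, p1rev):
        # The working directory shows as its first parent plus '+'.
        if rev == WDIRREV:
            return '%d+' % p1rev
        return '%d ' % rev

    print(formatrev(WDIRREV, 41))   # '41+'
    print(shorthex(40 * 'f'))       # 'ffffffffffff'
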
@@ -379,7 +406,7 @@
     for abs in ctx.walk(m):
         fctx = ctx[abs]
         rootfm.startitem()
-        rootfm.data(abspath=abs, path=m.rel(abs))
+        rootfm.data(path=abs)
         if not opts.get('text') and fctx.isbinary():
             rootfm.plain(_("%s: binary file\n")
                          % ((pats and m.rel(abs)) or abs))
@@ -427,7 +454,8 @@
     ('r', 'rev', '', _('revision to distribute'), _('REV')),
     ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
     ] + subrepoopts + walkopts,
-    _('[OPTION]... DEST'))
+    _('[OPTION]... DEST'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def archive(ui, repo, dest, **opts):
     '''create an unversioned archive of a repository revision
 
@@ -506,7 +534,8 @@
     ('r', 'rev', '', _('revision to backout'), _('REV')),
     ('e', 'edit', False, _('invoke editor on commit messages')),
     ] + mergetoolopts + walkopts + commitopts + commitopts2,
-    _('[OPTION]... [-r] REV'))
+    _('[OPTION]... [-r] REV'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def backout(ui, repo, node=None, rev=None, **opts):
     '''reverse effect of earlier changeset
 
@@ -614,7 +643,9 @@
         with dirstateguard.dirstateguard(repo, 'backout'):
             overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
             with ui.configoverride(overrides, 'backout'):
-                stats = mergemod.update(repo, parent, True, True, node, False)
+                stats = mergemod.update(repo, parent, branchmerge=True,
+                                        force=True, ancestor=node,
+                                        mergeancestor=False)
             repo.setparents(op1, op2)
         hg._showstats(repo, stats)
         if stats.unresolvedcount:
@@ -669,7 +700,8 @@
     ('e', 'extend', False, _('extend the bisect range')),
     ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
     ('U', 'noupdate', False, _('do not update to target'))],
-    _("[-gbsr] [-U] [-c CMD] [REV]"))
+    _("[-gbsr] [-U] [-c CMD] [REV]"),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
 def bisect(ui, repo, rev=None, extra=None, command=None,
                reset=None, good=None, bad=None, skip=None, extend=None,
                noupdate=None):
@@ -900,8 +932,10 @@
     ('d', 'delete', False, _('delete a given bookmark')),
     ('m', 'rename', '', _('rename a given bookmark'), _('OLD')),
     ('i', 'inactive', False, _('mark a bookmark inactive')),
+    ('l', 'list', False, _('list existing bookmarks')),
     ] + formatteropts,
-    _('hg bookmarks [OPTIONS]... [NAME]...'))
+    _('hg bookmarks [OPTIONS]... [NAME]...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def bookmark(ui, repo, *names, **opts):
     '''create a new bookmark or list existing bookmarks
 
@@ -920,7 +954,7 @@
     diverged, a new 'divergent bookmark' of the form 'name@path' will
     be created. Using :hg:`merge` will resolve the divergence.
 
-    Specifying bookmark as '.' to -m or -d options is equivalent to specifying
+    Specifying bookmark as '.' to -m/-d/-l options is equivalent to specifying
     the active bookmark's name.
 
     A bookmark named '@' has the special property that :hg:`clone` will
@@ -928,6 +962,14 @@
 
     .. container:: verbose
 
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions such as ``{bookmark}``. See also
+      :hg:`help templates`.
+
+      :active:  Boolean. True if the bookmark is active.
+
       Examples:
 
       - create an active bookmark for a new line of development::
@@ -949,45 +991,63 @@
       - move the '@' bookmark from another branch::
 
           hg book -f @
+
+      - print only the active bookmark name::
+
+          hg book -ql .
     '''
-    force = opts.get(r'force')
-    rev = opts.get(r'rev')
-    delete = opts.get(r'delete')
-    rename = opts.get(r'rename')
-    inactive = opts.get(r'inactive')
-
-    if delete and rename:
-        raise error.Abort(_("--delete and --rename are incompatible"))
-    if delete and rev:
-        raise error.Abort(_("--rev is incompatible with --delete"))
-    if rename and rev:
-        raise error.Abort(_("--rev is incompatible with --rename"))
-    if not names and (delete or rev):
+    opts = pycompat.byteskwargs(opts)
+    force = opts.get('force')
+    rev = opts.get('rev')
+    inactive = opts.get('inactive')  # meaning add/rename to inactive bookmark
+
+    selactions = [k for k in ['delete', 'rename', 'list'] if opts.get(k)]
+    if len(selactions) > 1:
+        raise error.Abort(_('--%s and --%s are incompatible')
+                          % tuple(selactions[:2]))
+    if selactions:
+        action = selactions[0]
+    elif names or rev:
+        action = 'add'
+    elif inactive:
+        action = 'inactive'  # meaning deactivate
+    else:
+        action = 'list'
+
+    if rev and action in {'delete', 'rename', 'list'}:
+        raise error.Abort(_("--rev is incompatible with --%s") % action)
+    if inactive and action in {'delete', 'list'}:
+        raise error.Abort(_("--inactive is incompatible with --%s") % action)
+    if not names and action in {'add', 'delete'}:
         raise error.Abort(_("bookmark name required"))
 
-    if delete or rename or names or inactive:
+    if action in {'add', 'delete', 'rename', 'inactive'}:
         with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
-            if delete:
+            if action == 'delete':
                 names = pycompat.maplist(repo._bookmarks.expandname, names)
                 bookmarks.delete(repo, tr, names)
-            elif rename:
+            elif action == 'rename':
                 if not names:
                     raise error.Abort(_("new bookmark name required"))
                 elif len(names) > 1:
                     raise error.Abort(_("only one new bookmark name allowed"))
-                rename = repo._bookmarks.expandname(rename)
-                bookmarks.rename(repo, tr, rename, names[0], force, inactive)
-            elif names:
+                oldname = repo._bookmarks.expandname(opts['rename'])
+                bookmarks.rename(repo, tr, oldname, names[0], force, inactive)
+            elif action == 'add':
                 bookmarks.addbookmarks(repo, tr, names, rev, force, inactive)
-            elif inactive:
+            elif action == 'inactive':
                 if len(repo._bookmarks) == 0:
                     ui.status(_("no bookmarks set\n"))
                 elif not repo._activebookmark:
                     ui.status(_("no active bookmark\n"))
                 else:
                     bookmarks.deactivate(repo)
-    else: # show bookmarks
-        bookmarks.printbookmarks(ui, repo, **opts)
+    elif action == 'list':
+        names = pycompat.maplist(repo._bookmarks.expandname, names)
+        with ui.formatter('bookmarks', opts) as fm:
+            bookmarks.printbookmarks(ui, repo, fm, names)
+    else:
+        raise error.ProgrammingError('invalid action: %s' % action)
 
 @command('branch',
     [('f', 'force', None,
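
The bookmark hunk above replaces pairwise flag checks with a single
``action`` computed first and dispatched on afterwards. The selection step as
a standalone sketch (``ValueError`` standing in for ``error.Abort``)::

    def pickaction(opts):
        # At most one selector flag may be set; otherwise infer the action.
        selactions = [k for k in ['delete', 'rename', 'list'] if opts.get(k)]
        if len(selactions) > 1:
            raise ValueError('--%s and --%s are incompatible'
                             % tuple(selactions[:2]))
        if selactions:
            return selactions[0]
        if opts.get('names') or opts.get('rev'):
            return 'add'
        if opts.get('inactive'):
            return 'inactive'
        return 'list'

    assert pickaction({'rev': '5', 'names': []}) == 'add'
    assert pickaction({}) == 'list'
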
@@ -995,7 +1055,8 @@
      ('C', 'clean', None, _('reset branch name to parent branch name')),
      ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')),
     ],
-    _('[-fC] [NAME]'))
+    _('[-fC] [NAME]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def branch(ui, repo, label=None, **opts):
     """set or show the current branch name
 
@@ -1070,6 +1131,7 @@
      ('c', 'closed', False, _('show normal and closed branches')),
     ] + formatteropts,
     _('[-c]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
     intents={INTENT_READONLY})
 def branches(ui, repo, active=False, closed=False, **opts):
     """list repository named branches
@@ -1080,6 +1142,18 @@
 
     Use the command :hg:`update` to switch to an existing branch.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions such as ``{branch}``. See also
+      :hg:`help templates`.
+
+      :active:  Boolean. True if the branch is active.
+      :closed:  Boolean. True if the branch is closed.
+      :current: Boolean. True if it is the current branch.
+
     Returns 0.
     """
 
@@ -1143,7 +1217,8 @@
     ('a', 'all', None, _('bundle all changesets in the repository')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
     ] + remoteopts,
-    _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
+    _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def bundle(ui, repo, fname, dest=None, **opts):
     """create a bundle file
 
@@ -1200,7 +1275,7 @@
                                 "a destination"))
         if opts.get('base'):
             ui.warn(_("ignoring --base because --all was specified\n"))
-        base = ['null']
+        base = [nullrev]
     else:
         base = scmutil.revrange(repo, opts.get('base'))
     if cgversion not in changegroup.supportedoutgoingversions(repo):
@@ -1270,6 +1345,7 @@
     ('', 'decode', None, _('apply any matching decode filter')),
     ] + walkopts + formatteropts,
     _('[OPTION]... FILE...'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True,
     intents={INTENT_READONLY})
 def cat(ui, repo, file1, *pats, **opts):
@@ -1294,6 +1370,16 @@
     :``%b``: basename of the exporting repository
     :``\\``: literal "\\" character
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :data:    String. File content.
+      :path:    String. Repository-absolute path of the file.
+
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
@@ -1315,7 +1401,7 @@
         return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '',
                            **pycompat.strkwargs(opts))
 
-@command('^clone',
+@command('clone',
     [('U', 'noupdate', None, _('the clone will include an empty working '
                                'directory (only a repository)')),
     ('u', 'updaterev', '', _('revision, tag, or branch to check out'),
@@ -1331,7 +1417,8 @@
        _('clone with minimal data processing')),
     ] + remoteopts,
     _('[OPTION]... SOURCE [DEST]'),
-    norepo=True)
+    helpcategory=command.CATEGORY_REPO_CREATION,
+    helpbasic=True, norepo=True)
 def clone(ui, source, dest=None, **opts):
     """make a copy of an existing repository
 
@@ -1444,17 +1531,34 @@
     if opts.get('noupdate') and opts.get('updaterev'):
         raise error.Abort(_("cannot specify both --noupdate and --updaterev"))
 
+    # --include/--exclude can come from narrow or sparse.
+    includepats, excludepats = None, None
+
+    # hg.clone() differentiates between None and an empty set. So make sure
+    # patterns are sets if narrow is requested without patterns.
+    if opts.get('narrow'):
+        includepats = set()
+        excludepats = set()
+
+        if opts.get('include'):
+            includepats = narrowspec.parsepatterns(opts.get('include'))
+        if opts.get('exclude'):
+            excludepats = narrowspec.parsepatterns(opts.get('exclude'))
+
     r = hg.clone(ui, opts, source, dest,
                  pull=opts.get('pull'),
                  stream=opts.get('stream') or opts.get('uncompressed'),
                  revs=opts.get('rev'),
                  update=opts.get('updaterev') or not opts.get('noupdate'),
                  branch=opts.get('branch'),
-                 shareopts=opts.get('shareopts'))
+                 shareopts=opts.get('shareopts'),
+                 storeincludepats=includepats,
+                 storeexcludepats=excludepats,
+                 depth=opts.get('depth') or None)
 
     return r is None
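
For ``--narrow`` clones the command must distinguish "no narrowing at all"
(``None``) from "narrow with empty patterns" (empty sets) before calling
``hg.clone()``. The normalization step, sketched without the real
``narrowspec.parsepatterns``::

    def narrowpats(opts):
        includepats = excludepats = None    # None: not a narrow clone
        if opts.get('narrow'):
            includepats, excludepats = set(), set()
            if opts.get('include'):
                includepats = set(opts['include'])  # parsepatterns upstream
            if opts.get('exclude'):
                excludepats = set(opts['exclude'])
        return includepats, excludepats

    assert narrowpats({}) == (None, None)
    assert narrowpats({'narrow': True}) == (set(), set())
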
 
-@command('^commit|ci',
+@command('commit|ci',
     [('A', 'addremove', None,
      _('mark new/missing files as added/removed before committing')),
     ('', 'close-branch', None,
@@ -1465,6 +1569,7 @@
     ('i', 'interactive', None, _('use interactive mode')),
     ] + walkopts + commitopts + commitopts2 + subrepoopts,
     _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
     inferrepo=True)
 def commit(ui, repo, *pats, **opts):
     """commit the specified files or all outstanding changes
@@ -1617,6 +1722,7 @@
      ('l', 'local', None, _('edit repository config')),
      ('g', 'global', None, _('edit global config'))] + formatteropts,
     _('[-u] [NAME]...'),
+    helpcategory=command.CATEGORY_HELP,
     optionalrepo=True,
     intents={INTENT_READONLY})
 def config(ui, repo, *values, **opts):
@@ -1639,6 +1745,16 @@
 
     See :hg:`help config` for more information about config files.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported. See also :hg:`help templates`.
+
+      :name:    String. Config name.
+      :source:  String. Filename and line number where the item is defined.
+      :value:   String. Config value.
+
     Returns 0 on success, 1 if NAME does not exist.
 
     """
@@ -1725,7 +1841,8 @@
     [('A', 'after', None, _('record a copy that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
     ] + walkopts + dryrunopts,
-    _('[OPTION]... [SOURCE]... DEST'))
+    _('[OPTION]... [SOURCE]... DEST'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS)
 def copy(ui, repo, *pats, **opts):
     """mark files as copied for the next commit
 
@@ -1746,17 +1863,21 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts)
 
-@command('debugcommands', [], _('[COMMAND]'), norepo=True)
+@command(
+    'debugcommands', [], _('[COMMAND]'),
+    helpcategory=command.CATEGORY_HELP,
+    norepo=True)
 def debugcommands(ui, cmd='', *args):
     """list all available commands and options"""
     for cmd, vals in sorted(table.iteritems()):
-        cmd = cmd.split('|')[0].strip('^')
+        cmd = cmd.split('|')[0]
         opts = ', '.join([i[1] for i in vals[1]])
         ui.write('%s: %s\n' % (cmd, opts))
 
 @command('debugcomplete',
     [('o', 'options', None, _('show the command options'))],
     _('[-o] CMD'),
+    helpcategory=command.CATEGORY_HELP,
     norepo=True)
 def debugcomplete(ui, cmd='', **opts):
     """returns the completion list associated with the given command"""
@@ -1782,13 +1903,13 @@
         cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
     ui.write("%s\n" % "\n".join(sorted(cmdlist)))
 
-@command('^diff',
+@command('diff',
     [('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV'))
     ] + diffopts + diffopts2 + walkopts + subrepoopts,
     _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
-    inferrepo=True,
-    intents={INTENT_READONLY})
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
+    helpbasic=True, inferrepo=True, intents={INTENT_READONLY})
 def diff(ui, repo, *pats, **opts):
     """diff repository (or selected files)
 
@@ -1870,21 +1991,23 @@
 
     diffopts = patch.diffallopts(ui, opts)
     m = scmutil.match(ctx2, pats, opts)
+    m = repo.narrowmatch(m)
     ui.pager('diff')
     logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                               listsubrepos=opts.get('subrepos'),
                               root=opts.get('root'))
 
-@command('^export',
+@command('export',
     [('B', 'bookmark', '',
-     _('export changes only reachable by given bookmark')),
+     _('export changes only reachable by given bookmark'), _('BOOKMARK')),
     ('o', 'output', '',
      _('print output to file with formatted name'), _('FORMAT')),
     ('', 'switch-parent', None, _('diff against the second parent')),
     ('r', 'rev', [], _('revisions to export'), _('REV')),
     ] + diffopts + formatteropts,
     _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'),
-    intents={INTENT_READONLY})
+    helpcategory=command.CATEGORY_IMPORT_EXPORT,
+    helpbasic=True, intents={INTENT_READONLY})
 def export(ui, repo, *changesets, **opts):
     """dump the header and diffs for one or more changesets
 
@@ -1932,6 +2055,14 @@
 
     .. container:: verbose
 
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :diff:    String. Diff content.
+      :parents: List of strings. Parent nodes of the changeset.
+
       Examples:
 
       - use export and import to transplant a bugfix to the current
@@ -1996,6 +2127,7 @@
      ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
     ] + walkopts + formatteropts + subrepoopts,
     _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
     intents={INTENT_READONLY})
 def files(ui, repo, *pats, **opts):
     """list tracked files
@@ -2009,6 +2141,15 @@
 
     .. container:: verbose
 
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :flags:   String. Character denoting file's symlink and executable bits.
+      :path:    String. Repository-absolute path of the file.
+      :size:    Integer. Size of the file in bytes.
+
       Examples:
 
       - list all files under the current directory::
@@ -2059,10 +2200,12 @@
         return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos'))
 
 @command(
-    '^forget',
+    'forget',
     [('i', 'interactive', None, _('use interactive mode')),
     ] + walkopts + dryrunopts,
-    _('[OPTION]... FILE...'), inferrepo=True)
+    _('[OPTION]... FILE...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True, inferrepo=True)
 def forget(ui, repo, *pats, **opts):
     """forget the specified files on the next commit
 
@@ -2117,9 +2260,10 @@
      ('D', 'currentdate', False,
       _('record the current date as commit date')),
      ('U', 'currentuser', False,
-      _('record the current user as committer'), _('DATE'))]
+      _('record the current user as committer'))]
     + commitopts2 + mergetoolopts  + dryrunopts,
-    _('[OPTION]... [-r REV]... REV...'))
+    _('[OPTION]... [-r REV]... REV...'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
 def graft(ui, repo, *revs, **opts):
     '''copy changes from other branches onto the current branch
 
@@ -2507,6 +2651,7 @@
     ('d', 'date', None, _('list the date (short with -q)')),
     ] + formatteropts + walkopts,
     _('[OPTION]... PATTERN [FILE]...'),
+    helpcategory=command.CATEGORY_FILE_CONTENTS,
     inferrepo=True,
     intents={INTENT_READONLY})
 def grep(ui, repo, pattern, *pats, **opts):
@@ -2528,10 +2673,31 @@
     the repository are searched, including those that don't exist in the
     current branch or have been deleted in a prior changeset.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :change:  String. Character denoting insertion ``+`` or removal ``-``.
+                Available if ``--diff`` is specified.
+      :lineno:  Integer. Line number of the match.
+      :path:    String. Repository-absolute path of the file.
+      :texts:   List of text chunks.
+
+      And each entry of ``{texts}`` provides the following sub-keywords.
+
+      :matched: Boolean. True if the chunk matches the specified pattern.
+      :text:    String. Chunk content.
+
+      See :hg:`help templates.operators` for the list expansion syntax.
+
     Returns 0 if a match is found, 1 otherwise.
     """
     opts = pycompat.byteskwargs(opts)
     diff = opts.get('all') or opts.get('diff')
+    all_files = opts.get('all_files')
     if diff and opts.get('all_files'):
         raise error.Abort(_('--diff and --all-files are mutually exclusive'))
     # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working
@@ -2606,16 +2772,16 @@
     def difflinestates(a, b):
         sm = difflib.SequenceMatcher(None, a, b)
         for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag == 'insert':
-                for i in xrange(blo, bhi):
+            if tag == r'insert':
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
-            elif tag == 'delete':
-                for i in xrange(alo, ahi):
+            elif tag == r'delete':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-            elif tag == 'replace':
-                for i in xrange(alo, ahi):
+            elif tag == r'replace':
+                for i in pycompat.xrange(alo, ahi):
                     yield ('-', a[i])
-                for i in xrange(blo, bhi):
+                for i in pycompat.xrange(blo, bhi):
                     yield ('+', b[i])
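
``difflinestates`` converts ``SequenceMatcher`` opcodes into the ``+``/``-``
stream that ``--diff`` mode prints, and it is easy to exercise on its own
(the opcode tags are native strings, hence the ``r''`` comparisons above)::

    import difflib

    def difflinestates(a, b):
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in range(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in range(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in range(alo, ahi):
                    yield ('-', a[i])
                for i in range(blo, bhi):
                    yield ('+', b[i])

    print(list(difflinestates(['match in old\n'], ['match in new\n'])))
    # [('-', 'match in old\n'), ('+', 'match in new\n')]
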
 
     def display(fm, fn, ctx, pstates, states):
@@ -2623,7 +2789,7 @@
         if fm.isplain():
             formatuser = ui.shortuser
         else:
-            formatuser = str
+            formatuser = pycompat.bytestr
         if ui.quiet:
             datefmt = '%Y-%m-%d'
         else:
@@ -2637,7 +2803,7 @@
             except error.WdirUnsupported:
                 return ctx[fn].isbinary()
 
-        fieldnamemap = {'filename': 'file', 'linenumber': 'line_number'}
+        fieldnamemap = {'filename': 'path', 'linenumber': 'lineno'}
         if diff:
             iter = difflinestates(pstates, states)
         else:
@@ -2648,20 +2814,22 @@
             fm.data(node=fm.hexfunc(scmutil.binnode(ctx)))
 
             cols = [
-                ('filename', fn, True),
-                ('rev', rev, not plaingrep),
-                ('linenumber', l.linenum, opts.get('line_number')),
+                ('filename', '%s', fn, True),
+                ('rev', '%d', rev, not plaingrep),
+                ('linenumber', '%d', l.linenum, opts.get('line_number')),
             ]
             if diff:
-                cols.append(('change', change, True))
+                cols.append(('change', '%s', change, True))
             cols.extend([
-                ('user', formatuser(ctx.user()), opts.get('user')),
-                ('date', fm.formatdate(ctx.date(), datefmt), opts.get('date')),
+                ('user', '%s', formatuser(ctx.user()), opts.get('user')),
+                ('date', '%s', fm.formatdate(ctx.date(), datefmt),
+                 opts.get('date')),
             ])
-            lastcol = next(name for name, data, cond in reversed(cols) if cond)
-            for name, data, cond in cols:
+            lastcol = next(
+                name for name, fmt, data, cond in reversed(cols) if cond)
+            for name, fmt, data, cond in cols:
                 field = fieldnamemap.get(name, name)
-                fm.condwrite(cond, field, '%s', data, label='grep.%s' % name)
+                fm.condwrite(cond, field, fmt, data, label='grep.%s' % name)
                 if cond and name != lastcol:
                     fm.plain(sep, label='grep.sep')
             if not opts.get('files_with_matches'):
@@ -2756,7 +2924,7 @@
             if pstates or states:
                 r = display(fm, fn, ctx, pstates, states)
                 found = found or r
-                if r and not diff:
+                if r and not diff and not all_files:
                     skip[fn] = True
                     if copy:
                         skip[copy] = True
@@ -2777,6 +2945,7 @@
     ('c', 'closed', False, _('show normal and closed branch heads')),
     ] + templateopts,
     _('[-ct] [-r STARTREV] [REV]...'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     intents={INTENT_READONLY})
 def heads(ui, repo, *branchrevs, **opts):
     """show branch heads
@@ -2850,9 +3019,11 @@
     [('e', 'extension', None, _('show only help for extensions')),
      ('c', 'command', None, _('show only help for commands')),
      ('k', 'keyword', None, _('show topics matching keyword')),
-     ('s', 'system', [], _('show help for specific platform(s)')),
+     ('s', 'system', [],
+      _('show help for specific platform(s)'), _('PLATFORM')),
      ],
-    _('[-ecks] [TOPIC]'),
+    _('[-eck] [-s PLATFORM] [TOPIC]'),
+    helpcategory=command.CATEGORY_HELP,
     norepo=True,
     intents={INTENT_READONLY})
 def help_(ui, name=None, **opts):
@@ -2896,6 +3067,7 @@
     ('B', 'bookmarks', None, _('show bookmarks')),
     ] + remoteopts + formatteropts,
     _('[-nibtB] [-r REV] [SOURCE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     optionalrepo=True,
     intents={INTENT_READONLY})
 def identify(ui, repo, source=None, rev=None,
@@ -2916,6 +3088,16 @@
 
     .. container:: verbose
 
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :dirty:   String. Character ``+`` denoting if the working directory has
+                uncommitted changes.
+      :id:      String. One or two nodes, optionally followed by ``+``.
+      :parents: List of strings. Parent nodes of the changeset.
+
       Examples:
 
       - generate a build identifier for the working directory::
@@ -2941,10 +3123,6 @@
         raise error.Abort(_("there is no Mercurial repository here "
                            "(.hg not found)"))
 
-    if ui.debugflag:
-        hexfunc = hex
-    else:
-        hexfunc = short
     default = not (num or id or branch or tags or bookmarks)
     output = []
     revs = []
@@ -2968,11 +3146,12 @@
             rev = "tip"
 
         remoterev = peer.lookup(rev)
-        hexrev = hexfunc(remoterev)
+        hexrev = fm.hexfunc(remoterev)
         if default or id:
             output = [hexrev]
         fm.data(id=hexrev)
 
+        @util.cachefunc
         def getbms():
             bms = []
 
@@ -2983,17 +3162,18 @@
 
             return sorted(bms)
 
-        bms = getbms()
-        if bookmarks:
-            output.extend(bms)
-        elif default and not ui.quiet:
-            # multiple bookmarks for a single parent separated by '/'
-            bm = '/'.join(bms)
-            if bm:
-                output.append(bm)
-
-        fm.data(node=hex(remoterev))
-        fm.data(bookmarks=fm.formatlist(bms, name='bookmark'))
+        if fm.isplain():
+            if bookmarks:
+                output.extend(getbms())
+            elif default and not ui.quiet:
+                # multiple bookmarks for a single parent separated by '/'
+                bm = '/'.join(getbms())
+                if bm:
+                    output.append(bm)
+        else:
+            fm.data(node=hex(remoterev))
+            if bookmarks or 'bookmarks' in fm.datahint():
+                fm.data(bookmarks=fm.formatlist(getbms(), name='bookmark'))
     else:
         if rev:
             repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
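
Decorating ``getbms`` with ``util.cachefunc`` lets both consumers share one
bookmark lookup, and together with ``fm.datahint()`` the lookup only happens
when some output format will actually use it. The memoization itself, as a
plain-Python equivalent for the zero-argument case::

    def cachefunc(func):
        # Call func once and replay the answer; mercurial.util.cachefunc
        # generalizes this to functions taking arguments.
        cache = []
        def wrapped():
            if not cache:
                cache.append(func())
            return cache[0]
        return wrapped

    calls = []

    @cachefunc
    def getbms():
        calls.append(1)
        return ['book1', 'book2']

    getbms(); getbms()
    assert len(calls) == 1
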
@@ -3011,7 +3191,7 @@
                 dirty = '+'
             fm.data(dirty=dirty)
 
-            hexoutput = [hexfunc(p.node()) for p in parents]
+            hexoutput = [fm.hexfunc(p.node()) for p in parents]
             if default or id:
                 output = ["%s%s" % ('+'.join(hexoutput), dirty)]
             fm.data(id="%s%s" % ('+'.join(hexoutput), dirty))
@@ -3020,15 +3200,10 @@
                 numoutput = ["%d" % p.rev() for p in parents]
                 output.append("%s%s" % ('+'.join(numoutput), dirty))
 
-            fn = fm.nested('parents', tmpl='{rev}:{node|formatnode}', sep=' ')
-            for p in parents:
-                fn.startitem()
-                fn.data(rev=p.rev())
-                fn.data(node=p.hex())
-                fn.context(ctx=p)
-            fn.end()
+            fm.data(parents=fm.formatlist([fm.hexfunc(p.node())
+                                           for p in parents], name='node'))
         else:
-            hexoutput = hexfunc(ctx.node())
+            hexoutput = fm.hexfunc(ctx.node())
             if default or id:
                 output = [hexoutput]
             fm.data(id=hexoutput)
@@ -3091,7 +3266,8 @@
     ('', 'import-branch', None,
      _('use any branch information in patch (implied by --exact)'))] +
     commitopts + commitopts2 + similarityopts,
-    _('[OPTION]... PATCH...'))
+    _('[OPTION]... PATCH...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def import_(ui, repo, patch1=None, *patches, **opts):
     """import an ordered set of patches
 
@@ -3288,7 +3464,8 @@
     ('b', 'branch', [],
      _('a specific branch you would like to pull'), _('BRANCH')),
     ] + logopts + remoteopts + subrepoopts,
-    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
+    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
 def incoming(ui, repo, source="default", **opts):
     """show new changesets found in source
 
@@ -3375,8 +3552,9 @@
         del repo._subtoppath
 
 
-@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
-         norepo=True)
+@command('init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'),
+         helpcategory=command.CATEGORY_REPO_CREATION,
+         helpbasic=True, norepo=True)
 def init(ui, dest=".", **opts):
     """create a new repository in the given directory
 
@@ -3398,7 +3576,8 @@
     ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
     ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
     ] + walkopts,
-    _('[OPTION]... [PATTERN]...'))
+    _('[OPTION]... [PATTERN]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def locate(ui, repo, *pats, **opts):
     """locate files matching specific patterns (DEPRECATED)
 
@@ -3448,7 +3627,7 @@
 
     return ret
 
-@command('^log|history',
+@command('log|history',
     [('f', 'follow', None,
      _('follow changeset history, or file history across copies and renames')),
     ('', 'follow-first', None,
@@ -3473,7 +3652,8 @@
      _('do not display revision or any of its ancestors'), _('REV')),
     ] + logopts + walkopts,
     _('[OPTION]... [FILE]'),
-    inferrepo=True,
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
+    helpbasic=True, inferrepo=True,
     intents={INTENT_READONLY})
 def log(ui, repo, *pats, **opts):
     """show revision history of entire repository or files
@@ -3643,6 +3823,7 @@
      ('', 'all', False, _("list files from all revisions"))]
          + formatteropts,
     _('[-r REV]'),
+    helpcategory=command.CATEGORY_MAINTENANCE,
     intents={INTENT_READONLY})
 def manifest(ui, repo, node=None, rev=None, **opts):
     """output the current or given revision of the project manifest
@@ -3700,7 +3881,7 @@
         fm.write('path', '%s\n', f)
     fm.end()
 
-@command('^merge',
+@command('merge',
     [('f', 'force', None,
       _('force a merge including outstanding changes (DEPRECATED)')),
     ('r', 'rev', '', _('revision to merge'), _('REV')),
@@ -3708,7 +3889,8 @@
      _('review revisions to merge (no merge is performed)')),
     ('', 'abort', None, _('abort the ongoing merge')),
      ] + mergetoolopts,
-    _('[-P] [[-r] REV]'))
+    _('[-P] [[-r] REV]'),
+    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT, helpbasic=True)
 def merge(ui, repo, node=None, **opts):
     """merge another revision into working directory
 
@@ -3789,7 +3971,8 @@
     ('b', 'branch', [], _('a specific branch you would like to push'),
      _('BRANCH')),
     ] + logopts + remoteopts + subrepoopts,
-    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
+    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT)
 def outgoing(ui, repo, dest=None, **opts):
     """show changesets not found in the destination
 
@@ -3869,6 +4052,7 @@
     [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
     ] + templateopts,
     _('[-r REV] [FILE]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
     inferrepo=True)
 def parents(ui, repo, file_=None, **opts):
     """show the parents of the working directory or revision (DEPRECATED)
@@ -3925,8 +4109,9 @@
             displayer.show(repo[n])
     displayer.close()
 
-@command('paths', formatteropts, _('[NAME]'), optionalrepo=True,
-    intents={INTENT_READONLY})
+@command('paths', formatteropts, _('[NAME]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+    optionalrepo=True, intents={INTENT_READONLY})
 def paths(ui, repo, search=None, **opts):
     """show aliases for remote repositories
 
@@ -3956,6 +4141,16 @@
 
     See :hg:`help urls` for more information.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported. See also :hg:`help templates`.
+
+      :name:    String. Symbolic name of the path alias.
+      :pushurl: String. URL for push operations.
+      :url:     String. URL or directory path for the other operations.
+
     Returns 0 on success.
     """
 
@@ -4004,7 +4199,8 @@
      ('f', 'force', False, _('allow to move boundary backward')),
      ('r', 'rev', [], _('target revision'), _('REV')),
     ],
-    _('[-p|-d|-s] [-f] [-r] [REV...]'))
+    _('[-p|-d|-s] [-f] [-r] [REV...]'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def phase(ui, repo, *revs, **opts):
     """set or show the current phase name
 
@@ -4026,7 +4222,7 @@
     # search for a unique phase argument
     targetphase = None
     for idx, name in enumerate(phases.phasenames):
-        if opts[name]:
+        if opts.get(name, False):
             if targetphase is not None:
                 raise error.Abort(_('only one phase can be specified'))
             targetphase = idx
@@ -4112,7 +4308,7 @@
     elif not ui.configbool('commands', 'update.requiredest'):
         ui.status(_("(run 'hg update' to get a working copy)\n"))
 
-@command('^pull',
+@command('pull',
     [('u', 'update', None,
      _('update to new branch head if new descendants were pulled')),
     ('f', 'force', None, _('run even when remote repository is unrelated')),
@@ -4121,7 +4317,9 @@
     ('b', 'branch', [], _('a specific branch you would like to pull'),
      _('BRANCH')),
     ] + remoteopts,
-    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
+    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+    helpbasic=True)
 def pull(ui, repo, source="default", **opts):
     """pull changes from the specified source
 
@@ -4241,7 +4439,7 @@
         other.close()
     return ret
 
-@command('^push',
+@command('push',
     [('f', 'force', None, _('force push')),
     ('r', 'rev', [],
      _('a changeset intended to be included in the destination'),
@@ -4252,7 +4450,9 @@
     ('', 'new-branch', False, _('allow pushing a new branch')),
     ('', 'pushvars', [], _('variables that can be sent to server (ADVANCED)')),
     ] + remoteopts,
-    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
+    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+    helpbasic=True)
 def push(ui, repo, dest=None, **opts):
     """push changes to the specified destination
 
@@ -4378,7 +4578,7 @@
 
     return result
 
-@command('recover', [])
+@command('recover', [], helpcategory=command.CATEGORY_MAINTENANCE)
 def recover(ui, repo):
     """roll back an interrupted transaction
 
@@ -4394,13 +4594,14 @@
         return hg.verify(repo)
     return 1
 
-@command('^remove|rm',
+@command('remove|rm',
     [('A', 'after', None, _('record delete for missing files')),
     ('f', 'force', None,
      _('forget added files, delete modified files')),
     ] + subrepoopts + walkopts + dryrunopts,
     _('[OPTION]... FILE...'),
-    inferrepo=True)
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True, inferrepo=True)
 def remove(ui, repo, *pats, **opts):
     """remove the specified files on the next commit
 
@@ -4455,7 +4656,8 @@
     [('A', 'after', None, _('record a rename that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
     ] + walkopts + dryrunopts,
-    _('[OPTION]... SOURCE... DEST'))
+    _('[OPTION]... SOURCE... DEST'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def rename(ui, repo, *pats, **opts):
     """rename files; equivalent of copy + remove
 
@@ -4481,9 +4683,11 @@
     ('l', 'list', None, _('list state of files needing merge')),
     ('m', 'mark', None, _('mark files as resolved')),
     ('u', 'unmark', None, _('mark files as unresolved')),
-    ('n', 'no-status', None, _('hide status prefix'))]
+    ('n', 'no-status', None, _('hide status prefix')),
+    ('', 're-merge', None, _('re-merge files'))]
     + mergetoolopts + walkopts + formatteropts,
     _('[OPTION]... [FILE]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
     inferrepo=True)
 def resolve(ui, repo, *pats, **opts):
     """redo merges or set/view the merge status of files
@@ -4498,9 +4702,9 @@
 
     The resolve command can be used in the following ways:
 
-    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
-      files, discarding any previous merge attempts. Re-merging is not
-      performed for files already marked as resolved. Use ``--all/-a``
+    - :hg:`resolve [--re-merge] [--tool TOOL] FILE...`: attempt to re-merge
+      the specified files, discarding any previous merge attempts. Re-merging
+      is not performed for files already marked as resolved. Use ``--all/-a``
       to select all unresolved files. ``--tool`` can be used to specify
       the merge tool used for the given files. It overrides the HGMERGE
       environment variable and your configuration files.  Previous file
@@ -4524,22 +4728,52 @@
        conflicts. You must use :hg:`resolve -m ...` before you can
        commit after a conflicting merge.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :mergestatus: String. Character denoting merge conflicts, ``U`` or ``R``.
+      :path:    String. Repository-absolute path of the file.
+
     Returns 0 on success, 1 if any files fail a resolve attempt.
     """
 
     opts = pycompat.byteskwargs(opts)
-    flaglist = 'all mark unmark list no_status'.split()
-    all, mark, unmark, show, nostatus = \
+    confirm = ui.configbool('commands', 'resolve.confirm')
+    flaglist = 'all mark unmark list no_status re_merge'.split()
+    all, mark, unmark, show, nostatus, remerge = \
         [opts.get(o) for o in flaglist]
 
-    if (show and (mark or unmark)) or (mark and unmark):
-        raise error.Abort(_("too many options specified"))
+    actioncount = len(list(filter(None, [show, mark, unmark, remerge])))
+    if actioncount > 1:
+        raise error.Abort(_("too many actions specified"))
+    elif (actioncount == 0
+          and ui.configbool('commands', 'resolve.explicit-re-merge')):
+        hint = _('use --mark, --unmark, --list or --re-merge')
+        raise error.Abort(_('no action specified'), hint=hint)
     if pats and all:
         raise error.Abort(_("can't specify --all and patterns"))
     if not (all or pats or show or mark or unmark):
         raise error.Abort(_('no files or directories specified'),
                          hint=('use --all to re-merge all unresolved files'))
 
+    if confirm:
+        if all:
+            if ui.promptchoice(_(b're-merge all unresolved files (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if mark and not pats:
+            if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+        if unmark and not pats:
+            if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?'
+                                 b'$$ &Yes $$ &No')):
+                raise error.Abort(_('user quit'))
+
     if show:
         ui.pager('resolve')
         fm = ui.formatter('resolve', opts)
@@ -4566,7 +4800,7 @@
             label, key = mergestateinfo[ms[f]]
             fm.startitem()
             fm.context(ctx=wctx)
-            fm.condwrite(not nostatus, 'status', '%s ', key, label=label)
+            fm.condwrite(not nostatus, 'mergestatus', '%s ', key, label=label)
             fm.write('path', '%s\n', f, label=label)
         fm.end()
         return 0
@@ -4594,6 +4828,12 @@
         runconclude = False
 
         tocomplete = []
+        hasconflictmarkers = []
+        if mark:
+            markcheck = ui.config('commands', 'resolve.mark-check')
+            if markcheck not in ['warn', 'abort']:
+                # Treat all invalid / unrecognized values as 'none'.
+                markcheck = False
         for f in ms:
             if not m(f):
                 continue
@@ -4629,6 +4869,12 @@
                 continue
 
             if mark:
+                if markcheck:
+                    with repo.wvfs(f) as fobj:
+                        fdata = fobj.read()
+                    if filemerge.hasconflictmarkers(fdata) and \
+                        ms[f] != mergemod.MERGE_RECORD_RESOLVED:
+                        hasconflictmarkers.append(f)
                 ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
             elif unmark:
                 ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
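
With ``commands.resolve.mark-check`` set, ``hg resolve --mark`` now warns or
aborts when a file still contains conflict markers, using the new
``filemerge.hasconflictmarkers``. A rough standalone equivalent of such a
check, assuming the standard seven-character markers at line starts::

    import re

    def hasconflictmarkers(data):
        # filemerge's real check is authoritative; this sketch looks for
        # <<<<<<<, =======, or >>>>>>> at the beginning of a line.
        return bool(re.search(br'^(<<<<<<<|=======|>>>>>>>)',
                              data, re.MULTILINE))

    merged = b'<<<<<<< working copy\nx\n=======\ny\n>>>>>>> merge rev\n'
    assert hasconflictmarkers(merged)
    assert not hasconflictmarkers(b'all resolved\n')
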
@@ -4663,6 +4909,13 @@
                         if inst.errno != errno.ENOENT:
                             raise
 
+        if hasconflictmarkers:
+            ui.warn(_('warning: the following files still have conflict '
+                      'markers:\n  ') + '\n  '.join(hasconflictmarkers) + '\n')
+            if markcheck == 'abort' and not all:
+                raise error.Abort(_('conflict markers detected'),
+                                  hint=_('use --all to mark anyway'))
+
         for f in tocomplete:
             try:
                 # resolve file
@@ -4693,8 +4946,11 @@
                 for f in ms:
                     if not m(f):
                         continue
-                    flags = ''.join(['-%s ' % o[0:1] for o in flaglist
-                                                   if opts.get(o)])
+                    def flag(o):
+                        if o == 're_merge':
+                            return '--re-merge '
+                        return '-%s ' % o[0:1]
+                    flags = ''.join([flag(o) for o in flaglist if opts.get(o)])
                     hint = _("(try: hg resolve %s%s)\n") % (
                              flags,
                              ' '.join(pats))
@@ -4733,7 +4989,8 @@
     ('C', 'no-backup', None, _('do not save backup copies of files')),
     ('i', 'interactive', None, _('interactively select the changes')),
     ] + walkopts + dryrunopts,
-    _('[OPTION]... [-r REV] [NAME]...'))
+    _('[OPTION]... [-r REV] [NAME]...'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def revert(ui, repo, *pats, **opts):
     """restore files to their checkout state
 
@@ -4800,10 +5057,10 @@
         if node != parent:
             if dirty:
                 hint = _("uncommitted changes, use --all to discard all"
-                         " changes, or 'hg update %s' to update") % ctx.rev()
+                         " changes, or 'hg update %d' to update") % ctx.rev()
             else:
                 hint = _("use --all to revert all files,"
-                         " or 'hg update %s' to update") % ctx.rev()
+                         " or 'hg update %d' to update") % ctx.rev()
         elif dirty:
             hint = _("uncommitted changes, use --all to discard all changes")
         else:
@@ -4813,8 +5070,10 @@
     return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats,
                           **pycompat.strkwargs(opts))
 
-@command('rollback', dryrunopts +
-         [('f', 'force', False, _('ignore safety measures'))])
+@command(
+    'rollback',
+    dryrunopts + [('f', 'force', False, _('ignore safety measures'))],
+    helpcategory=command.CATEGORY_MAINTENANCE)
 def rollback(ui, repo, **opts):
     """roll back the last transaction (DANGEROUS) (DEPRECATED)
 
@@ -4866,7 +5125,9 @@
     return repo.rollback(dryrun=opts.get(r'dry_run'),
                          force=opts.get(r'force'))
 
-@command('root', [], intents={INTENT_READONLY})
+@command(
+    'root', [], intents={INTENT_READONLY},
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY)
 def root(ui, repo):
     """print the root (top) of the current working directory
 
@@ -4876,7 +5137,7 @@
     """
     ui.write(repo.root + "\n")
 
-@command('^serve',
+@command('serve',
     [('A', 'accesslog', '', _('name of access log file to write to'),
      _('FILE')),
     ('d', 'daemon', None, _('run server in background')),
@@ -4904,7 +5165,8 @@
     ('', 'print-url', None, _('start and print only the URL'))]
      + subrepoopts,
     _('[OPTION]...'),
-    optionalrepo=True)
+    helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+    helpbasic=True, optionalrepo=True)
 def serve(ui, repo, **opts):
     """start stand-alone webserver
 
@@ -4948,7 +5210,7 @@
 
 _NOTTERSE = 'nothing'
 
-@command('^status|st',
+@command('status|st',
     [('A', 'all', None, _('show status of all files')),
     ('m', 'modified', None, _('show only modified files')),
     ('a', 'added', None, _('show only added files')),
@@ -4965,7 +5227,8 @@
     ('', 'change', '', _('list the changed files of a revision'), _('REV')),
     ] + walkopts + subrepoopts + formatteropts,
     _('[OPTION]... [FILE]...'),
-    inferrepo=True,
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True, inferrepo=True,
     intents={INTENT_READONLY})
 def status(ui, repo, *pats, **opts):
     """show changed files in the working directory
@@ -5023,6 +5286,16 @@
       ``commands.status.skipstates`` to one or more of: 'bisect', 'graft',
       'histedit', 'merge', 'rebase', or 'unshelve'.
 
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions. See also :hg:`help templates`.
+
+      :path:    String. Repository-absolute path of the file.
+      :source:  String. Repository-absolute path of the file it originated
+                from. Available if ``--copies`` is specified.
+      :status:  String. Character denoting file's status.
+
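
    A hedged illustration of combining these keywords in a custom template;
    the invocation below is illustrative and not part of this patch:

        $ hg status --copies -T '{status} {path}{if(source, " (from {source})")}\n'
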
       Examples:
 
       - show changes in the working directory relative to a
@@ -5128,10 +5401,12 @@
             for f in files:
                 fm.startitem()
                 fm.context(ctx=ctx2)
+                fm.data(path=f)
                 fm.condwrite(showchar, 'status', '%s ', char, label=label)
-                fm.write('path', fmt, repo.pathto(f, cwd), label=label)
+                fm.plain(fmt % repo.pathto(f, cwd), label=label)
                 if f in copy:
-                    fm.write("copy", '  %s' + end, repo.pathto(copy[f], cwd),
+                    fm.data(source=copy[f])
+                    fm.plain(('  %s' + end) % repo.pathto(copy[f], cwd),
                              label='status.copied')
 
     if ((ui.verbose or ui.configbool('commands', 'status.verbose'))
@@ -5139,9 +5414,11 @@
         cmdutil.morestatus(repo, fm)
     fm.end()
 
-@command('^summary|sum',
+@command('summary|sum',
     [('', 'remote', None, _('check for push and pull'))],
     '[--remote]',
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True,
     intents={INTENT_READONLY})
 def summary(ui, repo, **opts):
     """summarize working directory state
@@ -5432,7 +5709,8 @@
     ('e', 'edit', None, _('invoke editor on commit messages')),
     ('m', 'message', '', _('use text as commit message'), _('TEXT')),
     ] + commitopts2,
-    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
+    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'),
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
 def tag(ui, repo, name1, *names, **opts):
     """add one or more tags for the current or given revision
 
@@ -5538,7 +5816,10 @@
         tagsmod.tag(repo, names, node, message, opts.get('local'),
                     opts.get('user'), date, editor=editor)
 
-@command('tags', formatteropts, '', intents={INTENT_READONLY})
+@command(
+    'tags', formatteropts, '',
+    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
+    intents={INTENT_READONLY})
 def tags(ui, repo, **opts):
     """list repository tags
 
@@ -5546,13 +5827,22 @@
     switch is used, a third column "local" is printed for local tags.
     When the -q/--quiet switch is used, only the tag name is printed.
 
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported in addition to the common template
+      keywords and functions such as ``{tag}``. See also
+      :hg:`help templates`.
+
+      :type:    String. ``local`` for local tags.
+
     Returns 0 on success.
     """
 
     opts = pycompat.byteskwargs(opts)
     ui.pager('tags')
     fm = ui.formatter('tags', opts)
-    contexthint = fm.contexthint('tag rev node type')
     hexfunc = fm.hexfunc
     tagtype = ""
 
@@ -5565,8 +5855,7 @@
             tagtype = 'local'
 
         fm.startitem()
-        if 'ctx' in contexthint:
-            fm.context(ctx=repo[n])
+        fm.context(repo=repo)
         fm.write('tag', '%s', t, label=label)
         fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s'
         fm.condwrite(not ui.quiet, 'rev node', fmt,
@@ -5580,7 +5869,8 @@
     [('p', 'patch', None, _('show patch')),
     ('g', 'git', None, _('use git extended diff format')),
     ] + templateopts,
-    _('[-p] [-g]'))
+    _('[-p] [-g]'),
+    helpcategory=command.CATEGORY_CHANGE_NAVIGATION)
 def tip(ui, repo, **opts):
     """show the tip revision (DEPRECATED)
 
@@ -5605,7 +5895,8 @@
 @command('unbundle',
     [('u', 'update', None,
      _('update to new branch head if changesets were unbundled'))],
-    _('[-u] FILE...'))
+    _('[-u] FILE...'),
+    helpcategory=command.CATEGORY_IMPORT_EXPORT)
 def unbundle(ui, repo, fname1, *fnames, **opts):
     """apply one or more bundle files
 
@@ -5642,14 +5933,16 @@
 
     return postincoming(ui, repo, modheads, opts.get(r'update'), None, None)
 
-@command('^update|up|checkout|co',
+@command('update|up|checkout|co',
     [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
     ('c', 'check', None, _('require clean working directory')),
     ('m', 'merge', None, _('merge uncommitted changes')),
     ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
     ('r', 'rev', '', _('revision'), _('REV'))
      ] + mergetoolopts,
-    _('[-C|-c|-m] [-d DATE] [[-r] REV]'))
+    _('[-C|-c|-m] [-d DATE] [[-r] REV]'),
+    helpcategory=command.CATEGORY_WORKING_DIRECTORY,
+    helpbasic=True)
 def update(ui, repo, node=None, **opts):
     """update working directory (or switch revisions)
 
@@ -5760,7 +6053,7 @@
                 ui.warn("(%s)\n" % obsfatemsg)
         return ret
 
-@command('verify', [])
+@command('verify', [], helpcategory=command.CATEGORY_MAINTENANCE)
 def verify(ui, repo):
     """verify the integrity of the repository
 
@@ -5779,10 +6072,27 @@
     """
     return hg.verify(repo)
 
-@command('version', [] + formatteropts, norepo=True,
-         intents={INTENT_READONLY})
+@command(
+    'version', [] + formatteropts, helpcategory=command.CATEGORY_HELP,
+    norepo=True, intents={INTENT_READONLY})
 def version_(ui, **opts):
-    """output version and copyright information"""
+    """output version and copyright information
+
+    .. container:: verbose
+
+      Template:
+
+      The following keywords are supported. See also :hg:`help templates`.
+
+      :extensions: List of extensions.
+      :ver:     String. Version number.
+
+      And each entry of ``{extensions}`` provides the following sub-keywords
+      in addition to ``{ver}``.
+
+      :bundled: Boolean. True if included in the release.
+      :name:    String. Extension name.
+    """
     opts = pycompat.byteskwargs(opts)
     if ui.verbose:
         ui.pager('version')
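
    Illustrative invocations of the template keywords documented above (not
    part of this patch); ``{extensions % "..."}`` expands the per-extension
    sub-keywords:

        $ hg version -T '{ver}\n'
        $ hg version --verbose -T '{extensions % "{name}\n"}'
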
@@ -5829,6 +6139,16 @@
 def loadcmdtable(ui, name, cmdtable):
     """Load command functions from specified cmdtable
     """
+    cmdtable = cmdtable.copy()
+    for cmd in list(cmdtable):
+        if not cmd.startswith('^'):
+            continue
+        ui.deprecwarn("old-style command registration '%s' in extension '%s'"
+                      % (cmd, name), '4.8')
+        entry = cmdtable.pop(cmd)
+        entry[0].helpbasic = True
+        cmdtable[cmd[1:]] = entry
+
     overrides = [cmd for cmd in cmdtable if cmd in table]
     if overrides:
         ui.warn(_("extension '%s' overrides commands: %s\n")
--- a/mercurial/commandserver.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/commandserver.py	Mon Oct 22 14:46:06 2018 -0400
@@ -26,7 +26,6 @@
 from . import (
     encoding,
     error,
-    pycompat,
     util,
 )
 from .utils import (
@@ -67,7 +66,7 @@
         self.out.flush()
 
     def __getattr__(self, attr):
-        if attr in ('isatty', 'fileno', 'tell', 'seek'):
+        if attr in (r'isatty', r'fileno', r'tell', r'seek'):
             raise AttributeError(attr)
         return getattr(self.out, attr)
 
@@ -150,8 +149,10 @@
             raise StopIteration
         return l
 
+    __next__ = next
+
     def __getattr__(self, attr):
-        if attr in ('isatty', 'fileno', 'tell', 'seek'):
+        if attr in (r'isatty', r'fileno', r'tell', r'seek'):
             raise AttributeError(attr)
         return getattr(self.in_, attr)
 
@@ -161,7 +162,7 @@
     based stream to fout.
     """
     def __init__(self, ui, repo, fin, fout):
-        self.cwd = pycompat.getcwd()
+        self.cwd = encoding.getcwd()
 
         # developer config: cmdserver.log
         logpath = ui.config("cmdserver", "log")
@@ -343,8 +344,8 @@
     random.seed()
 
 def _serverequest(ui, repo, conn, createcmdserver):
-    fin = conn.makefile('rb')
-    fout = conn.makefile('wb')
+    fin = conn.makefile(r'rb')
+    fout = conn.makefile(r'wb')
     sv = None
     try:
         sv = createcmdserver(repo, conn, fin, fout)
@@ -353,7 +354,7 @@
         # handle exceptions that may be raised by command server. most of
         # known exceptions are caught by dispatch.
         except error.Abort as inst:
-            ui.warn(_('abort: %s\n') % inst)
+            ui.error(_('abort: %s\n') % inst)
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
@@ -368,7 +369,7 @@
             cerr = sv.cerr
         else:
             cerr = channeledoutput(fout, 'e')
-        traceback.print_exc(file=cerr)
+        cerr.write(encoding.strtolocal(traceback.format_exc()))
         raise
     finally:
         fin.close()
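
The ``__next__ = next`` alias added above is the usual dual-protocol idiom:
Python 2's iterator protocol calls ``next()`` while Python 3 calls
``__next__()``. A minimal standalone sketch of the pattern (toy class, not
from this patch):

    class lineiter(object):
        """Toy line iterator usable under both Python 2 and 3."""
        def __init__(self, lines):
            self._it = iter(lines)

        def __iter__(self):
            return self

        def next(self):        # Python 2 entry point
            return next(self._it)

        __next__ = next        # Python 3 reuses the same implementation

    for line in lineiter(['a', 'b']):
        print(line)
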
--- a/mercurial/compat.h	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/compat.h	Mon Oct 22 14:46:06 2018 -0400
@@ -3,6 +3,7 @@
 
 #ifdef _WIN32
 #ifdef _MSC_VER
+#if _MSC_VER < 1900
 /* msvc 6.0 has problems */
 #define inline __inline
 #if defined(_WIN64)
@@ -21,6 +22,18 @@
 typedef unsigned long uint32_t;
 typedef unsigned __int64 uint64_t;
 #else
+/* VC++ 14 */
+#include <stdint.h>
+
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef int ssize_t;
+#endif
+#endif /* _MSC_VER < 1900 */
+
+#else
+/* not msvc */
 #include <stdint.h>
 #endif
 #else
--- a/mercurial/configitems.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/configitems.py	Mon Oct 22 14:46:06 2018 -0400
@@ -161,10 +161,6 @@
 coreconfigitem('bundle', 'mainreporoot',
     default='',
 )
-# bundle.reorder: experimental config
-coreconfigitem('bundle', 'reorder',
-    default='auto',
-)
 coreconfigitem('censor', 'policy',
     default='abort',
 )
@@ -190,6 +186,15 @@
 coreconfigitem('commands', 'grep.all-files',
     default=False,
 )
+coreconfigitem('commands', 'resolve.confirm',
+    default=False,
+)
+coreconfigitem('commands', 'resolve.explicit-re-merge',
+    default=False,
+)
+coreconfigitem('commands', 'resolve.mark-check',
+    default='none',
+)
 coreconfigitem('commands', 'show.aliasprefix',
     default=list,
 )
@@ -372,6 +377,9 @@
 coreconfigitem('devel', 'warn-config-unknown',
     default=None,
 )
+coreconfigitem('devel', 'debug.copies',
+    default=False,
+)
 coreconfigitem('devel', 'debug.extensions',
     default=False,
 )
@@ -447,9 +455,6 @@
 coreconfigitem('experimental', 'bundle2.pushback',
     default=False,
 )
-coreconfigitem('experimental', 'bundle2.stream',
-    default=False,
-)
 coreconfigitem('experimental', 'bundle2lazylocking',
     default=False,
 )
@@ -533,6 +538,9 @@
 coreconfigitem('experimental', 'mmapindexthreshold',
     default=None,
 )
+coreconfigitem('experimental', 'narrow',
+    default=False,
+)
 coreconfigitem('experimental', 'nonnormalparanoidcheck',
     default=False,
 )
@@ -566,6 +574,9 @@
 coreconfigitem('experimental', 'httppeer.advertise-v2',
     default=False,
 )
+coreconfigitem('experimental', 'httppeer.v2-encoder-order',
+    default=None,
+)
 coreconfigitem('experimental', 'httppostargs',
     default=False,
 )
@@ -584,18 +595,30 @@
 coreconfigitem('experimental', 'removeemptydirs',
     default=True,
 )
+coreconfigitem('experimental', 'revisions.prefixhexnode',
+    default=False,
+)
 coreconfigitem('experimental', 'revlogv2',
     default=None,
 )
+coreconfigitem('experimental', 'revisions.disambiguatewithin',
+    default=None,
+)
+coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
+    default=50000,
+)
+coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
+    default=100000,
+)
+coreconfigitem('experimental.server', 'stream-narrow-clones',
+    default=False,
+)
 coreconfigitem('experimental', 'single-head-per-branch',
     default=False,
 )
 coreconfigitem('experimental', 'sshserver.support-v2',
     default=False,
 )
-coreconfigitem('experimental', 'spacemovesdown',
-    default=False,
-)
 coreconfigitem('experimental', 'sparse-read',
     default=False,
 )
@@ -650,7 +673,7 @@
     default=None,
 )
 coreconfigitem('format', 'maxchainlen',
-    default=None,
+    default=dynamicdefault,
 )
 coreconfigitem('format', 'obsstore-version',
     default=None,
@@ -667,6 +690,9 @@
 coreconfigitem('format', 'usestore',
     default=True,
 )
+coreconfigitem('format', 'internal-phase',
+    default=False,
+)
 coreconfigitem('fsmonitor', 'warn_when_unused',
     default=True,
 )
@@ -726,6 +752,11 @@
 coreconfigitem('http_proxy', 'user',
     default=None,
 )
+
+coreconfigitem('http', 'timeout',
+    default=None,
+)
+
 coreconfigitem('logtoprocess', 'commandexception',
     default=None,
 )
@@ -759,6 +790,9 @@
 coreconfigitem('merge', 'preferancestor',
         default=lambda: ['*'],
 )
+coreconfigitem('merge', 'strict-capability-check',
+    default=False,
+)
 coreconfigitem('merge-tools', '.*',
     default=None,
     generic=True,
@@ -889,7 +923,7 @@
     default='hotpath',
 )
 coreconfigitem('profiling', 'time-track',
-    default='cpu',
+    default='real',
 )
 coreconfigitem('profiling', 'type',
     default='stat',
@@ -927,6 +961,9 @@
 coreconfigitem('push', 'pushvars.server',
     default=False,
 )
+coreconfigitem('storage', 'new-repo-backend',
+    default='revlogv1',
+)
 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
     default=True,
     alias=[('format', 'aggressivemergedeltas')],
@@ -952,6 +989,10 @@
 coreconfigitem('server', 'bundle1gd.push',
     default=None,
 )
+coreconfigitem('server', 'bundle2.stream',
+    default=True,
+    alias=[('experimental', 'bundle2.stream')]
+)
 coreconfigitem('server', 'compressionengines',
     default=list,
 )
@@ -1330,6 +1371,9 @@
 coreconfigitem('web', 'server-header',
     default=None,
 )
+coreconfigitem('web', 'static',
+    default=None,
+)
 coreconfigitem('web', 'staticurl',
     default=None,
 )
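
Several items above move between sections while keeping an ``alias`` back to
the old name (for example ``bundle2.stream``, now ``server.bundle2.stream``
with an alias to ``experimental.bundle2.stream``). A hedged sketch of the
same pattern in an extension; the section and option names are hypothetical:

    from mercurial import registrar

    configtable = {}
    configitem = registrar.configitem(configtable)

    # New canonical name; values written under the old spelling keep
    # working through the alias.
    configitem('myext', 'stream',
        default=True,
        alias=[('myext-old', 'stream')],
    )

Reading the value with ``ui.configbool('myext', 'stream')`` then picks up
settings made under either name.
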
--- a/mercurial/context.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/context.py	Mon Oct 22 14:46:06 2018 -0400
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     addednodeid,
-    bin,
     hex,
     modifiednodeid,
     nullid,
@@ -36,7 +35,6 @@
     phases,
     pycompat,
     repoview,
-    revlog,
     scmutil,
     sparse,
     subrepo,
@@ -193,25 +191,26 @@
         return self.rev() in obsmod.getrevs(self._repo, 'extinct')
 
     def orphan(self):
-        """True if the changeset is not obsolete but it's ancestor are"""
+        """True if the changeset is not obsolete, but its ancestor is"""
         return self.rev() in obsmod.getrevs(self._repo, 'orphan')
 
     def phasedivergent(self):
-        """True if the changeset try to be a successor of a public changeset
+        """True if the changeset tries to be a successor of a public changeset
 
-        Only non-public and non-obsolete changesets may be bumped.
+        Only non-public and non-obsolete changesets may be phase-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
 
     def contentdivergent(self):
-        """Is a successors of a changeset with multiple possible successors set
+        """Is a successor of a changeset with multiple possible successor sets
 
-        Only non-public and non-obsolete changesets may be divergent.
+        Only non-public and non-obsolete changesets may be content-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
 
     def isunstable(self):
-        """True if the changeset is either unstable, bumped or divergent"""
+        """True if the changeset is either orphan, phase-divergent or
+        content-divergent"""
         return self.orphan() or self.phasedivergent() or self.contentdivergent()
 
     def instabilities(self):
@@ -242,7 +241,7 @@
         parents = self._parents
         if len(parents) == 2:
             return parents[1]
-        return changectx(self._repo, nullrev)
+        return self._repo[nullrev]
 
     def _fileinfo(self, path):
         if r'_manifest' in self.__dict__:
@@ -344,7 +343,7 @@
             reversed = True
             ctx1, ctx2 = ctx2, ctx1
 
-        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
+        match = self._repo.narrowmatch(match)
         match = ctx2._matchstatus(ctx1, match)
         r = scmutil.status([], [], [], [], [], [], [])
         r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
@@ -381,73 +380,10 @@
     """A changecontext object makes access to data related to a particular
     changeset convenient. It represents a read-only context already present in
     the repo."""
-    def __init__(self, repo, changeid='.'):
-        """changeid is a revision number, node, or tag"""
+    def __init__(self, repo, rev, node):
         super(changectx, self).__init__(repo)
-
-        try:
-            if isinstance(changeid, int):
-                self._node = repo.changelog.node(changeid)
-                self._rev = changeid
-                return
-            elif changeid == 'null':
-                self._node = nullid
-                self._rev = nullrev
-                return
-            elif changeid == 'tip':
-                self._node = repo.changelog.tip()
-                self._rev = repo.changelog.rev(self._node)
-                return
-            elif (changeid == '.'
-                  or repo.local() and changeid == repo.dirstate.p1()):
-                # this is a hack to delay/avoid loading obsmarkers
-                # when we know that '.' won't be hidden
-                self._node = repo.dirstate.p1()
-                self._rev = repo.unfiltered().changelog.rev(self._node)
-                return
-            elif len(changeid) == 20:
-                try:
-                    self._node = changeid
-                    self._rev = repo.changelog.rev(changeid)
-                    return
-                except error.FilteredLookupError:
-                    changeid = hex(changeid) # for the error message
-                    raise
-                except LookupError:
-                    # check if it might have come from damaged dirstate
-                    #
-                    # XXX we could avoid the unfiltered if we had a recognizable
-                    # exception for filtered changeset access
-                    if (repo.local()
-                        and changeid in repo.unfiltered().dirstate.parents()):
-                        msg = _("working directory has unknown parent '%s'!")
-                        raise error.Abort(msg % short(changeid))
-                    changeid = hex(changeid) # for the error message
-
-            elif len(changeid) == 40:
-                try:
-                    self._node = bin(changeid)
-                    self._rev = repo.changelog.rev(self._node)
-                    return
-                except error.FilteredLookupError:
-                    raise
-                except (TypeError, LookupError):
-                    pass
-            else:
-                raise error.ProgrammingError(
-                        "unsupported changeid '%s' of type %s" %
-                        (changeid, type(changeid)))
-
-            # lookup failed
-        except (error.FilteredIndexError, error.FilteredLookupError):
-            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
-                                                % pycompat.bytestr(changeid))
-        except error.FilteredRepoLookupError:
-            raise
-        except IndexError:
-            pass
-        raise error.RepoLookupError(
-            _("unknown revision '%s'") % changeid)
+        self._rev = rev
+        self._node = node
 
     def __hash__(self):
         try:
@@ -481,8 +417,8 @@
         repo = self._repo
         p1, p2 = repo.changelog.parentrevs(self._rev)
         if p2 == nullrev:
-            return [changectx(repo, p1)]
-        return [changectx(repo, p1), changectx(repo, p2)]
+            return [repo[p1]]
+        return [repo[p1], repo[p2]]
 
     def changeset(self):
         c = self._changeset
@@ -533,11 +469,11 @@
         recursively walk children.
         """
         c = self._repo.changelog.children(self._node)
-        return [changectx(self._repo, x) for x in c]
+        return [self._repo[x] for x in c]
 
     def ancestors(self):
         for a in self._repo.changelog.ancestors([self._rev]):
-            yield changectx(self._repo, a)
+            yield self._repo[a]
 
     def descendants(self):
         """Recursively yield all children of the changeset.
@@ -545,7 +481,7 @@
         For just the immediate children, use children()
         """
         for d in self._repo.changelog.descendants([self._rev]):
-            yield changectx(self._repo, d)
+            yield self._repo[d]
 
     def filectx(self, path, fileid=None, filelog=None):
         """get a file context from this changeset"""
@@ -588,13 +524,7 @@
                     ''.join(_("      alternatively, use --config "
                               "merge.preferancestor=%s\n") %
                             short(n) for n in sorted(cahs) if n != anc))
-        return changectx(self._repo, anc)
-
-    def descendant(self, other):
-        msg = (b'ctx.descendant(other) is deprecated, '
-               b'use ctx.isancestorof(other)')
-        self._repo.ui.deprecwarn(msg, b'4.7')
-        return self.isancestorof(other)
+        return self._repo[anc]
 
     def isancestorof(self, other):
         """True if this changeset is an ancestor of other"""
@@ -612,7 +542,7 @@
                 return
             match.bad(fn, _('no such file in rev %s') % self)
 
-        m = matchmod.badmatch(match, bad)
+        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
         return self._manifest.walk(m)
 
     def matches(self, match):
@@ -625,7 +555,6 @@
     workingfilectx: a filecontext that represents files from the working
                     directory,
     memfilectx: a filecontext that represents files in-memory,
-    overlayfilectx: duplicate another filecontext with some fields overridden.
     """
     @propertycache
     def _filelog(self):
@@ -800,6 +729,8 @@
         mfl = repo.manifestlog
         # fetch the linkrev
         lkr = self.linkrev()
+        if srcrev == lkr:
+            return lkr
         # hack to reuse ancestor computation when searching for renames
         memberanc = getattr(self, '_ancestrycontext', None)
         iteranc = None
@@ -840,12 +771,12 @@
         'linkrev-shadowing' when a file revision is used by multiple
         changesets.
         """
-        lkr = self.linkrev()
         attrs = vars(self)
-        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
-        if noctx or self.rev() == lkr:
+        hastoprev = (r'_changeid' in attrs or r'_changectx' in attrs)
+        if hastoprev:
+            return self._adjustlinkrev(self.rev(), inclusive=True)
+        else:
             return self.linkrev()
-        return self._adjustlinkrev(self.rev(), inclusive=True)
 
     def introfilectx(self):
         """Return filectx having identical contents, but pointing to the
@@ -974,7 +905,7 @@
        filerevision convenient."""
     def __init__(self, repo, path, changeid=None, fileid=None,
                  filelog=None, changectx=None):
-        """changeid can be a changeset revision, node, or tag.
+        """changeid must be a revision number, if specified.
            fileid can be a file revision or node."""
         self._repo = repo
         self._path = path
@@ -998,7 +929,7 @@
     @propertycache
     def _changectx(self):
         try:
-            return changectx(self._repo, self._changeid)
+            return self._repo[self._changeid]
         except error.FilteredRepoLookupError:
             # Linkrev may point to any revision in the repository.  When the
             # repository is filtered this may lead to `filectx` trying to build
@@ -1016,7 +947,7 @@
             # Linkrevs have several serious troubles with filtering that are
             # complicated to solve. Proper handling of the issue here should be
             # considered when solving linkrev issue are on the table.
-            return changectx(self._repo.unfiltered(), self._changeid)
+            return self._repo.unfiltered()[self._changeid]
 
     def filectx(self, fileid, changeid=None):
         '''opens an arbitrary revision of the file without
@@ -1054,7 +985,7 @@
 
         renamed = self._filelog.renamed(self._filenode)
         if not renamed:
-            return renamed
+            return None
 
         if self.rev() == self.linkrev():
             return renamed
@@ -1237,11 +1168,12 @@
 
     def walk(self, match):
         '''Generates matching file names.'''
-        return sorted(self._repo.dirstate.walk(match,
+        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                                subrepos=sorted(self.substate),
                                                unknown=True, ignored=False))
 
     def matches(self, match):
+        match = self._repo.narrowmatch(match)
         ds = self._repo.dirstate
         return sorted(f for f in ds.matches(match) if ds[f] != 'r')
 
@@ -1250,7 +1182,7 @@
             yield p
         for a in self._repo.changelog.ancestors(
             [p.rev() for p in self._parents]):
-            yield changectx(self._repo, a)
+            yield self._repo[a]
 
     def markcommitted(self, node):
         """Perform post-commit cleanup necessary after committing this ctx
@@ -1307,7 +1239,9 @@
         p = self._repo.dirstate.parents()
         if p[1] == nullid:
             p = p[:-1]
-        return [changectx(self._repo, x) for x in p]
+        # use unfiltered repo to delay/avoid loading obsmarkers
+        unfi = self._repo.unfiltered()
+        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
 
     def _fileinfo(self, path):
         # populate __dict__['_manifest'] as workingctx has no _manifestdelta
@@ -1903,23 +1837,28 @@
         # Test that each new directory to be created to write this path from p2
         # is not a file in p1.
         components = path.split('/')
-        for i in xrange(len(components)):
+        for i in pycompat.xrange(len(components)):
             component = "/".join(components[0:i])
-            if component in self.p1():
+            if component in self.p1() and self._cache[component]['exists']:
                 fail(path, component)
 
         # Test the other direction -- that this path from p2 isn't a directory
         # in p1 (test that p1 doesn't have any paths matching `path/*`).
         match = matchmod.match('/', '', [path + '/'], default=b'relpath')
         matches = self.p1().manifest().matches(match)
-        if len(matches) > 0:
-            if len(matches) == 1 and matches.keys()[0] == path:
+        mfiles = matches.keys()
+        if len(mfiles) > 0:
+            if len(mfiles) == 1 and mfiles[0] == path:
+                return
+            # omit the files which are deleted in current IMM wctx
+            mfiles = [m for m in mfiles if self._cache[m]['exists']]
+            if not mfiles:
                 return
             raise error.Abort("error: file '%s' cannot be written because "
                               " '%s/' is a folder in %s (containing %d "
                               "entries: %s)"
-                              % (path, path, self.p1(), len(matches),
-                                 ', '.join(matches.keys())))
+                              % (path, path, self.p1(), len(mfiles),
+                                 ', '.join(mfiles)))
 
     def write(self, path, data, flags='', **kwargs):
         if data is None:
@@ -1929,8 +1868,13 @@
                         flags=flags)
 
     def setflags(self, path, l, x):
+        flag = ''
+        if l:
+            flag = 'l'
+        elif x:
+            flag = 'x'
         self._markdirty(path, exists=True, date=dateutil.makedate(),
-                        flags=(l and 'l' or '') + (x and 'x' or ''))
+                        flags=flag)
 
     def remove(self, path):
         self._markdirty(path, exists=False)
@@ -2037,6 +1981,13 @@
         return keys
 
     def _markdirty(self, path, exists, data=None, date=None, flags=''):
+        # data not provided, let's see if we already have some; if not, let's
+        # grab it from our underlying context, so that we always have data if
+        # the file is marked as existing.
+        if exists and data is None:
+            oldentry = self._cache.get(path) or {}
+            data = oldentry.get('data') or self._wrappedctx[path].data()
+
         self._cache[path] = {
             'exists': exists,
             'data': data,
@@ -2117,8 +2068,8 @@
     """
     def __init__(self, repo, changes,
                  text="", user=None, date=None, extra=None):
-        super(workingctx, self).__init__(repo, text, user, date, extra,
-                                         changes)
+        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
+                                               changes)
 
     def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
         """Return matched files only in ``self._status``
@@ -2273,17 +2224,10 @@
         man = pctx.manifest().copy()
 
         for f in self._status.modified:
-            p1node = nullid
-            p2node = nullid
-            p = pctx[f].parents() # if file isn't in pctx, check p2?
-            if len(p) > 0:
-                p1node = p[0].filenode()
-                if len(p) > 1:
-                    p2node = p[1].filenode()
-            man[f] = revlog.hash(self[f].data(), p1node, p2node)
+            man[f] = modifiednodeid
 
         for f in self._status.added:
-            man[f] = revlog.hash(self[f].data(), nullid, nullid)
+            man[f] = addednodeid
 
         for f in self._status.removed:
             if f in man:
@@ -2355,76 +2299,6 @@
         """wraps repo.wwrite"""
         self._data = data
 
-class overlayfilectx(committablefilectx):
-    """Like memfilectx but take an original filectx and optional parameters to
-    override parts of it. This is useful when fctx.data() is expensive (i.e.
-    flag processor is expensive) and raw data, flags, and filenode could be
-    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
-    """
-
-    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
-                 copied=None, ctx=None):
-        """originalfctx: filecontext to duplicate
-
-        datafunc: None or a function to override data (file content). It is a
-        function to be lazy. path, flags, copied, ctx: None or overridden value
-
-        copied could be (path, rev), or False. copied could also be just path,
-        and will be converted to (path, nullid). This simplifies some callers.
-        """
-
-        if path is None:
-            path = originalfctx.path()
-        if ctx is None:
-            ctx = originalfctx.changectx()
-            ctxmatch = lambda: True
-        else:
-            ctxmatch = lambda: ctx == originalfctx.changectx()
-
-        repo = originalfctx.repo()
-        flog = originalfctx.filelog()
-        super(overlayfilectx, self).__init__(repo, path, flog, ctx)
-
-        if copied is None:
-            copied = originalfctx.renamed()
-            copiedmatch = lambda: True
-        else:
-            if copied and not isinstance(copied, tuple):
-                # repo._filecommit will recalculate copyrev so nullid is okay
-                copied = (copied, nullid)
-            copiedmatch = lambda: copied == originalfctx.renamed()
-
-        # When data, copied (could affect data), ctx (could affect filelog
-        # parents) are not overridden, rawdata, rawflags, and filenode may be
-        # reused (repo._filecommit should double check filelog parents).
-        #
-        # path, flags are not hashed in filelog (but in manifestlog) so they do
-        # not affect reusable here.
-        #
-        # If ctx or copied is overridden to a same value with originalfctx,
-        # still consider it's reusable. originalfctx.renamed() may be a bit
-        # expensive so it's not called unless necessary. Assuming datafunc is
-        # always expensive, do not call it for this "reusable" test.
-        reusable = datafunc is None and ctxmatch() and copiedmatch()
-
-        if datafunc is None:
-            datafunc = originalfctx.data
-        if flags is None:
-            flags = originalfctx.flags()
-
-        self._datafunc = datafunc
-        self._flags = flags
-        self._copied = copied
-
-        if reusable:
-            # copy extra fields from originalfctx
-            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
-            for attr_ in attrs:
-                if util.safehasattr(originalfctx, attr_):
-                    setattr(self, attr_, getattr(originalfctx, attr_))
-
-    def data(self):
-        return self._datafunc()
 
 class metadataonlyctx(committablectx):
     """Like memctx but it's reusing the manifest of different commit.
@@ -2463,11 +2337,11 @@
         # manifests of our commit parents
         mp1, mp2 = self.manifestctx().parents
         if p1 != nullid and p1.manifestnode() != mp1:
-            raise RuntimeError('can\'t reuse the manifest: '
-                               'its p1 doesn\'t match the new ctx p1')
+            raise RuntimeError(r"can't reuse the manifest: its p1 "
+                               r"doesn't match the new ctx p1")
         if p2 != nullid and p2.manifestnode() != mp2:
-            raise RuntimeError('can\'t reuse the manifest: '
-                               'its p2 doesn\'t match the new ctx p2')
+            raise RuntimeError(r"can't reuse the manifest: "
+                               r"its p2 doesn't match the new ctx p2")
 
         self._files = originalctx.files()
         self.substate = {}
@@ -2559,5 +2433,5 @@
 
     def write(self, data, flags, **kwargs):
         assert not flags
-        with open(self._path, "w") as f:
+        with open(self._path, "wb") as f:
             f.write(data)
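
With the changeid-parsing constructor removed, ``changectx`` only accepts an
already-resolved ``(rev, node)`` pair, and symbolic lookup happens behind
``repo[...]`` instead. A sketch of the convention the call sites above are
converted to, assuming an existing ``repo``:

    # Old: changectx(repo, '.') parsed ints, hashes, 'tip', 'null', etc.
    # New: changectx is a plain container; lookup lives in repo.__getitem__.
    ctx = repo['.']          # working-directory parent
    tip = repo['tip']
    p1 = ctx.parents()[0]    # parents are built as repo[p1rev], repo[p2rev]
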
--- a/mercurial/copies.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/copies.py	Mon Oct 22 14:46:06 2018 -0400
@@ -20,6 +20,9 @@
     scmutil,
     util,
 )
+from .utils import (
+    stringutil,
+)
 
 def _findlimit(repo, a, b):
     """
@@ -160,9 +163,17 @@
     """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
     # files might have to be traced back to the fctx parent of the last
     # one-side-only changeset, but not further back than that
-    limit = _findlimit(a._repo, a.rev(), b.rev())
+    repo = a._repo
+    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
+    dbg = repo.ui.debug
+    if debug:
+        dbg('debug.copies:    looking into rename from %s to %s\n'
+            % (a, b))
+    limit = _findlimit(repo, a.rev(), b.rev())
     if limit is None:
         limit = -1
+    if debug:
+        dbg('debug.copies:      search limit: %d\n' % limit)
     am = a.manifest()
 
     # find where new files came from
@@ -183,12 +194,26 @@
     missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
 
     ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
+
+    if debug:
+        dbg('debug.copies:      missing file to search: %d\n' % len(missing))
+
     for f in missing:
+        if debug:
+            dbg('debug.copies:        tracing file: %s\n' % f)
         fctx = b[f]
         fctx._ancestrycontext = ancestrycontext
+
+        if debug:
+            start = util.timer()
         ofctx = _tracefile(fctx, am, limit)
         if ofctx:
+            if debug:
+                dbg('debug.copies:          rename of: %s\n' % ofctx._path)
             cm[f] = ofctx.path()
+        if debug:
+            dbg('debug.copies:          time: %f seconds\n'
+                % (util.timer() - start))
     return cm
 
 def _forwardcopies(a, b, match=None):
@@ -223,13 +248,24 @@
 
 def pathcopies(x, y, match=None):
     """find {dst@y: src@x} copy mapping for directed compare"""
+    repo = x._repo
+    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
+    if debug:
+        repo.ui.debug('debug.copies: searching copies from %s to %s\n'
+                      % (x, y))
     if x == y or not x or not y:
         return {}
     a = y.ancestor(x)
     if a == x:
+        if debug:
+            repo.ui.debug('debug.copies: search mode: forward\n')
         return _forwardcopies(x, y, match=match)
     if a == y:
+        if debug:
+            repo.ui.debug('debug.copies: search mode: backward\n')
         return _backwardrenames(x, y)
+    if debug:
+        repo.ui.debug('debug.copies: search mode: combined\n')
     return _chain(x, y, _backwardrenames(x, a),
                   _forwardcopies(a, y, match=match))
 
@@ -255,10 +291,6 @@
     if u2:
         repo.ui.debug("%s:\n   %s\n" % (header % 'other', "\n   ".join(u2)))
 
-    narrowmatch = repo.narrowmatch()
-    if not narrowmatch.always():
-        u1 = [f for f in u1 if narrowmatch(f)]
-        u2 = [f for f in u2 if narrowmatch(f)]
     return u1, u2
 
 def _makegetfctx(ctx):
@@ -366,19 +398,22 @@
         return repo.dirstate.copies(), {}, {}, {}, {}
 
     copytracing = repo.ui.config('experimental', 'copytrace')
+    boolctrace = stringutil.parsebool(copytracing)
 
     # Copy trace disabling is explicitly below the node == p1 logic above
     # because the logic above is required for a simple copy to be kept across a
     # rebase.
-    if copytracing == 'off':
-        return {}, {}, {}, {}, {}
-    elif copytracing == 'heuristics':
+    if copytracing == 'heuristics':
         # Do full copytracing if only non-public revisions are involved as
         # that will be fast enough and will also cover the copies which could
         # be missed by heuristics
         if _isfullcopytraceable(repo, c1, base):
             return _fullcopytracing(repo, c1, c2, base)
         return _heuristicscopytracing(repo, c1, c2, base)
+    elif boolctrace is False:
+        # stringutil.parsebool() returns None when it is unable to parse the
+        # value, so copytracing stays enabled in that case; only an explicit
+        # false setting disables it
+        return {}, {}, {}, {}, {}
     else:
         return _fullcopytracing(repo, c1, c2, base)
 
@@ -461,8 +496,8 @@
             }
 
     # find interesting file sets from manifests
-    addedinm1 = m1.filesnotin(mb)
-    addedinm2 = m2.filesnotin(mb)
+    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
+    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
     bothnew = sorted(addedinm1 & addedinm2)
     if tca == base:
         # unmatched file from base
@@ -593,16 +628,16 @@
             continue
         elif dsrc in d1 and ddst in d1:
             # directory wasn't entirely moved locally
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc)
         elif dsrc in d2 and ddst in d2:
             # directory wasn't entirely moved remotely
-            invalid.add(dsrc + "/")
-        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
+            invalid.add(dsrc)
+        elif dsrc in dirmove and dirmove[dsrc] != ddst:
             # files from the same directory moved to two different places
-            invalid.add(dsrc + "/")
+            invalid.add(dsrc)
         else:
             # looks good so far
-            dirmove[dsrc + "/"] = ddst + "/"
+            dirmove[dsrc] = ddst
 
     for i in invalid:
         if i in dirmove:
@@ -612,6 +647,8 @@
     if not dirmove:
         return copy, {}, diverge, renamedelete, {}
 
+    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}
+
     for d in dirmove:
         repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                       (d, dirmove[d]))
@@ -808,11 +845,10 @@
     of = None
     seen = {f}
     for oc in getsrcfctx(f, msrc[f]).ancestors():
-        ocr = oc.linkrev()
         of = oc.path()
         if of in seen:
             # check limit late - grab last rename before
-            if ocr < limit:
+            if oc.linkrev() < limit:
                 break
             continue
         seen.add(of)
@@ -868,8 +904,10 @@
     copies between fromrev and rev.
     """
     exclude = {}
+    ctraceconfig = repo.ui.config('experimental', 'copytrace')
+    bctrace = stringutil.parsebool(ctraceconfig)
     if (skiprev is not None and
-        repo.ui.config('experimental', 'copytrace') != 'off'):
+        (ctraceconfig == 'heuristics' or bctrace or bctrace is None)):
         # copytrace='off' skips this line, but not the entire function because
         # the line below is O(size of the repo) during a rebase, while the rest
         # of the function is much faster (and is required for carrying copy
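
With the ``parsebool`` gate, ``experimental.copytrace`` disables copy tracing
only for values that parse as an explicit false; ``heuristics`` selects the
heuristic tracer, and anything unparseable (``parsebool`` returns None) falls
through to full tracing. A small sketch of the parse outcomes, assuming a
Mercurial checkout on the path:

    from mercurial.utils import stringutil

    for value in ('off', 'no', '0', 'on', 'heuristics', 'bogus'):
        # 'off'/'no'/'0' -> False (tracing disabled); 'on' -> True;
        # 'heuristics' and 'bogus' -> None ('heuristics' is special-cased
        # before the boolean check; unparseable values keep tracing on).
        print(value, stringutil.parsebool(value))
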
--- a/mercurial/crecord.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/crecord.py	Mon Oct 22 14:46:06 2018 -0400
@@ -713,6 +713,24 @@
         self.currentselecteditem = nextitem
         self.recenterdisplayedarea()
 
+    def nextsametype(self):
+        currentitem = self.currentselecteditem
+        sametype = lambda item: isinstance(item, type(currentitem))
+        nextitem = currentitem.nextitem()
+
+        while nextitem is not None and not sametype(nextitem):
+            nextitem = nextitem.nextitem()
+
+        if nextitem is None:
+            nextitem = currentitem
+        else:
+            parent = nextitem.parentitem()
+            if parent.folded:
+                self.togglefolded(parent)
+
+        self.currentselecteditem = nextitem
+        self.recenterdisplayedarea()
+
     def rightarrowevent(self):
         """
         select (if possible) the first of this item's child-items.
@@ -1027,8 +1045,8 @@
     def _getstatuslinesegments(self):
         """-> [str]. return segments"""
         selected = self.currentselecteditem.applied
-        spaceselect = _('space: select')
-        spacedeselect = _('space: deselect')
+        spaceselect = _('space/enter: select')
+        spacedeselect = _('space/enter: deselect')
         # Format the selected label into a place as long as the longer of the
         # two possible labels.  This may vary by language.
         spacelen = max(len(spaceselect), len(spacedeselect))
@@ -1433,6 +1451,7 @@
 the following are valid keystrokes:
 
                 [space] : (un-)select item ([~]/[x] = partly/fully applied)
+                [enter] : (un-)select item and go to next item of same type
                       A : (un-)select all items
     up/down-arrow [k/j] : go to previous/next unfolded item
         pgup/pgdn [K/J] : go to previous/next item of same type
@@ -1686,8 +1705,9 @@
             return True
         elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
             self.toggleapply()
-            if self.ui.configbool('experimental', 'spacemovesdown'):
-                self.downarrowevent()
+        elif keypressed in ['\n', 'KEY_ENTER']:
+            self.toggleapply()
+            self.nextsametype()
         elif keypressed in ['A']:
             self.toggleall()
         elif keypressed in ['e']:
--- a/mercurial/dagop.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/dagop.py	Mon Oct 22 14:46:06 2018 -0400
@@ -9,6 +9,9 @@
 
 import heapq
 
+from .node import (
+    nullrev,
+)
 from .thirdparty import (
     attr,
 )
@@ -195,7 +198,7 @@
     """Build map of 'rev -> child revs', offset from startrev"""
     cl = repo.changelog
     nullrev = node.nullrev
-    descmap = [[] for _rev in xrange(startrev, len(cl))]
+    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
     for currev in cl.revs(startrev + 1):
         p1rev, p2rev = cl.parentrevs(currev)
         if p1rev >= startrev:
@@ -225,6 +228,37 @@
                                         startdepth, stopdepth)
     return generatorset(gen, iterasc=True)
 
+def descendantrevs(revs, revsfn, parentrevsfn):
+    """Generate revision number descendants in revision order.
+
+    Yields revision numbers starting with a child of some rev in
+    ``revs``. Results are ordered by revision number and are
+    therefore topological. Each revision is not considered a descendant
+    of itself.
+
+    ``revsfn`` is a callable that, with no arguments, iterates over all
+    revision numbers and, with a ``start`` argument, iterates over revision
+    numbers beginning with that value.
+
+    ``parentrevsfn`` is a callable that receives a revision number and
+    returns an iterable of parent revision numbers, whose values may include
+    nullrev.
+    """
+    first = min(revs)
+
+    if first == nullrev:
+        for rev in revsfn():
+            yield rev
+        return
+
+    seen = set(revs)
+    for rev in revsfn(start=first + 1):
+        for prev in parentrevsfn(rev):
+            if prev != nullrev and prev in seen:
+                seen.add(rev)
+                yield rev
+                break
+
 def _reachablerootspure(repo, minroot, roots, heads, includepath):
     """return (heads(::<roots> and ::<heads>))
 
@@ -435,7 +469,7 @@
         for idx, (parent, blocks) in enumerate(pblocks):
             for (a1, a2, b1, b2), _t in blocks:
                 if a2 - a1 >= b2 - b1:
-                    for bk in xrange(b1, b2):
+                    for bk in pycompat.xrange(b1, b2):
                         if child.fctxs[bk] == childfctx:
                             ak = min(a1 + (bk - b1), a2 - 1)
                             child.fctxs[bk] = parent.fctxs[ak]
@@ -448,7 +482,7 @@
         # line.
         for parent, blocks in remaining:
             for a1, a2, b1, b2 in blocks:
-                for bk in xrange(b1, b2):
+                for bk in pycompat.xrange(b1, b2):
                     if child.fctxs[bk] == childfctx:
                         ak = min(a1 + (bk - b1), a2 - 1)
                         child.fctxs[bk] = parent.fctxs[ak]
@@ -715,3 +749,99 @@
     for g in groups:
         for r in g[0]:
             yield r
+
+def headrevs(revs, parentsfn):
+    """Resolve the set of heads from a set of revisions.
+
+    Receives an iterable of revision numbers and a callable that receives a
+    revision number and returns an iterable of parent revision numbers, possibly
+    including nullrev.
+
+    Returns a set of revision numbers that are DAG heads within the passed
+    subset.
+
+    ``nullrev`` is never included in the returned set, even if it is provided in
+    the input set.
+    """
+    headrevs = set(revs)
+
+    for rev in revs:
+        for prev in parentsfn(rev):
+            headrevs.discard(prev)
+
+    headrevs.discard(node.nullrev)
+
+    return headrevs
+
+def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
+    """Returns the set of all revs that have no children with control.
+
+    ``revsfn`` is a callable that with no arguments returns an iterator over
+    all revision numbers in topological order. With a ``start`` argument, it
+    returns revision numbers starting at that number.
+
+    ``parentrevsfn`` is a callable that receives a revision number and returns
+    an iterable of parent revision numbers, where values can include nullrev.
+
+    ``startrev`` is a revision number at which to start the search.
+
+    ``stoprevs`` is an iterable of revision numbers that, when encountered,
+    will stop DAG traversal beyond them. Parents of revisions in this
+    collection will be heads.
+    """
+    if startrev is None:
+        startrev = nullrev
+
+    stoprevs = set(stoprevs or [])
+
+    reachable = {startrev}
+    heads = {startrev}
+
+    for rev in revsfn(start=startrev + 1):
+        for prev in parentrevsfn(rev):
+            if prev in reachable:
+                if rev not in stoprevs:
+                    reachable.add(rev)
+                heads.add(rev)
+
+            if prev in heads and prev not in stoprevs:
+                heads.remove(prev)
+
+    return heads
+
+def linearize(revs, parentsfn):
+    """Linearize and topologically sort a list of revisions.
+
+    The linearization process tries to create long runs of revs where a child
+    rev comes immediately after its first parent. This is done by visiting the
+    heads of the revs in inverse topological order, and for each visited rev,
+    visiting its second parent, then its first parent, then adding the rev
+    itself to the output list.
+
+    Returns a list of revision numbers.
+    """
+    visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
+    finished = set()
+    result = []
+
+    while visit:
+        rev = visit.pop()
+        if rev < 0:
+            rev = -rev - 1
+
+            if rev not in finished:
+                result.append(rev)
+                finished.add(rev)
+
+        else:
+            visit.append(-rev - 1)
+
+            for prev in parentsfn(rev):
+                if prev == node.nullrev or prev not in revs or prev in finished:
+                    continue
+
+                visit.append(prev)
+
+    assert len(result) == len(revs)
+
+    return result
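
These helpers replace the revlog-bound classes in ``dagutil`` (deleted below)
with pure functions over ``parentsfn``/``revsfn`` callbacks, which makes them
easy to exercise on a toy DAG. A sketch, assuming a Mercurial checkout on the
path; -1 plays the role of nullrev:

    from mercurial import dagop

    # Toy DAG: 0 <- 1 <- 2 and 0 <- 3, so the heads are 2 and 3.
    parents = {0: [-1], 1: [0], 2: [1], 3: [0]}
    parentsfn = lambda rev: parents[rev]
    revsfn = lambda start=0: iter(range(start, 4))

    print(dagop.headrevs({0, 1, 2, 3}, parentsfn))              # {2, 3}
    print(dagop.linearize({0, 1, 2, 3}, parentsfn))             # [0, 1, 2, 3]
    print(list(dagop.descendantrevs({0}, revsfn, parentsfn)))   # [1, 2, 3]
    print(dagop.headrevssubset(revsfn, parentsfn, startrev=0))  # {2, 3}
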
--- a/mercurial/dagparser.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/dagparser.py	Mon Oct 22 14:46:06 2018 -0400
@@ -222,7 +222,7 @@
         elif c == '+':
             c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
             n = int(digs)
-            for i in xrange(0, n):
+            for i in pycompat.xrange(0, n):
                 yield 'n', (r, [p1])
                 p1 = r
                 r += 1
--- a/mercurial/dagutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,287 +0,0 @@
-# dagutil.py - dag utilities for mercurial
-#
-# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
-# and Peter Arrenbrecht <peter@arrenbrecht.ch>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-from .i18n import _
-from .node import nullrev
-
-class basedag(object):
-    '''generic interface for DAGs
-
-    terms:
-    "ix" (short for index) identifies a nodes internally,
-    "id" identifies one externally.
-
-    All params are ixs unless explicitly suffixed otherwise.
-    Pluralized params are lists or sets.
-    '''
-
-    def __init__(self):
-        self._inverse = None
-
-    def nodeset(self):
-        '''set of all node ixs'''
-        raise NotImplementedError
-
-    def heads(self):
-        '''list of head ixs'''
-        raise NotImplementedError
-
-    def parents(self, ix):
-        '''list of parents ixs of ix'''
-        raise NotImplementedError
-
-    def inverse(self):
-        '''inverse DAG, where parents becomes children, etc.'''
-        raise NotImplementedError
-
-    def ancestorset(self, starts, stops=None):
-        '''
-        set of all ancestors of starts (incl), but stop walk at stops (excl)
-        '''
-        raise NotImplementedError
-
-    def descendantset(self, starts, stops=None):
-        '''
-        set of all descendants of starts (incl), but stop walk at stops (excl)
-        '''
-        return self.inverse().ancestorset(starts, stops)
-
-    def headsetofconnecteds(self, ixs):
-        '''
-        subset of connected list of ixs so that no node has a descendant in it
-
-        By "connected list" we mean that if an ancestor and a descendant are in
-        the list, then so is at least one path connecting them.
-        '''
-        raise NotImplementedError
-
-    def externalize(self, ix):
-        '''return a node id'''
-        return self._externalize(ix)
-
-    def externalizeall(self, ixs):
-        '''return a list of (or set if given a set) of node ids'''
-        ids = self._externalizeall(ixs)
-        if isinstance(ixs, set):
-            return set(ids)
-        return list(ids)
-
-    def internalize(self, id):
-        '''return a node ix'''
-        return self._internalize(id)
-
-    def internalizeall(self, ids, filterunknown=False):
-        '''return a list of (or set if given a set) of node ixs'''
-        ixs = self._internalizeall(ids, filterunknown)
-        if isinstance(ids, set):
-            return set(ixs)
-        return list(ixs)
-
-
-class genericdag(basedag):
-    '''generic implementations for DAGs'''
-
-    def ancestorset(self, starts, stops=None):
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            n = pending.pop()
-            if n not in seen and n not in stops:
-                seen.add(n)
-                pending.extend(self.parents(n))
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        hds = set(ixs)
-        if not hds:
-            return hds
-        for n in ixs:
-            for p in self.parents(n):
-                hds.discard(p)
-        assert hds
-        return hds
-
-
-class revlogbaseddag(basedag):
-    '''generic dag interface to a revlog'''
-
-    def __init__(self, revlog, nodeset):
-        basedag.__init__(self)
-        self._revlog = revlog
-        self._heads = None
-        self._nodeset = nodeset
-
-    def nodeset(self):
-        return self._nodeset
-
-    def heads(self):
-        if self._heads is None:
-            self._heads = self._getheads()
-        return self._heads
-
-    def _externalize(self, ix):
-        return self._revlog.index[ix][7]
-    def _externalizeall(self, ixs):
-        idx = self._revlog.index
-        return [idx[i][7] for i in ixs]
-
-    def _internalize(self, id):
-        ix = self._revlog.rev(id)
-        if ix == nullrev:
-            raise LookupError(id, self._revlog.indexfile, _('nullid'))
-        return ix
-    def _internalizeall(self, ids, filterunknown):
-        rl = self._revlog
-        if filterunknown:
-            return [r for r in map(rl.nodemap.get, ids)
-                    if (r is not None
-                        and r != nullrev
-                        and r not in rl.filteredrevs)]
-        return [self._internalize(i) for i in ids]
-
-
-class revlogdag(revlogbaseddag):
-    '''dag interface to a revlog'''
-
-    def __init__(self, revlog, localsubset=None):
-        revlogbaseddag.__init__(self, revlog, set(revlog))
-        self._heads = localsubset
-
-    def _getheads(self):
-        return [r for r in self._revlog.headrevs() if r != nullrev]
-
-    def parents(self, ix):
-        rlog = self._revlog
-        idx = rlog.index
-        revdata = idx[ix]
-        prev = revdata[5]
-        if prev != nullrev:
-            prev2 = revdata[6]
-            if prev2 == nullrev:
-                return [prev]
-            return [prev, prev2]
-        prev2 = revdata[6]
-        if prev2 != nullrev:
-            return [prev2]
-        return []
-
-    def inverse(self):
-        if self._inverse is None:
-            self._inverse = inverserevlogdag(self)
-        return self._inverse
-
-    def ancestorset(self, starts, stops=None):
-        rlog = self._revlog
-        idx = rlog.index
-        if stops:
-            stops = set(stops)
-        else:
-            stops = set()
-        seen = set()
-        pending = list(starts)
-        while pending:
-            rev = pending.pop()
-            if rev not in seen and rev not in stops:
-                seen.add(rev)
-                revdata = idx[rev]
-                for i in [5, 6]:
-                    prev = revdata[i]
-                    if prev != nullrev:
-                        pending.append(prev)
-        return seen
-
-    def headsetofconnecteds(self, ixs):
-        if not ixs:
-            return set()
-        rlog = self._revlog
-        idx = rlog.index
-        headrevs = set(ixs)
-        for rev in ixs:
-            revdata = idx[rev]
-            for i in [5, 6]:
-                prev = revdata[i]
-                if prev != nullrev:
-                    headrevs.discard(prev)
-        assert headrevs
-        return headrevs
-
-    def linearize(self, ixs):
-        '''linearize and topologically sort a list of revisions
-
-        The linearization process tries to create long runs of revs where
-        a child rev comes immediately after its first parent. This is done by
-        visiting the heads of the given revs in inverse topological order,
-        and for each visited rev, visiting its second parent, then its first
-        parent, then adding the rev itself to the output list.
-        '''
-        sorted = []
-        visit = list(self.headsetofconnecteds(ixs))
-        visit.sort(reverse=True)
-        finished = set()
-
-        while visit:
-            cur = visit.pop()
-            if cur < 0:
-                cur = -cur - 1
-                if cur not in finished:
-                    sorted.append(cur)
-                    finished.add(cur)
-            else:
-                visit.append(-cur - 1)
-                visit += [p for p in self.parents(cur)
-                          if p in ixs and p not in finished]
-        assert len(sorted) == len(ixs)
-        return sorted
-
-
-class inverserevlogdag(revlogbaseddag, genericdag):
-    '''inverse of an existing revlog dag; see revlogdag.inverse()'''
-
-    def __init__(self, orig):
-        revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
-        self._orig = orig
-        self._children = {}
-        self._roots = []
-        self._walkfrom = len(self._revlog) - 1
-
-    def _walkto(self, walkto):
-        rev = self._walkfrom
-        cs = self._children
-        roots = self._roots
-        idx = self._revlog.index
-        while rev >= walkto:
-            data = idx[rev]
-            isroot = True
-            for prev in [data[5], data[6]]: # parent revs
-                if prev != nullrev:
-                    cs.setdefault(prev, []).append(rev)
-                    isroot = False
-            if isroot:
-                roots.append(rev)
-            rev -= 1
-        self._walkfrom = rev
-
-    def _getheads(self):
-        self._walkto(nullrev)
-        return self._roots
-
-    def parents(self, ix):
-        if ix is None:
-            return []
-        if ix <= self._walkfrom:
-            self._walkto(ix)
-        return self._children.get(ix, [])
-
-    def inverse(self):
-        return self._orig
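
dagutil.py is deleted outright here; in-core callers switch to revset queries (the debugdiscovery hunk below replaces dag.ancestorset() plus headsetofconnecteds() with repo.revs('heads(::%ln)', common)). A minimal standalone sketch of the walk the deleted ancestorset() performed, with illustrative names only:

    def ancestorset(parents, starts, stops=None):
        # Iterative DFS over a parents() callback: collect everything
        # reachable from "starts" without walking through "stops".
        stops = set(stops or ())
        seen = set()
        pending = list(starts)
        while pending:
            rev = pending.pop()
            if rev not in seen and rev not in stops:
                seen.add(rev)
                pending.extend(parents(rev))
        return seen

    # Example DAG: 2 and 3 both descend from 1, which descends from 0.
    parentmap = {0: [], 1: [0], 2: [1], 3: [1]}
    print(sorted(ancestorset(parentmap.get, [2, 3])))  # [0, 1, 2, 3]
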
--- a/mercurial/debugcommands.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/debugcommands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -32,9 +32,6 @@
     nullrev,
     short,
 )
-from .thirdparty import (
-    cbor,
-)
 from . import (
     bundle2,
     changegroup,
@@ -42,13 +39,12 @@
     color,
     context,
     dagparser,
-    dagutil,
     encoding,
     error,
     exchange,
     extensions,
     filemerge,
-    fileset,
+    filesetlang,
     formatter,
     hg,
     httppeer,
@@ -84,11 +80,16 @@
     wireprotov2peer,
 )
 from .utils import (
+    cborutil,
     dateutil,
     procutil,
     stringutil,
 )
 
+from .revlogutils import (
+    deltas as deltautil
+)
+
 release = lockmod.release
 
 command = registrar.command()
@@ -98,7 +99,7 @@
     """find the ancestor revision of two revisions in a given index"""
     if len(args) == 3:
         index, rev1, rev2 = args
-        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
+        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
         lookup = r.lookup
     elif len(args) == 2:
         if not repo:
@@ -177,7 +178,8 @@
     if mergeable_file:
         linesperrev = 2
         # make a file with k lines per rev
-        initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
+        initialmergedlines = ['%d' % i
+                              for i in pycompat.xrange(0, total * linesperrev)]
         initialmergedlines.append("")
 
     tags = []
@@ -501,7 +503,7 @@
     spaces = opts.get(r'spaces')
     dots = opts.get(r'dots')
     if file_:
-        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
+        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                              file_)
         revs = set((int(r) for r in revs))
         def events():
@@ -556,7 +558,7 @@
         file_, rev = None, file_
     elif rev is None:
         raise error.CommandError('debugdata', _('invalid arguments'))
-    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
+    r = cmdutil.openstorage(repo, 'debugdata', file_, opts)
     try:
         ui.write(r.revision(r.lookup(rev), raw=True))
     except KeyError:
@@ -706,7 +708,7 @@
             largestblock = 0
             srchunks = 0
 
-            for revschunk in revlog._slicechunk(r, chain):
+            for revschunk in deltautil.slicechunk(r, chain):
                 srchunks += 1
                 blkend = start(revschunk[-1]) + length(revschunk[-1])
                 blksize = blkend - start(revschunk[0])
@@ -731,13 +733,16 @@
     fm.end()
 
 @command('debugdirstate|debugstate',
-    [('', 'nodates', None, _('do not display the saved mtime')),
-    ('', 'datesort', None, _('sort by saved mtime'))],
+    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
+     ('', 'dates', True, _('display the saved mtime')),
+     ('', 'datesort', None, _('sort by saved mtime'))],
     _('[OPTION]...'))
 def debugstate(ui, repo, **opts):
     """show the contents of the current dirstate"""
 
-    nodates = opts.get(r'nodates')
+    nodates = not opts[r'dates']
+    if opts.get(r'nodates') is not None:
+        nodates = True
     datesort = opts.get(r'datesort')
 
     timestr = ""
@@ -790,9 +795,10 @@
             if not opts.get('nonheads'):
                 ui.write(("unpruned common: %s\n") %
                          " ".join(sorted(short(n) for n in common)))
-                dag = dagutil.revlogdag(repo.changelog)
-                all = dag.ancestorset(dag.internalizeall(common))
-                common = dag.externalizeall(dag.headsetofconnecteds(all))
+
+                clnode = repo.changelog.node
+                common = repo.revs('heads(::%ln)', common)
+                common = {clnode(r) for r in common}
         else:
             nodes = None
             if pushedrevs:
@@ -887,15 +893,45 @@
 @command('debugfileset',
     [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
      ('', 'all-files', False,
-      _('test files from all revisions and working directory'))],
-    _('[-r REV] [--all-files] FILESPEC'))
+      _('test files from all revisions and working directory')),
+     ('s', 'show-matcher', None,
+      _('print internal representation of matcher')),
+     ('p', 'show-stage', [],
+      _('print parsed tree at the given stage'), _('NAME'))],
+    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
 def debugfileset(ui, repo, expr, **opts):
     '''parse and apply a fileset specification'''
+    from . import fileset
+    fileset.symbols # force import of fileset so we have predicates to optimize
     opts = pycompat.byteskwargs(opts)
     ctx = scmutil.revsingle(repo, opts.get('rev'), None)
-    if ui.verbose:
-        tree = fileset.parse(expr)
-        ui.note(fileset.prettyformat(tree), "\n")
+
+    stages = [
+        ('parsed', pycompat.identity),
+        ('analyzed', filesetlang.analyze),
+        ('optimized', filesetlang.optimize),
+    ]
+    stagenames = set(n for n, f in stages)
+
+    showalways = set()
+    if ui.verbose and not opts['show_stage']:
+        # show parsed tree by --verbose (deprecated)
+        showalways.add('parsed')
+    if opts['show_stage'] == ['all']:
+        showalways.update(stagenames)
+    else:
+        for n in opts['show_stage']:
+            if n not in stagenames:
+                raise error.Abort(_('invalid stage name: %s') % n)
+        showalways.update(opts['show_stage'])
+
+    tree = filesetlang.parse(expr)
+    for n, f in stages:
+        tree = f(tree)
+        if n in showalways:
+            if opts['show_stage'] or n != 'parsed':
+                ui.write(("* %s:\n") % n)
+            ui.write(filesetlang.prettyformat(tree), "\n")
 
     files = set()
     if opts['all_files']:
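
The staged pipeline added above (parse, then analyze, then optimize, with --show-stage choosing which intermediate trees to print) is a simple fold over stage functions. A toy sketch of that shape, with made-up stage functions standing in for filesetlang's real ones:

    stages = [
        ('parsed', lambda tree: tree),
        ('analyzed', lambda tree: ('analyzed', tree)),
        ('optimized', lambda tree: ('optimized', tree)),
    ]

    def runstages(tree, show):
        # Thread the tree through every stage, printing the
        # intermediate form whenever that stage name was requested.
        for name, fn in stages:
            tree = fn(tree)
            if name in show:
                print('* %s:' % name)
                print('  %r' % (tree,))
        return tree

    runstages(('symbol', 'clean()'), {'parsed', 'optimized'})
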
@@ -914,14 +950,15 @@
         files.update(ctx.substate)
 
     m = ctx.matchfileset(expr)
+    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
+        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
     for f in sorted(files):
         if not m(f):
             continue
         ui.write("%s\n" % f)
 
 @command('debugformat',
-         [] + cmdutil.formatteropts,
-        _(''))
+         [] + cmdutil.formatteropts)
 def debugformat(ui, repo, **opts):
     """display format information about the current repository
 
@@ -1076,77 +1113,48 @@
             else:
                 ui.write(_("%s is not ignored\n") % m.uipath(f))
 
-@command('debugindex', cmdutil.debugrevlogopts +
-    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
-    _('[-f FORMAT] -c|-m|FILE'),
-    optionalrepo=True)
+@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
+         _('-c|-m|FILE'))
 def debugindex(ui, repo, file_=None, **opts):
-    """dump the contents of an index file"""
+    """dump index data for a storage primitive"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
-    format = opts.get('format', 0)
-    if format not in (0, 1):
-        raise error.Abort(_("unknown format %d") % format)
+    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)
 
     if ui.debugflag:
         shortfn = hex
     else:
         shortfn = short
 
-    # There might not be anything in r, so have a sane default
     idlen = 12
-    for i in r:
-        idlen = len(shortfn(r.node(i)))
+    for i in store:
+        idlen = len(shortfn(store.node(i)))
         break
 
-    if format == 0:
-        if ui.verbose:
-            ui.write(("   rev    offset  length linkrev"
-                     " %s %s p2\n") % ("nodeid".ljust(idlen),
-                                       "p1".ljust(idlen)))
-        else:
-            ui.write(("   rev linkrev %s %s p2\n") % (
-                "nodeid".ljust(idlen), "p1".ljust(idlen)))
-    elif format == 1:
-        if ui.verbose:
-            ui.write(("   rev flag   offset   length     size   link     p1"
-                      "     p2 %s\n") % "nodeid".rjust(idlen))
-        else:
-            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
-                     "nodeid".rjust(idlen))
-
-    for i in r:
-        node = r.node(i)
-        if format == 0:
-            try:
-                pp = r.parents(node)
-            except Exception:
-                pp = [nullid, nullid]
-            if ui.verbose:
-                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
-                        i, r.start(i), r.length(i), r.linkrev(i),
-                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
-            else:
-                ui.write("% 6d % 7d %s %s %s\n" % (
-                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
-                    shortfn(pp[1])))
-        elif format == 1:
-            pr = r.parentrevs(i)
-            if ui.verbose:
-                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
-                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
-                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
-            else:
-                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
-                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
-                    shortfn(node)))
+    fm = ui.formatter('debugindex', opts)
+    fm.plain(b'   rev linkrev %s %s p2\n' % (
+        b'nodeid'.ljust(idlen),
+        b'p1'.ljust(idlen)))
+
+    for rev in store:
+        node = store.node(rev)
+        parents = store.parents(node)
+
+        fm.startitem()
+        fm.write(b'rev', b'%6d ', rev)
+        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
+        fm.write(b'node', '%s ', shortfn(node))
+        fm.write(b'p1', '%s ', shortfn(parents[0]))
+        fm.write(b'p2', '%s', shortfn(parents[1]))
+        fm.plain(b'\n')
+
+    fm.end()
 
 @command('debugindexdot', cmdutil.debugrevlogopts,
     _('-c|-m|FILE'), optionalrepo=True)
 def debugindexdot(ui, repo, file_=None, **opts):
     """dump an index DAG as a graphviz dot file"""
     opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
+    r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
     ui.write(("digraph G {\n"))
     for i in r:
         node = r.node(i)
@@ -1156,6 +1164,16 @@
             ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write("}\n")
 
+@command('debugindexstats', [])
+def debugindexstats(ui, repo):
+    """show stats related to the changelog index"""
+    repo.changelog.shortest(nullid, 1)
+    index = repo.changelog.index
+    if not util.safehasattr(index, 'stats'):
+        raise error.Abort(_('debugindexstats only works with native code'))
+    for k, v in sorted(index.stats().items()):
+        ui.write('%s: %s\n' % (k, v))
+
 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
 def debuginstall(ui, **opts):
     '''test Mercurial installation
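
The debugindex rewrite above replaces the hand-rolled --format 0/1 printing with the formatter API (the old output survives below as debugrevlogindex), so each row becomes a startitem()/write() cycle and -T templates work for free. A toy stand-in showing only the calling convention, not the real ui.formatter():

    class plainformatter(object):
        # Same method names the command relies on; ui.formatter()
        # actually returns template- and JSON-capable variants.
        def startitem(self):
            pass
        def write(self, field, fmt, value):
            print(fmt % value, end='')
        def plain(self, text):
            print(text, end='')
        def end(self):
            pass

    fm = plainformatter()
    for rev, linkrev in [(0, 0), (1, 1)]:
        fm.startitem()
        fm.write('rev', '%6d ', rev)
        fm.write('linkrev', '%7d', linkrev)
        fm.plain('\n')
    fm.end()
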
@@ -1428,10 +1446,10 @@
                 if ":" in locker:
                     host, pid = locker.split(':')
                     if host == socket.gethostname():
-                        locker = 'user %s, process %s' % (user, pid)
+                        locker = 'user %s, process %s' % (user or b'None', pid)
                     else:
                         locker = 'user %s, process %s, host %s' \
-                                 % (user, pid, host)
+                                 % (user or b'None', pid, host)
                 ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                 return 1
             except OSError as e:
@@ -1446,6 +1464,53 @@
 
     return held
 
+@command('debugmanifestfulltextcache', [
+        ('', 'clear', False, _('clear the cache')),
+        ('a', 'add', '', _('add the given manifest node to the cache'),
+         _('NODE'))
+    ], '')
+def debugmanifestfulltextcache(ui, repo, add=None, **opts):
+    """show, clear or amend the contents of the manifest fulltext cache"""
+    with repo.lock():
+        r = repo.manifestlog.getstorage(b'')
+        try:
+            cache = r._fulltextcache
+        except AttributeError:
+            ui.warn(_(
+                "Current revlog implementation doesn't appear to have a "
+                'manifest fulltext cache\n'))
+            return
+
+        if opts.get(r'clear'):
+            cache.clear()
+
+        if add:
+            try:
+                manifest = repo.manifestlog[r.lookup(add)]
+            except error.LookupError as e:
+                raise error.Abort(e, hint="Check your manifest node id")
+            manifest.read()  # stores revision in cache too
+
+        if not len(cache):
+            ui.write(_('Cache empty'))
+        else:
+            ui.write(
+                _('Cache contains %d manifest entries, in order of most to '
+                  'least recent:\n') % (len(cache),))
+            totalsize = 0
+            for nodeid in cache:
+                # Use cache.get to avoid updating the LRU order
+                data = cache.get(nodeid)
+                size = len(data)
+                totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
+                ui.write(_('id: %s, size %s\n') % (
+                    hex(nodeid), util.bytecount(size)))
+            ondisk = cache._opener.stat('manifestfulltextcache').st_size
+            ui.write(
+                _('Total cache data size %s, on-disk %s\n') % (
+                    util.bytecount(totalsize), util.bytecount(ondisk))
+            )
+
 @command('debugmergestate', [], '')
 def debugmergestate(ui, repo, *args):
     """print merge state
@@ -1699,7 +1764,7 @@
 
     def complete(path, acceptable):
         dirstate = repo.dirstate
-        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
+        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
         rootdir = repo.root + pycompat.ossep
         if spec != repo.root and not spec.startswith(rootdir):
             return [], []
@@ -1971,7 +2036,7 @@
         ts = 0
         heads = set()
 
-        for rev in xrange(numrevs):
+        for rev in pycompat.xrange(numrevs):
             dbase = r.deltaparent(rev)
             if dbase == -1:
                 dbase = rev
@@ -2006,20 +2071,43 @@
     if not flags:
         flags = ['(none)']
 
+    ### tracks merge vs single parent
     nummerges = 0
+
+    ### tracks how each "delta" was built
+    # nodelta
+    numempty = 0
+    numemptytext = 0
+    numemptydelta = 0
+    # full file content
     numfull = 0
+    # intermediate snapshot against a prior snapshot
+    numsemi = 0
+    # snapshot count per depth
+    numsnapdepth = collections.defaultdict(lambda: 0)
+    # delta against previous revision
     numprev = 0
+    # delta against first or second parent (not prev)
     nump1 = 0
     nump2 = 0
+    # delta against neither prev nor parents
     numother = 0
+    # deltas against prev that are also first or second parent
+    # (details of `numprev`)
     nump1prev = 0
     nump2prev = 0
+
+    # data about the delta chain of each rev
     chainlengths = []
     chainbases = []
     chainspans = []
 
+    # data about each revision
     datasize = [None, 0, 0]
     fullsize = [None, 0, 0]
+    semisize = [None, 0, 0]
+    # snapshot count per depth
+    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
     deltasize = [None, 0, 0]
     chunktypecounts = {}
     chunktypesizes = {}
@@ -2032,7 +2120,7 @@
         l[2] += size
 
     numrevs = len(r)
-    for rev in xrange(numrevs):
+    for rev in pycompat.xrange(numrevs):
         p1, p2 = r.parentrevs(rev)
         delta = r.deltaparent(rev)
         if format > 0:
@@ -2044,30 +2132,49 @@
             chainlengths.append(0)
             chainbases.append(r.start(rev))
             chainspans.append(size)
-            numfull += 1
-            addsize(size, fullsize)
+            if size == 0:
+                numempty += 1
+                numemptytext += 1
+            else:
+                numfull += 1
+                numsnapdepth[0] += 1
+                addsize(size, fullsize)
+                addsize(size, snapsizedepth[0])
         else:
             chainlengths.append(chainlengths[delta] + 1)
             baseaddr = chainbases[delta]
             revaddr = r.start(rev)
             chainbases.append(baseaddr)
             chainspans.append((revaddr - baseaddr) + size)
-            addsize(size, deltasize)
-            if delta == rev - 1:
-                numprev += 1
-                if delta == p1:
-                    nump1prev += 1
+            if size == 0:
+                numempty += 1
+                numemptydelta += 1
+            elif r.issnapshot(rev):
+                addsize(size, semisize)
+                numsemi += 1
+                depth = r.snapshotdepth(rev)
+                numsnapdepth[depth] += 1
+                addsize(size, snapsizedepth[depth])
+            else:
+                addsize(size, deltasize)
+                if delta == rev - 1:
+                    numprev += 1
+                    if delta == p1:
+                        nump1prev += 1
+                    elif delta == p2:
+                        nump2prev += 1
+                elif delta == p1:
+                    nump1 += 1
                 elif delta == p2:
-                    nump2prev += 1
-            elif delta == p1:
-                nump1 += 1
-            elif delta == p2:
-                nump2 += 1
-            elif delta != nullrev:
-                numother += 1
+                    nump2 += 1
+                elif delta != nullrev:
+                    numother += 1
 
         # Obtain data on the raw chunks in the revlog.
-        segment = r._getsegmentforrevs(rev, rev)[1]
+        if util.safehasattr(r, '_getsegmentforrevs'):
+            segment = r._getsegmentforrevs(rev, rev)[1]
+        else:
+            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
         if segment:
             chunktype = bytes(segment[0:1])
         else:
@@ -2081,20 +2188,28 @@
         chunktypesizes[chunktype] += size
 
     # Adjust size min value for empty cases
-    for size in (datasize, fullsize, deltasize):
+    for size in (datasize, fullsize, semisize, deltasize):
         if size[0] is None:
             size[0] = 0
 
-    numdeltas = numrevs - numfull
+    numdeltas = numrevs - numfull - numempty - numsemi
     numoprev = numprev - nump1prev - nump2prev
     totalrawsize = datasize[2]
     datasize[2] /= numrevs
     fulltotal = fullsize[2]
     fullsize[2] /= numfull
+    semitotal = semisize[2]
+    snaptotal = {}
+    if numsemi > 0:
+        semisize[2] /= numsemi
+    for depth in snapsizedepth:
+        snaptotal[depth] = snapsizedepth[depth][2]
+        snapsizedepth[depth][2] /= numsnapdepth[depth]
+
     deltatotal = deltasize[2]
-    if numrevs - numfull > 0:
-        deltasize[2] /= numrevs - numfull
-    totalsize = fulltotal + deltatotal
+    if numdeltas > 0:
+        deltasize[2] /= numdeltas
+    totalsize = fulltotal + semitotal + deltatotal
     avgchainlen = sum(chainlengths) / numrevs
     maxchainlen = max(chainlengths)
     maxchainspan = max(chainspans)
@@ -2126,10 +2241,22 @@
     ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
     ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
     ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
+    ui.write(('                   text  : ')
+             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
+    ui.write(('                   delta : ')
+             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
+    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
     ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
     ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    snapshot  : ')
+             + fmt % pcfmt(fulltotal + semitotal, totalsize))
+    for depth in sorted(numsnapdepth):
+        ui.write(('      lvl-%-3d :       ' % depth)
+                 + fmt % pcfmt(snaptotal[depth], totalsize))
     ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
 
     def fmtchunktype(chunktype):
@@ -2163,6 +2290,13 @@
                  % tuple(datasize))
     ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
              % tuple(fullsize))
+    ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
+             % tuple(semisize))
+    for depth in sorted(snapsizedepth):
+        if depth == 0:
+            continue
+        ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
+                 % ((depth,) + tuple(snapsizedepth[depth])))
     ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
              % tuple(deltasize))
 
@@ -2186,6 +2320,71 @@
             ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                              numdeltas))
 
+@command('debugrevlogindex', cmdutil.debugrevlogopts +
+    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
+    _('[-f FORMAT] -c|-m|FILE'),
+    optionalrepo=True)
+def debugrevlogindex(ui, repo, file_=None, **opts):
+    """dump the contents of a revlog index"""
+    opts = pycompat.byteskwargs(opts)
+    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
+    format = opts.get('format', 0)
+    if format not in (0, 1):
+        raise error.Abort(_("unknown format %d") % format)
+
+    if ui.debugflag:
+        shortfn = hex
+    else:
+        shortfn = short
+
+    # There might not be anything in r, so have a sane default
+    idlen = 12
+    for i in r:
+        idlen = len(shortfn(r.node(i)))
+        break
+
+    if format == 0:
+        if ui.verbose:
+            ui.write(("   rev    offset  length linkrev"
+                     " %s %s p2\n") % ("nodeid".ljust(idlen),
+                                       "p1".ljust(idlen)))
+        else:
+            ui.write(("   rev linkrev %s %s p2\n") % (
+                "nodeid".ljust(idlen), "p1".ljust(idlen)))
+    elif format == 1:
+        if ui.verbose:
+            ui.write(("   rev flag   offset   length     size   link     p1"
+                      "     p2 %s\n") % "nodeid".rjust(idlen))
+        else:
+            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
+                     "nodeid".rjust(idlen))
+
+    for i in r:
+        node = r.node(i)
+        if format == 0:
+            try:
+                pp = r.parents(node)
+            except Exception:
+                pp = [nullid, nullid]
+            if ui.verbose:
+                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
+                        i, r.start(i), r.length(i), r.linkrev(i),
+                        shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
+            else:
+                ui.write("% 6d % 7d %s %s %s\n" % (
+                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
+                    shortfn(pp[1])))
+        elif format == 1:
+            pr = r.parentrevs(i)
+            if ui.verbose:
+                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
+                        i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
+                        r.linkrev(i), pr[0], pr[1], shortfn(node)))
+            else:
+                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
+                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
+                    shortfn(node)))
+
 @command('debugrevspec',
     [('', 'optimize', None,
       _('print parsed tree after optimizing (DEPRECATED)')),
@@ -2633,6 +2832,7 @@
 def _parsewirelangblocks(fh):
     activeaction = None
     blocklines = []
+    lastindent = 0
 
     for line in fh:
         line = line.rstrip()
@@ -2642,13 +2842,14 @@
         if line.startswith(b'#'):
             continue
 
-        if not line.startswith(' '):
+        if not line.startswith(b' '):
             # New block. Flush previous one.
             if activeaction:
                 yield activeaction, blocklines
 
             activeaction = line
             blocklines = []
+            lastindent = 0
             continue
 
         # Else we start with an indent.
@@ -2656,7 +2857,14 @@
         if not activeaction:
             raise error.Abort(_('indented line outside of block'))
 
-        blocklines.append(line)
+        indent = len(line) - len(line.lstrip())
+
+        # If this line is indented more than the last line, concatenate it.
+        if indent > lastindent and blocklines:
+            blocklines[-1] += line.lstrip()
+        else:
+            blocklines.append(line)
+            lastindent = indent
 
     # Flush last block.
     if activeaction:
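
The indentation rule added above lets one logical wire-protocol line wrap: anything indented deeper than the line before it is glued onto that line. A self-contained str-based sketch of the block parser with that rule (the original works on bytes and also rejects indented lines outside a block):

    def parseblocks(lines):
        action, block, lastindent = None, [], 0
        for line in lines:
            line = line.rstrip()
            if not line or line.startswith('#'):
                continue
            if not line.startswith(' '):
                if action:
                    yield action, block
                action, block, lastindent = line, [], 0
                continue
            indent = len(line) - len(line.lstrip())
            if indent > lastindent and block:
                block[-1] += line.lstrip()  # continuation line
            else:
                block.append(line)
                lastindent = indent
        if action:
            yield action, block

    print(list(parseblocks([
        'command httprequest',
        '    header foo',
        '        bar',  # glued onto the "header foo" line
    ])))
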
@@ -2885,7 +3093,8 @@
             '-R', repo.root,
             'debugserve', '--sshstdio',
         ]
-        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
+        proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
+                                stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 bufsize=0)
 
@@ -3054,13 +3263,12 @@
                     res = e.callcommand(command, args).result()
 
                 if isinstance(res, wireprotov2peer.commandresponse):
-                    val = list(res.cborobjects())
+                    val = res.objects()
                     ui.status(_('response: %s\n') %
-                              stringutil.pprint(val, bprefix=True))
-
+                              stringutil.pprint(val, bprefix=True, indent=2))
                 else:
                     ui.status(_('response: %s\n') %
-                              stringutil.pprint(res, bprefix=True))
+                              stringutil.pprint(res, bprefix=True, indent=2))
 
         elif action == 'batchbegin':
             if batchedcommands is not None:
@@ -3097,7 +3305,10 @@
                 line = line.lstrip()
                 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                 if m:
-                    headers[m.group(1)] = m.group(2)
+                    # Headers need to use native strings.
+                    key = pycompat.strurl(m.group(1))
+                    value = pycompat.strurl(m.group(2))
+                    headers[key] = value
                     continue
 
                 if line.startswith(b'BODYFILE '):
@@ -3122,18 +3333,22 @@
             # urllib.Request insists on using has_data() as a proxy for
             # determining the request method. Override that to use our
             # explicitly requested method.
-            req.get_method = lambda: method
+            req.get_method = lambda: pycompat.sysstr(method)
 
             try:
                 res = opener.open(req)
                 body = res.read()
             except util.urlerr.urlerror as e:
-                e.read()
+                # read() method must be called, but only exists in Python 2
+                getattr(e, 'read', lambda: None)()
                 continue
 
-            if res.headers.get('Content-Type') == 'application/mercurial-cbor':
+            ct = res.headers.get(r'Content-Type')
+            if ct == r'application/mercurial-cbor':
                 ui.write(_('cbor> %s\n') %
-                         stringutil.pprint(cbor.loads(body), bprefix=True))
+                         stringutil.pprint(cborutil.decodeall(body),
+                                           bprefix=True,
+                                           indent=2))
 
         elif action == 'close':
             peer.close()
--- a/mercurial/default.d/mergetools.rc	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/default.d/mergetools.rc	Mon Oct 22 14:46:06 2018 -0400
@@ -26,7 +26,7 @@
 gpyfm.gui=True
 
 meld.gui=True
-meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output
+meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output --auto-merge
 meld.check=changed
 meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child
 
--- a/mercurial/diffhelper.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/diffhelper.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    pycompat,
 )
 
 def addlines(fp, hunk, lena, lenb, a, b):
@@ -26,7 +27,7 @@
         num = max(todoa, todob)
         if num == 0:
             break
-        for i in xrange(num):
+        for i in pycompat.xrange(num):
             s = fp.readline()
             if not s:
                 raise error.ParseError(_('incomplete hunk'))
@@ -71,7 +72,7 @@
     blen = len(b)
     if alen > blen - bstart or bstart < 0:
         return False
-    for i in xrange(alen):
+    for i in pycompat.xrange(alen):
         if a[i][1:] != b[i + bstart]:
             return False
     return True
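
Both hunks here are Python 3 porting: the xrange built-in no longer exists on py3, so callers route through pycompat.xrange. The shim amounts to roughly the following (a sketch; pycompat's real module covers much more):

    import sys

    if sys.version_info[0] >= 3:
        xrange = range  # py2's lazy xrange is spelled range on py3

    for i in xrange(3):
        print(i)
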
--- a/mercurial/dirstate.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/dirstate.py	Mon Oct 22 14:46:06 2018 -0400
@@ -210,7 +210,7 @@
         forcecwd = self._ui.config('ui', 'forcecwd')
         if forcecwd:
             return forcecwd
-        return pycompat.getcwd()
+        return encoding.getcwd()
 
     def getcwd(self):
         '''Return the path from which a canonical path is calculated.
@@ -893,8 +893,11 @@
             wadd = work.append
             while work:
                 nd = work.pop()
-                if not match.visitdir(nd):
+                visitentries = match.visitchildrenset(nd)
+                if not visitentries:
                     continue
+                if visitentries == 'this' or visitentries == 'all':
+                    visitentries = None
                 skip = None
                 if nd == '.':
                     nd = ''
@@ -909,6 +912,16 @@
                         continue
                     raise
                 for f, kind, st in entries:
+                    # Some matchers may return files in the visitentries set,
+                    # instead of 'this', if the matcher explicitly mentions them
+                    # and is not an exactmatcher. This is acceptable; we do not
+                    # make any hard assumptions about file-or-directory below
+                    # based on the presence of `f` in visitentries. If
+                    # visitchildrenset returned a set, we can always skip the
+                    # entries *not* in the set it provided regardless of whether
+                    # they're actually a file or a directory.
+                    if visitentries and f not in visitentries:
+                        continue
                     if normalizefile:
                         # even though f might be a directory, we're only
                         # interested in comparing it to files currently in the
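
The walk now consumes matcher.visitchildrenset() instead of the boolean visitdir(): 'all' and 'this' mean recurse normally, a set restricts which child names to descend into, and a falsy result prunes the directory. A toy model of that contract over a nested-dict tree (assumed semantics per the comment above):

    def walk(tree, visitchildrenset, prefix=''):
        entries = visitchildrenset(prefix)
        if not entries:
            return  # directory pruned entirely
        if entries in ('this', 'all'):
            entries = None  # no filtering at this level
        for name, child in sorted(tree.items()):
            if entries is not None and name not in entries:
                continue
            path = prefix + '/' + name if prefix else name
            if isinstance(child, dict):
                walk(child, visitchildrenset, path)
            else:
                print(path)

    tree = {'a': {'x': 1}, 'b': {'y': 1}}
    walk(tree, lambda d: {'a'} if d == '' else 'this')  # prints a/x only
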
--- a/mercurial/dirstateguard.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/dirstateguard.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,7 @@
 
 from . import (
     error,
+    narrowspec,
     util,
 )
 
@@ -33,7 +34,10 @@
         self._active = False
         self._closed = False
         self._backupname = 'dirstate.backup.%s.%d' % (name, id(self))
+        self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
+                                      (name, id(self)))
         repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
+        narrowspec.savebackup(repo, self._narrowspecbackupname)
         self._active = True
 
     def __del__(self):
@@ -52,10 +56,12 @@
 
         self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
                                          self._backupname)
+        narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
         self._active = False
         self._closed = True
 
     def _abort(self):
+        narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
         self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
                                            self._backupname)
         self._active = False
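
dirstateguard now snapshots the narrowspec alongside the dirstate, following the same save/clear/restore triple. The guard pattern in schematic form (illustrative names, not Mercurial's API):

    class backupguard(object):
        def __init__(self, resources):
            self._resources = list(resources)
            for r in self._resources:
                r.savebackup()      # taken eagerly at construction

        def close(self):
            for r in self._resources:
                r.clearbackup()     # success: drop the backups

        def abort(self):
            for r in self._resources:
                r.restorebackup()   # failure: roll everything back
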
--- a/mercurial/dispatch.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/dispatch.py	Mon Oct 22 14:46:06 2018 -0400
@@ -21,6 +21,8 @@
 
 from .i18n import _
 
+from hgdemandimport import tracing
+
 from . import (
     cmdutil,
     color,
@@ -64,6 +66,9 @@
         # low-level repo state (for example, changelog) before extensions.
         self.prereposetups = prereposetups or []
 
+        # store the parsed and canonical command
+        self.canonical_command = None
+
     def _runexithandlers(self):
         exc = None
         handlers = self.ui._exithandlers
@@ -84,7 +89,8 @@
 def run():
     "run the command in sys.argv"
     initstdio()
-    req = request(pycompat.sysargv[1:])
+    with tracing.log('parse args into request'):
+        req = request(pycompat.sysargv[1:])
     err = None
     try:
         status = dispatch(req)
@@ -176,182 +182,185 @@
 
 def dispatch(req):
     """run the command specified in req.args; returns an integer status code"""
-    if req.ferr:
-        ferr = req.ferr
-    elif req.ui:
-        ferr = req.ui.ferr
-    else:
-        ferr = procutil.stderr
+    with tracing.log('dispatch.dispatch'):
+        if req.ferr:
+            ferr = req.ferr
+        elif req.ui:
+            ferr = req.ui.ferr
+        else:
+            ferr = procutil.stderr
 
-    try:
-        if not req.ui:
-            req.ui = uimod.ui.load()
-        req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
-        if req.earlyoptions['traceback']:
-            req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
+        try:
+            if not req.ui:
+                req.ui = uimod.ui.load()
+            req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
+            if req.earlyoptions['traceback']:
+                req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
-        # set ui streams from the request
-        if req.fin:
-            req.ui.fin = req.fin
-        if req.fout:
-            req.ui.fout = req.fout
-        if req.ferr:
-            req.ui.ferr = req.ferr
-    except error.Abort as inst:
-        ferr.write(_("abort: %s\n") % inst)
-        if inst.hint:
-            ferr.write(_("(%s)\n") % inst.hint)
-        return -1
-    except error.ParseError as inst:
-        _formatparse(ferr.write, inst)
-        return -1
+            # set ui streams from the request
+            if req.fin:
+                req.ui.fin = req.fin
+            if req.fout:
+                req.ui.fout = req.fout
+            if req.ferr:
+                req.ui.ferr = req.ferr
+        except error.Abort as inst:
+            ferr.write(_("abort: %s\n") % inst)
+            if inst.hint:
+                ferr.write(_("(%s)\n") % inst.hint)
+            return -1
+        except error.ParseError as inst:
+            _formatparse(ferr.write, inst)
+            return -1
 
-    msg = _formatargs(req.args)
-    starttime = util.timer()
-    ret = 1  # default of Python exit code on unhandled exception
-    try:
-        ret = _runcatch(req) or 0
-    except error.ProgrammingError as inst:
-        req.ui.warn(_('** ProgrammingError: %s\n') % inst)
-        if inst.hint:
-            req.ui.warn(_('** (%s)\n') % inst.hint)
-        raise
-    except KeyboardInterrupt as inst:
+        msg = _formatargs(req.args)
+        starttime = util.timer()
+        ret = 1  # default of Python exit code on unhandled exception
         try:
-            if isinstance(inst, error.SignalInterrupt):
-                msg = _("killed!\n")
-            else:
-                msg = _("interrupted!\n")
-            req.ui.warn(msg)
-        except error.SignalInterrupt:
-            # maybe pager would quit without consuming all the output, and
-            # SIGPIPE was raised. we cannot print anything in this case.
-            pass
-        except IOError as inst:
-            if inst.errno != errno.EPIPE:
-                raise
-        ret = -1
-    finally:
-        duration = util.timer() - starttime
-        req.ui.flush()
-        if req.ui.logblockedtimes:
-            req.ui._blockedtimes['command_duration'] = duration * 1000
-            req.ui.log('uiblocked', 'ui blocked ms',
-                       **pycompat.strkwargs(req.ui._blockedtimes))
-        req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
-                   msg, ret & 255, duration)
-        try:
-            req._runexithandlers()
-        except: # exiting, so no re-raises
-            ret = ret or -1
-    return ret
+            ret = _runcatch(req) or 0
+        except error.ProgrammingError as inst:
+            req.ui.error(_('** ProgrammingError: %s\n') % inst)
+            if inst.hint:
+                req.ui.error(_('** (%s)\n') % inst.hint)
+            raise
+        except KeyboardInterrupt as inst:
+            try:
+                if isinstance(inst, error.SignalInterrupt):
+                    msg = _("killed!\n")
+                else:
+                    msg = _("interrupted!\n")
+                req.ui.error(msg)
+            except error.SignalInterrupt:
+                # maybe pager would quit without consuming all the output, and
+                # SIGPIPE was raised. we cannot print anything in this case.
+                pass
+            except IOError as inst:
+                if inst.errno != errno.EPIPE:
+                    raise
+            ret = -1
+        finally:
+            duration = util.timer() - starttime
+            req.ui.flush()
+            if req.ui.logblockedtimes:
+                req.ui._blockedtimes['command_duration'] = duration * 1000
+                req.ui.log('uiblocked', 'ui blocked ms',
+                           **pycompat.strkwargs(req.ui._blockedtimes))
+            req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
+                       msg, ret & 255, duration,
+                       canonical_command=req.canonical_command)
+            try:
+                req._runexithandlers()
+            except: # exiting, so no re-raises
+                ret = ret or -1
+        return ret
 
 def _runcatch(req):
-    def catchterm(*args):
-        raise error.SignalInterrupt
+    with tracing.log('dispatch._runcatch'):
+        def catchterm(*args):
+            raise error.SignalInterrupt
 
-    ui = req.ui
-    try:
-        for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
-            num = getattr(signal, name, None)
-            if num:
-                signal.signal(num, catchterm)
-    except ValueError:
-        pass # happens if called in a thread
-
-    def _runcatchfunc():
-        realcmd = None
+        ui = req.ui
         try:
-            cmdargs = fancyopts.fancyopts(req.args[:], commands.globalopts, {})
-            cmd = cmdargs[0]
-            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
-            realcmd = aliases[0]
-        except (error.UnknownCommand, error.AmbiguousCommand,
-                IndexError, getopt.GetoptError):
-            # Don't handle this here. We know the command is
-            # invalid, but all we're worried about for now is that
-            # it's not a command that server operators expect to
-            # be safe to offer to users in a sandbox.
-            pass
-        if realcmd == 'serve' and '--stdio' in cmdargs:
-            # We want to constrain 'hg serve --stdio' instances pretty
-            # closely, as many shared-ssh access tools want to grant
-            # access to run *only* 'hg -R $repo serve --stdio'. We
-            # restrict to exactly that set of arguments, and prohibit
-            # any repo name that starts with '--' to prevent
-            # shenanigans wherein a user does something like pass
-            # --debugger or --config=ui.debugger=1 as a repo
-            # name. This used to actually run the debugger.
-            if (len(req.args) != 4 or
-                req.args[0] != '-R' or
-                req.args[1].startswith('--') or
-                req.args[2] != 'serve' or
-                req.args[3] != '--stdio'):
-                raise error.Abort(
-                    _('potentially unsafe serve --stdio invocation: %s') %
-                    (stringutil.pprint(req.args),))
+            for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+                num = getattr(signal, name, None)
+                if num:
+                    signal.signal(num, catchterm)
+        except ValueError:
+            pass # happens if called in a thread
 
-        try:
-            debugger = 'pdb'
-            debugtrace = {
-                'pdb': pdb.set_trace
-            }
-            debugmortem = {
-                'pdb': pdb.post_mortem
-            }
+        def _runcatchfunc():
+            realcmd = None
+            try:
+                cmdargs = fancyopts.fancyopts(
+                    req.args[:], commands.globalopts, {})
+                cmd = cmdargs[0]
+                aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
+                realcmd = aliases[0]
+            except (error.UnknownCommand, error.AmbiguousCommand,
+                    IndexError, getopt.GetoptError):
+                # Don't handle this here. We know the command is
+                # invalid, but all we're worried about for now is that
+                # it's not a command that server operators expect to
+                # be safe to offer to users in a sandbox.
+                pass
+            if realcmd == 'serve' and '--stdio' in cmdargs:
+                # We want to constrain 'hg serve --stdio' instances pretty
+                # closely, as many shared-ssh access tools want to grant
+                # access to run *only* 'hg -R $repo serve --stdio'. We
+                # restrict to exactly that set of arguments, and prohibit
+                # any repo name that starts with '--' to prevent
+                # shenanigans wherein a user does something like pass
+                # --debugger or --config=ui.debugger=1 as a repo
+                # name. This used to actually run the debugger.
+                if (len(req.args) != 4 or
+                    req.args[0] != '-R' or
+                    req.args[1].startswith('--') or
+                    req.args[2] != 'serve' or
+                    req.args[3] != '--stdio'):
+                    raise error.Abort(
+                        _('potentially unsafe serve --stdio invocation: %s') %
+                        (stringutil.pprint(req.args),))
 
-            # read --config before doing anything else
-            # (e.g. to change trust settings for reading .hg/hgrc)
-            cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
-
-            if req.repo:
-                # copy configs that were passed on the cmdline (--config) to
-                # the repo ui
-                for sec, name, val in cfgs:
-                    req.repo.ui.setconfig(sec, name, val, source='--config')
+            try:
+                debugger = 'pdb'
+                debugtrace = {
+                    'pdb': pdb.set_trace
+                }
+                debugmortem = {
+                    'pdb': pdb.post_mortem
+                }
 
-            # developer config: ui.debugger
-            debugger = ui.config("ui", "debugger")
-            debugmod = pdb
-            if not debugger or ui.plain():
-                # if we are in HGPLAIN mode, then disable custom debugging
-                debugger = 'pdb'
-            elif req.earlyoptions['debugger']:
-                # This import can be slow for fancy debuggers, so only
-                # do it when absolutely necessary, i.e. when actual
-                # debugging has been requested
-                with demandimport.deactivated():
-                    try:
-                        debugmod = __import__(debugger)
-                    except ImportError:
-                        pass # Leave debugmod = pdb
+                # read --config before doing anything else
+                # (e.g. to change trust settings for reading .hg/hgrc)
+                cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
+
+                if req.repo:
+                    # copy configs that were passed on the cmdline (--config) to
+                    # the repo ui
+                    for sec, name, val in cfgs:
+                        req.repo.ui.setconfig(sec, name, val, source='--config')
 
-            debugtrace[debugger] = debugmod.set_trace
-            debugmortem[debugger] = debugmod.post_mortem
+                # developer config: ui.debugger
+                debugger = ui.config("ui", "debugger")
+                debugmod = pdb
+                if not debugger or ui.plain():
+                    # if we are in HGPLAIN mode, then disable custom debugging
+                    debugger = 'pdb'
+                elif req.earlyoptions['debugger']:
+                    # This import can be slow for fancy debuggers, so only
+                    # do it when absolutely necessary, i.e. when actual
+                    # debugging has been requested
+                    with demandimport.deactivated():
+                        try:
+                            debugmod = __import__(debugger)
+                        except ImportError:
+                            pass # Leave debugmod = pdb
 
-            # enter the debugger before command execution
-            if req.earlyoptions['debugger']:
-                ui.warn(_("entering debugger - "
-                        "type c to continue starting hg or h for help\n"))
+                debugtrace[debugger] = debugmod.set_trace
+                debugmortem[debugger] = debugmod.post_mortem
 
-                if (debugger != 'pdb' and
-                    debugtrace[debugger] == debugtrace['pdb']):
-                    ui.warn(_("%s debugger specified "
-                              "but its module was not found\n") % debugger)
-                with demandimport.deactivated():
-                    debugtrace[debugger]()
-            try:
-                return _dispatch(req)
-            finally:
-                ui.flush()
-        except: # re-raises
-            # enter the debugger when we hit an exception
-            if req.earlyoptions['debugger']:
-                traceback.print_exc()
-                debugmortem[debugger](sys.exc_info()[2])
-            raise
+                # enter the debugger before command execution
+                if req.earlyoptions['debugger']:
+                    ui.warn(_("entering debugger - "
+                            "type c to continue starting hg or h for help\n"))
 
-    return _callcatch(ui, _runcatchfunc)
+                    if (debugger != 'pdb' and
+                        debugtrace[debugger] == debugtrace['pdb']):
+                        ui.warn(_("%s debugger specified "
+                                  "but its module was not found\n") % debugger)
+                    with demandimport.deactivated():
+                        debugtrace[debugger]()
+                try:
+                    return _dispatch(req)
+                finally:
+                    ui.flush()
+            except: # re-raises
+                # enter the debugger when we hit an exception
+                if req.earlyoptions['debugger']:
+                    traceback.print_exc()
+                    debugmortem[debugger](sys.exc_info()[2])
+                raise
+        return _callcatch(ui, _runcatchfunc)
 
 def _callcatch(ui, func):
     """like scmutil.callcatch but handles more high-level exceptions about
@@ -370,9 +379,8 @@
             ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
             commands.help_(ui, inst.args[0], full=False, command=True)
         else:
-            ui.pager('help')
             ui.warn(_("hg: %s\n") % inst.args[1])
-            commands.help_(ui, 'shortlist')
+            ui.warn(_("(use 'hg help -v' for a list of global options)\n"))
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
@@ -394,9 +402,8 @@
                     _reportsimilar(ui.warn, sim)
                     suggested = True
             if not suggested:
-                ui.pager('help')
                 ui.warn(nocmdmsg)
-                commands.help_(ui, 'shortlist')
+                ui.warn(_("(use 'hg help' for a list of commands)\n"))
     except IOError:
         raise
     except KeyboardInterrupt:
@@ -745,7 +752,7 @@
     """
     if wd is None:
         try:
-            wd = pycompat.getcwd()
+            wd = encoding.getcwd()
         except OSError as e:
             raise error.Abort(_("error getting current working directory: %s") %
                               encoding.strtolocal(e.strerror))
@@ -850,6 +857,9 @@
         fullargs = args
         cmd, func, args, options, cmdoptions = _parse(lui, args)
 
+        # store the canonical command name in request object for later access
+        req.canonical_command = cmd
+
         if options["config"] != req.earlyoptions["config"]:
             raise error.Abort(_("option --config may not be abbreviated!"))
         if options["cwd"] != req.earlyoptions["cwd"]:
@@ -965,7 +975,7 @@
                         if not path:
                             raise error.RepoError(_("no repository found in"
                                                     " '%s' (.hg not found)")
-                                                  % pycompat.getcwd())
+                                                  % encoding.getcwd())
                         raise
             if repo:
                 ui = repo.ui
@@ -989,7 +999,8 @@
 def _runcommand(ui, options, cmd, cmdfunc):
     """Run a command function, possibly with profiling enabled."""
     try:
-        return cmdfunc()
+        with tracing.log("Running %s command" % cmd):
+            return cmdfunc()
     except error.SignatureError:
         raise error.CommandError(cmd, _('invalid arguments'))
 
--- a/mercurial/encoding.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/encoding.py	Mon Oct 22 14:46:06 2018 -0400
@@ -68,21 +68,21 @@
     environ = dict((k.encode(u'utf-8'), v.encode(u'utf-8'))
                    for k, v in os.environ.items())  # re-exports
 
-_encodingfixers = {
-    '646': lambda: 'ascii',
-    'ANSI_X3.4-1968': lambda: 'ascii',
+_encodingrewrites = {
+    '646': 'ascii',
+    'ANSI_X3.4-1968': 'ascii',
 }
 # cp65001 is a Windows variant of utf-8, which isn't supported on Python 2.
 # No idea if it should be rewritten to the canonical name 'utf-8' on Python 3.
 # https://bugs.python.org/issue13216
 if pycompat.iswindows and not pycompat.ispy3:
-    _encodingfixers['cp65001'] = lambda: 'utf-8'
+    _encodingrewrites['cp65001'] = 'utf-8'
 
 try:
     encoding = environ.get("HGENCODING")
     if not encoding:
         encoding = locale.getpreferredencoding().encode('ascii') or 'ascii'
-        encoding = _encodingfixers.get(encoding, lambda: encoding)()
+        encoding = _encodingrewrites.get(encoding, encoding)
 except locale.Error:
     encoding = 'ascii'
 encodingmode = environ.get("HGENCODINGMODE", "strict")
@@ -233,6 +233,18 @@
     environ = dict((tolocal(k.encode(u'utf-8')), tolocal(v.encode(u'utf-8')))
                    for k, v in os.environ.items())  # re-exports
 
+if pycompat.ispy3:
+    # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
+    # returns bytes.
+    if pycompat.iswindows:
+        # Python 3 on Windows issues a DeprecationWarning about using the bytes
+        # API when os.getcwdb() is called.
+        getcwd = lambda: strtolocal(os.getcwd())  # re-exports
+    else:
+        getcwd = os.getcwdb  # re-exports
+else:
+    getcwd = os.getcwd  # re-exports
+
 # How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
 _wide = _sysstr(environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
                 and "WFA" or "WF")
@@ -251,7 +263,7 @@
 def getcols(s, start, c):
     '''Use colwidth to find a c-column substring of s starting at byte
     index start'''
-    for x in xrange(start + c, len(s)):
+    for x in pycompat.xrange(start + c, len(s)):
         t = s[start:x]
         if colwidth(t) == c:
             return t
@@ -346,7 +358,7 @@
     else:
         uslice = lambda i: u[:-i]
         concat = lambda s: s + ellipsis
-    for i in xrange(1, len(u)):
+    for i in pycompat.xrange(1, len(u)):
         usub = uslice(i)
         if ucolwidth(usub) <= width:
             return concat(usub.encode(_sysstr(encoding)))
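
encoding.getcwd() centralizes the bytes-returning cwd lookup the other files switch to: os.getcwdb() on POSIX Python 3, an encoded os.getcwd() on Windows Python 3 (where the bytes API is deprecated), and plain os.getcwd() on Python 2. A sketch of the shim; the real code encodes with the local encoding via strtolocal(), and utf-8 below is a simplification:

    import os
    import sys

    if sys.version_info[0] >= 3:
        if os.name == 'nt':
            def getcwd():
                # bytes API deprecated on Windows: encode the str result
                return os.getcwd().encode('utf-8')
        else:
            getcwd = os.getcwdb
    else:
        getcwd = os.getcwd

    print(type(getcwd()))  # bytes on Python 3
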
--- a/mercurial/error.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/error.py	Mon Oct 22 14:46:06 2018 -0400
@@ -34,7 +34,14 @@
         self.hint = kw.pop(r'hint', None)
         super(Hint, self).__init__(*args, **kw)
 
-class RevlogError(Hint, Exception):
+class StorageError(Hint, Exception):
+    """Raised when an error occurs in a storage layer.
+
+    Usually subclassed by a storage-specific exception.
+    """
+    __bytes__ = _tobytes
+
+class RevlogError(StorageError):
     __bytes__ = _tobytes
 
 class FilteredIndexError(IndexError):
@@ -58,6 +65,9 @@
     def __str__(self):
         return RevlogError.__str__(self)
 
+class AmbiguousPrefixLookupError(LookupError):
+    pass
+
 class FilteredLookupError(LookupError):
     pass
 
@@ -212,6 +222,14 @@
 
 class ProgrammingError(Hint, RuntimeError):
     """Raised if a mercurial (core or extension) developer made a mistake"""
+
+    def __init__(self, msg, *args, **kwargs):
+        # On Python 3, turn the message back into a string since this is
+        # an internal-only error that won't be printed except in a
+        # stack traces.
+        msg = pycompat.sysstr(msg)
+        super(ProgrammingError, self).__init__(msg, *args, **kwargs)
+
     __bytes__ = _tobytes
 
 class WdirUnsupported(Exception):
@@ -265,7 +283,7 @@
         Abort.__init__(self, 'failed to update value for "%s/%s"'
                        % (namespace, key))
 
-class CensoredNodeError(RevlogError):
+class CensoredNodeError(StorageError):
     """error raised when content verification fails on a censored node
 
     Also contains the tombstone data substituted for the uncensored data.
@@ -273,10 +291,10 @@
 
     def __init__(self, filename, node, tombstone):
         from .node import short
-        RevlogError.__init__(self, '%s:%s' % (filename, short(node)))
+        StorageError.__init__(self, '%s:%s' % (filename, short(node)))
         self.tombstone = tombstone
 
-class CensoredBaseError(RevlogError):
+class CensoredBaseError(StorageError):
     """error raised when a delta is rejected because its base is censored
 
     A delta based on a censored revision must be formed as single patch
@@ -305,3 +323,14 @@
 class InMemoryMergeConflictsError(Exception):
     """Exception raised when merge conflicts arose during an in-memory merge."""
     __bytes__ = _tobytes
+
+class WireprotoCommandError(Exception):
+    """Represents an error during execution of a wire protocol command.
+
+    Should only be thrown by wire protocol version 2 commands.
+
+    The error is a format string and an optional iterable of arguments.
+    """
+    def __init__(self, message, args=None):
+        self.message = message
+        self.messageargs = args
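
The error.py hunks above introduce `StorageError` as a storage-neutral base class for `RevlogError`, `CensoredNodeError`, and `CensoredBaseError`. A simplified sketch (standalone classes, not the real ones) of what the new hierarchy buys callers:

    class StorageError(Exception):
        """Generic storage-layer failure."""

    class RevlogError(StorageError):
        """Failure specific to the revlog backend."""

    try:
        raise RevlogError('index is corrupt')
    except StorageError as err:
        # Backend-agnostic code catches the base class and keeps working
        # even if a future non-revlog backend raises its own subclass.
        print('storage error: %s' % err)
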
--- a/mercurial/exchange.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/exchange.py	Mon Oct 22 14:46:06 2018 -0400
@@ -15,6 +15,7 @@
     bin,
     hex,
     nullid,
+    nullrev,
 )
 from .thirdparty import (
     attr,
@@ -25,12 +26,15 @@
     changegroup,
     discovery,
     error,
+    exchangev2,
     lock as lockmod,
     logexchange,
+    narrowspec,
     obsolete,
     phases,
     pushkey,
     pycompat,
+    repository,
     scmutil,
     sslutil,
     streamclone,
@@ -44,6 +48,8 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+_NARROWACL_SECTION = 'narrowhgacl'
+
 # Maps bundle version human names to changegroup versions.
 _bundlespeccgversions = {'v1': '01',
                          'v2': '02',
@@ -516,7 +522,8 @@
         # source repo cannot be locked.
         # We do not abort the push, but just disable the local phase
         # synchronisation.
-        msg = 'cannot lock source repository: %s\n' % err
+        msg = ('cannot lock source repository: %s\n'
+               % stringutil.forcebytestr(err))
         pushop.ui.debug(msg)
 
     with wlock or util.nullcontextmanager(), \
@@ -1308,7 +1315,8 @@
     """
 
     def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
-                 remotebookmarks=None, streamclonerequested=None):
+                 remotebookmarks=None, streamclonerequested=None,
+                 includepats=None, excludepats=None, depth=None):
         # repo we pull into
         self.repo = repo
         # repo we pull from
@@ -1338,6 +1346,12 @@
         self.stepsdone = set()
         # Whether we attempted a clone from pre-generated bundles.
         self.clonebundleattempted = False
+        # Set of file patterns to include.
+        self.includepats = includepats
+        # Set of file patterns to exclude.
+        self.excludepats = excludepats
+        # Number of ancestor changesets to pull from each pulled head.
+        self.depth = depth
 
     @util.propertycache
     def pulledsubset(self):
@@ -1427,7 +1441,7 @@
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
-        if changegroup.NARROW_REQUIREMENT in repo.requirements:
+        if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
@@ -1442,7 +1456,8 @@
         pullop.rheads = set(pullop.rheads) - pullop.common
 
 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
-         streamclonerequested=None):
+         streamclonerequested=None, includepats=None, excludepats=None,
+         depth=None):
     """Fetch repository data from a remote.
 
     This is the main function used to retrieve data from a remote repository.
@@ -1460,13 +1475,33 @@
     of revlogs from the server. This only works when the local repository is
     empty. The default value of ``None`` means to respect the server
     configuration for preferring stream clones.
+    ``includepats`` and ``excludepats`` define explicit file patterns to
+    include and exclude in storage, respectively. If not defined, narrow
+    patterns from the repo instance are used, if available.
+    ``depth`` is an integer indicating the DAG depth of history we're
+    interested in. If defined, for each revision specified in ``heads``, we
+    will fetch up to this many of its ancestors and data associated with them.
 
     Returns the ``pulloperation`` created for this pull.
     """
     if opargs is None:
         opargs = {}
+
+    # We allow the narrow patterns to be passed in explicitly to provide more
+    # flexibility for API consumers.
+    if includepats or excludepats:
+        includepats = includepats or set()
+        excludepats = excludepats or set()
+    else:
+        includepats, excludepats = repo.narrowpats
+
+    narrowspec.validatepatterns(includepats)
+    narrowspec.validatepatterns(excludepats)
+
     pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                            streamclonerequested=streamclonerequested,
+                           includepats=includepats, excludepats=excludepats,
+                           depth=depth,
                            **pycompat.strkwargs(opargs))
 
     peerlocal = pullop.remote.local()
@@ -1480,17 +1515,21 @@
 
     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
     with repo.wlock(), repo.lock(), pullop.trmanager:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+        # Use the modern wire protocol, if available.
+        if remote.capable('command-changesetdata'):
+            exchangev2.pull(pullop)
+        else:
+            # This should ideally be in _pullbundle2(). However, it needs to run
+            # before discovery to avoid extra work.
+            _maybeapplyclonebundle(pullop)
+            streamclone.maybeperformlegacystreamclone(pullop)
+            _pulldiscovery(pullop)
+            if pullop.canusebundle2:
+                _fullpullbundle2(repo, pullop)
+            _pullchangeset(pullop)
+            _pullphase(pullop)
+            _pullbookmarks(pullop)
+            _pullobsolete(pullop)
 
     # storing remotenames
     if repo.ui.configbool('experimental', 'remotenames'):
@@ -1830,6 +1869,177 @@
             pullop.repo.invalidatevolatilesets()
     return tr
 
+def applynarrowacl(repo, kwargs):
+    """Apply narrow fetch access control.
+
+    This massages the named arguments for getbundle wire protocol commands
+    so requested data is filtered through access control rules.
+    """
+    ui = repo.ui
+    # TODO this assumes existence of HTTP and is a layering violation.
+    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+    user_includes = ui.configlist(
+        _NARROWACL_SECTION, username + '.includes',
+        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+    user_excludes = ui.configlist(
+        _NARROWACL_SECTION, username + '.excludes',
+        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+    if not user_includes:
+        raise error.Abort(_("{} configuration for user {} is empty")
+                          .format(_NARROWACL_SECTION, username))
+
+    user_includes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_includes]
+    user_excludes = [
+        'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+
+    req_includes = set(kwargs.get(r'includepats', []))
+    req_excludes = set(kwargs.get(r'excludepats', []))
+
+    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
+        req_includes, req_excludes, user_includes, user_excludes)
+
+    if invalid_includes:
+        raise error.Abort(
+            _("The following includes are not accessible for {}: {}")
+            .format(username, invalid_includes))
+
+    new_args = {}
+    new_args.update(kwargs)
+    new_args[r'narrow'] = True
+    new_args[r'narrow_acl'] = True
+    new_args[r'includepats'] = req_includes
+    if req_excludes:
+        new_args[r'excludepats'] = req_excludes
+
+    return new_args
+
+def _computeellipsis(repo, common, heads, known, match, depth=None):
+    """Compute the shape of a narrowed DAG.
+
+    Args:
+      repo: The repository we're transferring.
+      common: The roots of the DAG range we're transferring.
+              May be just [nullid], which means all ancestors of heads.
+      heads: The heads of the DAG range we're transferring.
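+      known: Revs the client already knows about; like heads, these are
+             treated as required nodes when shaping the ellipsis DAG.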
+      match: The narrowmatcher that allows us to identify relevant changes.
+      depth: If not None, only consider nodes to be full nodes if they are at
+             most depth changesets away from one of heads.
+
+    Returns:
+      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
+
+        visitnodes: The list of nodes (either full or ellipsis) which
+                    need to be sent to the client.
+        relevant_nodes: The set of changelog nodes which change a file inside
+                        the narrowspec. The client needs these as
+                        non-ellipsis nodes.
+        ellipsisroots: A dict of {rev: parents} that is used in
+                       narrowchangegroup to produce ellipsis nodes with the
+                       correct parents.
+    """
+    cl = repo.changelog
+    mfl = repo.manifestlog
+
+    clrev = cl.rev
+
+    commonrevs = {clrev(n) for n in common} | {nullrev}
+    headsrevs = {clrev(n) for n in heads}
+
+    if depth:
+        revdepth = {h: 0 for h in headsrevs}
+
+    ellipsisheads = collections.defaultdict(set)
+    ellipsisroots = collections.defaultdict(set)
+
+    def addroot(head, curchange):
+        """Add a root to an ellipsis head, splitting heads with 3 roots."""
+        ellipsisroots[head].add(curchange)
+        # Recursively split ellipsis heads with 3 roots by finding the
+        # roots' youngest common descendant which is an elided merge commit.
+        # That descendant takes 2 of the 3 roots as its own, and becomes a
+        # root of the head.
+        while len(ellipsisroots[head]) > 2:
+            child, roots = splithead(head)
+            splitroots(head, child, roots)
+            head = child  # Recurse in case we just added a 3rd root
+
+    def splitroots(head, child, roots):
+        ellipsisroots[head].difference_update(roots)
+        ellipsisroots[head].add(child)
+        ellipsisroots[child].update(roots)
+        ellipsisroots[child].discard(child)
+
+    def splithead(head):
+        r1, r2, r3 = sorted(ellipsisroots[head])
+        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
+            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
+                            nr1, head, nr2, head)
+            for j in mid:
+                if j == nr2:
+                    return nr2, (nr1, nr2)
+                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
+                    return j, (nr1, nr2)
+        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
+                            'roots: %d %d %d') % (head, r1, r2, r3))
+
+    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
+    visit = reversed(missing)
+    relevant_nodes = set()
+    visitnodes = [cl.node(m) for m in missing]
+    required = set(headsrevs) | known
+    for rev in visit:
+        clrev = cl.changelogrevision(rev)
+        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
+        if depth is not None:
+            curdepth = revdepth[rev]
+            for p in ps:
+                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
+        needed = False
+        shallow_enough = depth is None or revdepth[rev] <= depth
+        if shallow_enough:
+            curmf = mfl[clrev.manifest].read()
+            if ps:
+                # We choose to not trust the changed files list in
+                # changesets because it's not always correct. TODO: could
+                # we trust it for the non-merge case?
+                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
+                needed = bool(curmf.diff(p1mf, match))
+                if not needed and len(ps) > 1:
+                    # For merge changes, the list of changed files is not
+                    # helpful, since we need to emit the merge if a file
+                    # in the narrow spec has changed on either side of the
+                    # merge. As a result, we do a manifest diff to check.
+                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
+                    needed = bool(curmf.diff(p2mf, match))
+            else:
+                # For a root node, we need to include the node if any
+                # files in the node match the narrowspec.
+                needed = any(curmf.walk(match))
+
+        if needed:
+            for head in ellipsisheads[rev]:
+                addroot(head, rev)
+            for p in ps:
+                required.add(p)
+            relevant_nodes.add(cl.node(rev))
+        else:
+            if not ps:
+                ps = [nullrev]
+            if rev in required:
+                for head in ellipsisheads[rev]:
+                    addroot(head, rev)
+                for p in ps:
+                    ellipsisheads[p].add(rev)
+            else:
+                for p in ps:
+                    ellipsisheads[p] |= ellipsisheads[rev]
+
+    # add common changesets as roots of their reachable ellipsis heads
+    for c in commonrevs:
+        for head in ellipsisheads[c]:
+            addroot(head, c)
+    return visitnodes, relevant_nodes, ellipsisroots
+
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
@@ -1924,30 +2134,51 @@
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
-    cgstream = None
-    if kwargs.get(r'cg', True):
-        # build changegroup bundle here.
-        version = '01'
-        cgversions = b2caps.get('changegroup')
-        if cgversions:  # 3.1 and 3.2 ship with an empty value
-            cgversions = [v for v in cgversions
-                          if v in changegroup.supportedoutgoingversions(repo)]
-            if not cgversions:
-                raise ValueError(_('no common changegroup version'))
-            version = max(cgversions)
-        outgoing = _computeoutgoing(repo, heads, common)
-        if outgoing.missing:
-            cgstream = changegroup.makestream(repo, outgoing, version, source,
-                                              bundlecaps=bundlecaps)
+    if not kwargs.get(r'cg', True):
+        return
+
+    version = '01'
+    cgversions = b2caps.get('changegroup')
+    if cgversions:  # 3.1 and 3.2 ship with an empty value
+        cgversions = [v for v in cgversions
+                      if v in changegroup.supportedoutgoingversions(repo)]
+        if not cgversions:
+            raise ValueError(_('no common changegroup version'))
+        version = max(cgversions)
+
+    outgoing = _computeoutgoing(repo, heads, common)
+    if not outgoing.missing:
+        return
+
+    if kwargs.get(r'narrow', False):
+        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
+    else:
+        matcher = None
 
-    if cgstream:
-        part = bundler.newpart('changegroup', data=cgstream)
-        if cgversions:
-            part.addparam('version', version)
-        part.addparam('nbchanges', '%d' % len(outgoing.missing),
-                      mandatory=False)
-        if 'treemanifest' in repo.requirements:
-            part.addparam('treemanifest', '1')
+    cgstream = changegroup.makestream(repo, outgoing, version, source,
+                                      bundlecaps=bundlecaps, matcher=matcher)
+
+    part = bundler.newpart('changegroup', data=cgstream)
+    if cgversions:
+        part.addparam('version', version)
+
+    part.addparam('nbchanges', '%d' % len(outgoing.missing),
+                  mandatory=False)
+
+    if 'treemanifest' in repo.requirements:
+        part.addparam('treemanifest', '1')
+
+    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
+        and (include or exclude)):
+        narrowspecpart = bundler.newpart('narrow:spec')
+        if include:
+            narrowspecpart.addparam(
+                'include', '\n'.join(include), mandatory=True)
+        if exclude:
+            narrowspecpart.addparam(
+                'exclude', '\n'.join(exclude), mandatory=True)
 
 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
@@ -2069,8 +2300,13 @@
     # Don't send unless:
     # - changeset are being exchanged,
     # - the client supports it.
-    if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
+    # - narrow bundle isn't in play (not currently compatible).
+    if (not kwargs.get(r'cg', True)
+        or 'rev-branch-cache' not in b2caps
+        or kwargs.get(r'narrow', False)
+        or repo.ui.has_section(_NARROWACL_SECTION)):
         return
+
     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addpartrevbranchcache(repo, bundler, outgoing)
 
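
In `_computeellipsis()` above, `revdepth` tracks each revision's minimum distance from a requested head while the walk proceeds child-to-parent, and a revision is "shallow enough" when that distance is at most `depth`. A self-contained sketch of the same bookkeeping on a toy DAG (function and variable names are illustrative, not Mercurial API):

    import collections

    def shallowrevs(parents, heads, depth):
        # Minimum distance from any head, computed head-to-root like the
        # revdepth bookkeeping in _computeellipsis().
        revdepth = {h: 0 for h in heads}
        keep = set()
        queue = collections.deque(heads)
        while queue:
            rev = queue.popleft()
            if revdepth[rev] <= depth:
                keep.add(rev)
            for p in parents.get(rev, ()):
                newdepth = revdepth[rev] + 1
                if newdepth < revdepth.get(p, depth + 1):
                    revdepth[p] = newdepth
                    queue.append(p)
        return keep

    # Toy linear history, child -> parents: 3 -> 2 -> 1 -> 0
    parents = {3: [2], 2: [1], 1: [0], 0: []}
    assert shallowrevs(parents, heads=[3], depth=1) == {3, 2}
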
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/exchangev2.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,697 @@
+# exchangev2.py - repository exchange for wire protocol version 2
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import weakref
+
+from .i18n import _
+from .node import (
+    nullid,
+    short,
+)
+from . import (
+    bookmarks,
+    error,
+    mdiff,
+    narrowspec,
+    phases,
+    pycompat,
+    repository,
+    setdiscovery,
+)
+
+def pull(pullop):
+    """Pull using wire protocol version 2."""
+    repo = pullop.repo
+    remote = pullop.remote
+
+    usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
+
+    # If this is a clone and it was requested to perform a "stream clone",
+    # we obtain the raw files data from the remote then fall back to an
+    # incremental pull. This is somewhat hacky and is not nearly robust enough
+    # for long-term usage.
+    if usingrawchangelogandmanifest:
+        with repo.transaction('clone'):
+            _fetchrawstorefiles(repo, remote)
+            repo.invalidate(clearfilecache=True)
+
+    tr = pullop.trmanager.transaction()
+
+    # We don't use the repo's narrow matcher here because the patterns passed
+    # to exchange.pull() could be different.
+    narrowmatcher = narrowspec.match(repo.root,
+                                     # Empty maps to nevermatcher. So always
+                                     # set includes if missing.
+                                     pullop.includepats or {'path:.'},
+                                     pullop.excludepats)
+
+    if pullop.includepats or pullop.excludepats:
+        pathfilter = {}
+        if pullop.includepats:
+            pathfilter[b'include'] = sorted(pullop.includepats)
+        if pullop.excludepats:
+            pathfilter[b'exclude'] = sorted(pullop.excludepats)
+    else:
+        pathfilter = None
+
+    # Figure out what needs to be fetched.
+    common, fetch, remoteheads = _pullchangesetdiscovery(
+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)
+
+    # And fetch the data.
+    pullheads = pullop.heads or remoteheads
+    csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
+
+    # New revisions are written to the changelog. But all other updates
+    # are deferred. Do those now.
+
+    # Ensure all new changesets are draft by default. If the repo is
+    # publishing, the phase will be adjusted by the loop below.
+    if csetres['added']:
+        phases.registernew(repo, tr, phases.draft, csetres['added'])
+
+    # And adjust the phase of all changesets accordingly.
+    for phase in phases.phasenames:
+        if phase == b'secret' or not csetres['nodesbyphase'][phase]:
+            continue
+
+        phases.advanceboundary(repo, tr, phases.phasenames.index(phase),
+                               csetres['nodesbyphase'][phase])
+
+    # Write bookmark updates.
+    bookmarks.updatefromremote(repo.ui, repo, csetres['bookmarks'],
+                               remote.url(), pullop.gettransaction,
+                               explicit=pullop.explicitbookmarks)
+
+    manres = _fetchmanifests(repo, tr, remote, csetres['manifestnodes'])
+
+    # We don't properly support shallow changeset and manifest yet. So we apply
+    # depth limiting locally.
+    if pullop.depth:
+        relevantcsetnodes = set()
+        clnode = repo.changelog.node
+
+        for rev in repo.revs(b'ancestors(%ln, %d)',
+                             pullheads, pullop.depth - 1):
+            relevantcsetnodes.add(clnode(rev))
+
+        csetrelevantfilter = lambda n: n in relevantcsetnodes
+
+    else:
+        csetrelevantfilter = lambda n: True
+
+    # If obtaining the raw store files, we need to scan the full repo to
+    # derive all the changesets, manifests, and linkrevs.
+    if usingrawchangelogandmanifest:
+        csetsforfiles = []
+        mnodesforfiles = []
+        manifestlinkrevs = {}
+
+        for rev in repo:
+            ctx = repo[rev]
+            node = ctx.node()
+
+            if not csetrelevantfilter(node):
+                continue
+
+            mnode = ctx.manifestnode()
+
+            csetsforfiles.append(node)
+            mnodesforfiles.append(mnode)
+            manifestlinkrevs[mnode] = rev
+
+    else:
+        csetsforfiles = [n for n in csetres['added'] if csetrelevantfilter(n)]
+        mnodesforfiles = manres['added']
+        manifestlinkrevs = manres['linkrevs']
+
+    # Find all file nodes referenced by added manifests and fetch those
+    # revisions.
+    fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
+    _fetchfilesfromcsets(repo, tr, remote, pathfilter, fnodes, csetsforfiles,
+                         manifestlinkrevs, shallow=bool(pullop.depth))
+
+def _checkuserawstorefiledata(pullop):
+    """Check whether we should use rawstorefiledata command to retrieve data."""
+
+    repo = pullop.repo
+    remote = pullop.remote
+
+    # Command to obtain raw store data isn't available.
+    if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
+        return False
+
+    # Only honor if user requested stream clone operation.
+    if not pullop.streamclonerequested:
+        return False
+
+    # Only works on empty repos.
+    if len(repo):
+        return False
+
+    # TODO This is super hacky. There needs to be a storage API for this. We
+    # also need to check for compatibility with the remote.
+    if b'revlogv1' not in repo.requirements:
+        return False
+
+    return True
+
+def _fetchrawstorefiles(repo, remote):
+    with remote.commandexecutor() as e:
+        objs = e.callcommand(b'rawstorefiledata', {
+            b'files': [b'changelog', b'manifestlog'],
+        }).result()
+
+        # First object is a summary of files data that follows.
+        overall = next(objs)
+
+        progress = repo.ui.makeprogress(_('clone'), total=overall[b'totalsize'],
+                                        unit=_('bytes'))
+        with progress:
+            progress.update(0)
+
+            # Next are pairs of file metadata, data.
+            while True:
+                try:
+                    filemeta = next(objs)
+                except StopIteration:
+                    break
+
+                for k in (b'location', b'path', b'size'):
+                    if k not in filemeta:
+                        raise error.Abort(_(b'remote file data missing key: %s')
+                                          % k)
+
+                if filemeta[b'location'] == b'store':
+                    vfs = repo.svfs
+                else:
+                    raise error.Abort(_(b'invalid location for raw file data: '
+                                        b'%s') % filemeta[b'location'])
+
+                bytesremaining = filemeta[b'size']
+
+                with vfs.open(filemeta[b'path'], b'wb') as fh:
+                    while True:
+                        try:
+                            chunk = next(objs)
+                        except StopIteration:
+                            break
+
+                        bytesremaining -= len(chunk)
+
+                        if bytesremaining < 0:
+                            raise error.Abort(_(
+                                b'received invalid number of bytes for file '
+                                b'data; expected %d, got extra') %
+                                              filemeta[b'size'])
+
+                        progress.increment(step=len(chunk))
+                        fh.write(chunk)
+
+                        try:
+                            if chunk.islast:
+                                break
+                        except AttributeError:
+                            raise error.Abort(_(
+                                b'did not receive indefinite length bytestring '
+                                b'for file data'))
+
+                if bytesremaining:
+                    raise error.Abort(_(b'received invalid number of bytes '
+                                        b'for file data; expected %d got %d') %
+                                      (filemeta[b'size'],
+                                       filemeta[b'size'] - bytesremaining))
+
+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
+    """Determine which changesets need to be pulled."""
+
+    if heads:
+        knownnode = repo.changelog.hasnode
+        if all(knownnode(head) for head in heads):
+            return heads, False, heads
+
+    # TODO wire protocol version 2 is capable of more efficient discovery
+    # than setdiscovery. Consider implementing something better.
+    common, fetch, remoteheads = setdiscovery.findcommonheads(
+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)
+
+    common = set(common)
+    remoteheads = set(remoteheads)
+
+    # If a remote head is filtered locally, put it back in the common set.
+    # See the comment in exchange._pulldiscoverychangegroup() for more.
+
+    if fetch and remoteheads:
+        nodemap = repo.unfiltered().changelog.nodemap
+
+        common |= {head for head in remoteheads if head in nodemap}
+
+        if set(remoteheads).issubset(common):
+            fetch = []
+
+    common.discard(nullid)
+
+    return common, fetch, remoteheads
+
+def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
+    # TODO consider adding a step here where we obtain the DAG shape first
+    # (or ask the server to slice changesets into chunks for us) so that
+    # we can perform multiple fetches in batches. This will facilitate
+    # resuming interrupted clones, higher server-side cache hit rates due
+    # to smaller segments, etc.
+    with remote.commandexecutor() as e:
+        objs = e.callcommand(b'changesetdata', {
+            b'revisions': [{
+                b'type': b'changesetdagrange',
+                b'roots': sorted(common),
+                b'heads': sorted(remoteheads),
+            }],
+            b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
+        }).result()
+
+        # The context manager waits on all response data when exiting. So
+        # we need to remain in the context manager in order to stream data.
+        return _processchangesetdata(repo, tr, objs)
+
+def _processchangesetdata(repo, tr, objs):
+    repo.hook('prechangegroup', throw=True,
+              **pycompat.strkwargs(tr.hookargs))
+
+    urepo = repo.unfiltered()
+    cl = urepo.changelog
+
+    cl.delayupdate(tr)
+
+    # The first emitted object is a header describing the data that
+    # follows.
+    meta = next(objs)
+
+    progress = repo.ui.makeprogress(_('changesets'),
+                                    unit=_('chunks'),
+                                    total=meta.get(b'totalitems'))
+
+    manifestnodes = {}
+
+    def linkrev(node):
+        repo.ui.debug('add changeset %s\n' % short(node))
+        # Linkrev for changelog is always self.
+        return len(cl)
+
+    def onchangeset(cl, node):
+        progress.increment()
+
+        revision = cl.changelogrevision(node)
+
+        # We need to preserve the mapping of changelog revision to node
+        # so we can set the linkrev accordingly when manifests are added.
+        manifestnodes[cl.rev(node)] = revision.manifest
+
+    nodesbyphase = {phase: set() for phase in phases.phasenames}
+    remotebookmarks = {}
+
+    # addgroup() expects a 7-tuple describing revisions. This normalizes
+    # the wire data to that format.
+    #
+    # This loop also aggregates non-revision metadata, such as phase
+    # data.
+    def iterrevisions():
+        for cset in objs:
+            node = cset[b'node']
+
+            if b'phase' in cset:
+                nodesbyphase[cset[b'phase']].add(node)
+
+            for mark in cset.get(b'bookmarks', []):
+                remotebookmarks[mark] = node
+
+            # TODO add mechanism for extensions to examine records so they
+            # can siphon off custom data fields.
+
+            extrafields = {}
+
+            for field, size in cset.get(b'fieldsfollowing', []):
+                extrafields[field] = next(objs)
+
+            # Some entries might be metadata-only updates.
+            if b'revision' not in extrafields:
+                continue
+
+            data = extrafields[b'revision']
+
+            yield (
+                node,
+                cset[b'parents'][0],
+                cset[b'parents'][1],
+                # Linknode is always itself for changesets.
+                cset[b'node'],
+                # We always send full revisions. So delta base is not set.
+                nullid,
+                mdiff.trivialdiffheader(len(data)) + data,
+                # Flags not yet supported.
+                0,
+            )
+
+    added = cl.addgroup(iterrevisions(), linkrev, weakref.proxy(tr),
+                        addrevisioncb=onchangeset)
+
+    progress.complete()
+
+    return {
+        'added': added,
+        'nodesbyphase': nodesbyphase,
+        'bookmarks': remotebookmarks,
+        'manifestnodes': manifestnodes,
+    }
+
+def _fetchmanifests(repo, tr, remote, manifestnodes):
+    rootmanifest = repo.manifestlog.getstorage(b'')
+
+    # Some manifests can be shared between changesets. Filter out revisions
+    # we already know about.
+    fetchnodes = []
+    linkrevs = {}
+    seen = set()
+
+    for clrev, node in sorted(manifestnodes.iteritems()):
+        if node in seen:
+            continue
+
+        try:
+            rootmanifest.rev(node)
+        except error.LookupError:
+            fetchnodes.append(node)
+            linkrevs[node] = clrev
+
+        seen.add(node)
+
+    # TODO handle tree manifests
+
+    # addgroup() expects 7-tuple describing revisions. This normalizes
+    # the wire data to that format.
+    def iterrevisions(objs, progress):
+        for manifest in objs:
+            node = manifest[b'node']
+
+            extrafields = {}
+
+            for field, size in manifest.get(b'fieldsfollowing', []):
+                extrafields[field] = next(objs)
+
+            if b'delta' in extrafields:
+                basenode = manifest[b'deltabasenode']
+                delta = extrafields[b'delta']
+            elif b'revision' in extrafields:
+                basenode = nullid
+                revision = extrafields[b'revision']
+                delta = mdiff.trivialdiffheader(len(revision)) + revision
+            else:
+                continue
+
+            yield (
+                node,
+                manifest[b'parents'][0],
+                manifest[b'parents'][1],
+                # The value passed in is passed to the lookup function passed
+                # to addgroup(). We already have a map of manifest node to
+                # changelog revision number. So we just pass in the
+                # manifest node here and use linkrevs.__getitem__ as the
+                # resolution function.
+                node,
+                basenode,
+                delta,
+                # Flags not yet supported.
+                0
+            )
+
+            progress.increment()
+
+    progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
+                                    total=len(fetchnodes))
+
+    commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
+    batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
+    # TODO make size configurable on client?
+
+    # We send commands 1 at a time to the remote. This is not the most
+    # efficient because we incur a round trip at the end of each batch.
+    # However, the existing frame-based reactor keeps consuming server
+    # data in the background. And this results in response data buffering
+    # in memory. This can consume gigabytes of memory.
+    # TODO send multiple commands in a request once background buffering
+    # issues are resolved.
+
+    added = []
+
+    for i in pycompat.xrange(0, len(fetchnodes), batchsize):
+        batch = [node for node in fetchnodes[i:i + batchsize]]
+        if not batch:
+            continue
+
+        with remote.commandexecutor() as e:
+            objs = e.callcommand(b'manifestdata', {
+                b'tree': b'',
+                b'nodes': batch,
+                b'fields': {b'parents', b'revision'},
+                b'haveparents': True,
+            }).result()
+
+            # Chomp off header object.
+            next(objs)
+
+            added.extend(rootmanifest.addgroup(
+                iterrevisions(objs, progress),
+                linkrevs.__getitem__,
+                weakref.proxy(tr)))
+
+    progress.complete()
+
+    return {
+        'added': added,
+        'linkrevs': linkrevs,
+    }
+
+def _derivefilesfrommanifests(repo, matcher, manifestnodes):
+    """Determine what file nodes are relevant given a set of manifest nodes.
+
+    Returns a dict mapping file paths to dicts of file node to first manifest
+    node.
+    """
+    ml = repo.manifestlog
+    fnodes = collections.defaultdict(dict)
+
+    progress = repo.ui.makeprogress(
+        _('scanning manifests'), total=len(manifestnodes))
+
+    with progress:
+        for manifestnode in manifestnodes:
+            m = ml.get(b'', manifestnode)
+
+            # TODO this will pull in unwanted nodes because it takes the storage
+            # delta into consideration. What we really want is something that
+            # takes the delta between the manifest's parents. And ideally we
+            # would ignore file nodes that are known locally. For now, ignore
+            # both these limitations. This will result in incremental fetches
+            # requesting data we already have. So this is far from ideal.
+            md = m.readfast()
+
+            for path, fnode in md.items():
+                if matcher(path):
+                    fnodes[path].setdefault(fnode, manifestnode)
+
+            progress.increment()
+
+    return fnodes
+
+def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
+    """Fetch file data from explicit file revisions."""
+    def iterrevisions(objs, progress):
+        for filerevision in objs:
+            node = filerevision[b'node']
+
+            extrafields = {}
+
+            for field, size in filerevision.get(b'fieldsfollowing', []):
+                extrafields[field] = next(objs)
+
+            if b'delta' in extrafields:
+                basenode = filerevision[b'deltabasenode']
+                delta = extrafields[b'delta']
+            elif b'revision' in extrafields:
+                basenode = nullid
+                revision = extrafields[b'revision']
+                delta = mdiff.trivialdiffheader(len(revision)) + revision
+            else:
+                continue
+
+            yield (
+                node,
+                filerevision[b'parents'][0],
+                filerevision[b'parents'][1],
+                node,
+                basenode,
+                delta,
+                # Flags not yet supported.
+                0,
+            )
+
+            progress.increment()
+
+    progress = repo.ui.makeprogress(
+        _('files'), unit=_('chunks'),
+        total=sum(len(v) for v in fnodes.itervalues()))
+
+    # TODO make batch size configurable
+    batchsize = 10000
+    fnodeslist = [x for x in sorted(fnodes.items())]
+
+    for i in pycompat.xrange(0, len(fnodeslist), batchsize):
+        batch = [x for x in fnodeslist[i:i + batchsize]]
+        if not batch:
+            continue
+
+        with remote.commandexecutor() as e:
+            fs = []
+            locallinkrevs = {}
+
+            for path, nodes in batch:
+                fs.append((path, e.callcommand(b'filedata', {
+                    b'path': path,
+                    b'nodes': sorted(nodes),
+                    b'fields': {b'parents', b'revision'},
+                    b'haveparents': True,
+                })))
+
+                locallinkrevs[path] = {
+                    node: linkrevs[manifestnode]
+                    for node, manifestnode in nodes.iteritems()}
+
+            for path, f in fs:
+                objs = f.result()
+
+                # Chomp off header objects.
+                next(objs)
+
+                store = repo.file(path)
+                store.addgroup(
+                    iterrevisions(objs, progress),
+                    locallinkrevs[path].__getitem__,
+                    weakref.proxy(tr))
+
+def _fetchfilesfromcsets(repo, tr, remote, pathfilter, fnodes, csets,
+                         manlinkrevs, shallow=False):
+    """Fetch file data from explicit changeset revisions."""
+
+    def iterrevisions(objs, remaining, progress):
+        while remaining:
+            filerevision = next(objs)
+
+            node = filerevision[b'node']
+
+            extrafields = {}
+
+            for field, size in filerevision.get(b'fieldsfollowing', []):
+                extrafields[field] = next(objs)
+
+            if b'delta' in extrafields:
+                basenode = filerevision[b'deltabasenode']
+                delta = extrafields[b'delta']
+            elif b'revision' in extrafields:
+                basenode = nullid
+                revision = extrafields[b'revision']
+                delta = mdiff.trivialdiffheader(len(revision)) + revision
+            else:
+                continue
+
+            if b'linknode' in filerevision:
+                linknode = filerevision[b'linknode']
+            else:
+                linknode = node
+
+            yield (
+                node,
+                filerevision[b'parents'][0],
+                filerevision[b'parents'][1],
+                linknode,
+                basenode,
+                delta,
+                # Flags not yet supported.
+                0,
+            )
+
+            progress.increment()
+            remaining -= 1
+
+    progress = repo.ui.makeprogress(
+        _('files'), unit=_('chunks'),
+        total=sum(len(v) for v in fnodes.itervalues()))
+
+    commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
+    batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
+
+    shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
+    fields = {b'parents', b'revision'}
+    clrev = repo.changelog.rev
+
+    # There are no guarantees that we'll have ancestor revisions if
+    # a) this repo has shallow file storage or b) shallow data fetching is
+    # enabled.
+    # Force remote to not delta against possibly unknown revisions when these
+    # conditions hold.
+    haveparents = not (shallowfiles or shallow)
+
+    # Similarly, we may not have calculated linkrevs for all incoming file
+    # revisions. Ask the remote to do work for us in this case.
+    if not haveparents:
+        fields.add(b'linknode')
+
+    for i in pycompat.xrange(0, len(csets), batchsize):
+        batch = [x for x in csets[i:i + batchsize]]
+        if not batch:
+            continue
+
+        with remote.commandexecutor() as e:
+            args = {
+                b'revisions': [{
+                    b'type': b'changesetexplicit',
+                    b'nodes': batch,
+                }],
+                b'fields': fields,
+                b'haveparents': haveparents,
+            }
+
+            if pathfilter:
+                args[b'pathfilter'] = pathfilter
+
+            objs = e.callcommand(b'filesdata', args).result()
+
+            # First object is an overall header.
+            overall = next(objs)
+
+            # We have overall['totalpaths'] segments.
+            for i in pycompat.xrange(overall[b'totalpaths']):
+                header = next(objs)
+
+                path = header[b'path']
+                store = repo.file(path)
+
+                linkrevs = {
+                    fnode: manlinkrevs[mnode]
+                    for fnode, mnode in fnodes[path].iteritems()}
+
+                def getlinkrev(node):
+                    if node in linkrevs:
+                        return linkrevs[node]
+                    else:
+                        return clrev(node)
+
+                store.addgroup(iterrevisions(objs, header[b'totalitems'],
+                                             progress),
+                               getlinkrev,
+                               weakref.proxy(tr),
+                               maybemissingparents=shallow)
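
Both `_fetchmanifests()` and `_fetchfilesfromcsets()` above slice their node lists into chunks of `recommendedbatchsize` and issue one wire protocol command per chunk, bounding client-side response buffering at the cost of a round trip per batch (the TODOs note that pipelining is blocked on reactor buffering issues). The slicing itself reduces to:

    def batches(items, batchsize):
        # One slice per wire command; the final batch may be short.
        for i in range(0, len(items), batchsize):
            yield items[i:i + batchsize]

    assert list(batches([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
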
--- a/mercurial/exewrapper.c	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/exewrapper.c	Mon Oct 22 14:46:06 2018 -0400
@@ -8,6 +8,7 @@
 */
 
 #include <stdio.h>
+#include <tchar.h>
 #include <windows.h>
 
 #include "hgpythonlib.h"
@@ -21,39 +22,42 @@
 {
 	return !strncpy(d, s, n);
 }
+
+#define _tcscpy_s strcpy_s
+#define _tcscat_s strcat_s
+#define _countof(array) (sizeof(array)/sizeof(array[0]))
 #endif
 
-static char pyscript[MAX_PATH + 10];
-static char pyhome[MAX_PATH + 10];
-static char envpyhome[MAX_PATH + 10];
-static char pydllfile[MAX_PATH + 10];
+static TCHAR pyscript[MAX_PATH + 10];
+static TCHAR pyhome[MAX_PATH + 10];
+static TCHAR pydllfile[MAX_PATH + 10];
 
-int main(int argc, char *argv[])
+int _tmain(int argc, TCHAR *argv[])
 {
-	char *p;
+	TCHAR *p;
 	int ret;
 	int i;
 	int n;
-	char **pyargv;
+	TCHAR **pyargv;
 	WIN32_FIND_DATA fdata;
 	HANDLE hfind;
 	const char *err;
 	HMODULE pydll;
-	void(__cdecl * Py_SetPythonHome)(char *home);
-	int(__cdecl * Py_Main)(int argc, char *argv[]);
+	void(__cdecl * Py_SetPythonHome)(TCHAR *home);
+	int(__cdecl * Py_Main)(int argc, TCHAR *argv[]);
 
-	if (GetModuleFileName(NULL, pyscript, sizeof(pyscript)) == 0) {
+	if (GetModuleFileName(NULL, pyscript, _countof(pyscript)) == 0) {
 		err = "GetModuleFileName failed";
 		goto bail;
 	}
 
-	p = strrchr(pyscript, '.');
+	p = _tcsrchr(pyscript, '.');
 	if (p == NULL) {
 		err = "malformed module filename";
 		goto bail;
 	}
 	*p = 0; /* cut trailing ".exe" */
-	strcpy_s(pyhome, sizeof(pyhome), pyscript);
+	_tcscpy_s(pyhome, _countof(pyhome), pyscript);
 
 	hfind = FindFirstFile(pyscript, &fdata);
 	if (hfind != INVALID_HANDLE_VALUE) {
@@ -61,12 +65,12 @@
 		FindClose(hfind);
 	} else {
 		/* file pyscript isn't there, take <pyscript>exe.py */
-		strcat_s(pyscript, sizeof(pyscript), "exe.py");
+		_tcscat_s(pyscript, _countof(pyscript), _T("exe.py"));
 	}
 
 	pydll = NULL;
 
-	p = strrchr(pyhome, '\\');
+	p = _tcsrchr(pyhome, _T('\\'));
 	if (p == NULL) {
 		err = "can't find backslash in module filename";
 		goto bail;
@@ -74,19 +78,19 @@
 	*p = 0; /* cut at directory */
 
 	/* check for private Python of HackableMercurial */
-	strcat_s(pyhome, sizeof(pyhome), "\\hg-python");
+	_tcscat_s(pyhome, _countof(pyhome), _T("\\hg-python"));
 
 	hfind = FindFirstFile(pyhome, &fdata);
 	if (hfind != INVALID_HANDLE_VALUE) {
 		/* Path .\hg-python exists. We are probably in HackableMercurial
 		scenario, so let's load python dll from this dir. */
 		FindClose(hfind);
-		strcpy_s(pydllfile, sizeof(pydllfile), pyhome);
-		strcat_s(pydllfile, sizeof(pydllfile), "\\" HGPYTHONLIB ".dll");
+		_tcscpy_s(pydllfile, _countof(pydllfile), pyhome);
+		_tcscat_s(pydllfile, _countof(pydllfile), _T("\\") _T(HGPYTHONLIB)
+					_T(".dll"));
 		pydll = LoadLibrary(pydllfile);
 		if (pydll == NULL) {
-			err = "failed to load private Python DLL " HGPYTHONLIB
-			      ".dll";
+			err = "failed to load private Python DLL " HGPYTHONLIB ".dll";
 			goto bail;
 		}
 		Py_SetPythonHome =
@@ -99,7 +103,7 @@
 	}
 
 	if (pydll == NULL) {
-		pydll = LoadLibrary(HGPYTHONLIB ".dll");
+		pydll = LoadLibrary(_T(HGPYTHONLIB) _T(".dll"));
 		if (pydll == NULL) {
 			err = "failed to load Python DLL " HGPYTHONLIB ".dll";
 			goto bail;
@@ -119,7 +123,7 @@
 	place. So we optionally accept the pyscript as the first argument
 	(argv[1]), letting our exe taking the role of the python interpreter.
 	*/
-	if (argc >= 2 && strcmp(argv[1], pyscript) == 0) {
+	if (argc >= 2 && _tcscmp(argv[1], pyscript) == 0) {
 		/*
 		pyscript is already in the args, so there is no need to copy
 		the args and we can directly call the python interpreter with
@@ -133,7 +137,7 @@
 	name of our exe (argv[0]) in the position where the python.exe
 	canonically is, and insert the pyscript next.
 	*/
-	pyargv = malloc((argc + 5) * sizeof(char *));
+	pyargv = malloc((argc + 5) * sizeof(TCHAR *));
 	if (pyargv == NULL) {
 		err = "not enough memory";
 		goto bail;
--- a/mercurial/extensions.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/extensions.py	Mon Oct 22 14:46:06 2018 -0400
@@ -124,7 +124,7 @@
     # note: this ui.debug happens before --debug is processed,
     #       Use --config ui.debug=1 to see them.
     if ui.configbool('devel', 'debug.extensions'):
-        ui.debug('could not import %s (%s): trying %s\n'
+        ui.debug('debug.extensions:     - could not import %s (%s): trying %s\n'
                  % (failed, stringutil.forcebytestr(err), next))
         if ui.debugflag:
             ui.traceback()
@@ -166,7 +166,7 @@
             _rejectunicode(t, o._table)
     _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
 
-def load(ui, name, path):
+def load(ui, name, path, log=lambda *a: None, loadingtime=None):
     if name.startswith('hgext.') or name.startswith('hgext/'):
         shortname = name[6:]
     else:
@@ -175,8 +175,13 @@
         return None
     if shortname in _extensions:
         return _extensions[shortname]
+    log('  - loading extension: %r\n', shortname)
     _extensions[shortname] = None
-    mod = _importext(name, path, bind(_reportimporterror, ui))
+    with util.timedcm('load extension %r', shortname) as stats:
+        mod = _importext(name, path, bind(_reportimporterror, ui))
+    log('  > %r extension loaded in %s\n', shortname, stats)
+    if loadingtime is not None:
+        loadingtime[shortname] += stats.elapsed
 
     # Before we do anything with the extension, check against minimum stated
     # compatibility. This gives extension authors a mechanism to have their
@@ -187,12 +192,16 @@
         ui.warn(_('(third party extension %s requires version %s or newer '
                   'of Mercurial; disabling)\n') % (shortname, minver))
         return
+    log('    - validating extension tables: %r\n', shortname)
     _validatetables(ui, mod)
 
     _extensions[shortname] = mod
     _order.append(shortname)
-    for fn in _aftercallbacks.get(shortname, []):
-        fn(loaded=True)
+    log('    - invoking registered callbacks: %r\n', shortname)
+    with util.timedcm('callbacks extension %r', shortname) as stats:
+        for fn in _aftercallbacks.get(shortname, []):
+            fn(loaded=True)
+    log('    > callbacks completed in %s\n', stats)
     return mod
 
 def _runuisetup(name, ui):
@@ -225,28 +234,42 @@
     return True
 
 def loadall(ui, whitelist=None):
+    if ui.configbool('devel', 'debug.extensions'):
+        log = lambda msg, *values: ui.debug('debug.extensions: ',
+            msg % values, label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
+    loadingtime = collections.defaultdict(int)
     result = ui.configitems("extensions")
     if whitelist is not None:
         result = [(k, v) for (k, v) in result if k in whitelist]
     newindex = len(_order)
-    for (name, path) in result:
-        if path:
-            if path[0:1] == '!':
-                _disabledextensions[name] = path[1:]
-                continue
-        try:
-            load(ui, name, path)
-        except Exception as inst:
-            msg = stringutil.forcebytestr(inst)
+    log('loading %sextensions\n', 'additional ' if newindex else '')
+    log('- processing %d entries\n', len(result))
+    with util.timedcm('load all extensions') as stats:
+        for (name, path) in result:
             if path:
-                ui.warn(_("*** failed to import extension %s from %s: %s\n")
-                        % (name, path, msg))
-            else:
-                ui.warn(_("*** failed to import extension %s: %s\n")
-                        % (name, msg))
-            if isinstance(inst, error.Hint) and inst.hint:
-                ui.warn(_("*** (%s)\n") % inst.hint)
-            ui.traceback()
+                if path[0:1] == '!':
+                    if name not in _disabledextensions:
+                        log('  - skipping disabled extension: %r\n', name)
+                    _disabledextensions[name] = path[1:]
+                    continue
+            try:
+                load(ui, name, path, log, loadingtime)
+            except Exception as inst:
+                msg = stringutil.forcebytestr(inst)
+                if path:
+                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
+                            % (name, path, msg))
+                else:
+                    ui.warn(_("*** failed to import extension %s: %s\n")
+                            % (name, msg))
+                if isinstance(inst, error.Hint) and inst.hint:
+                    ui.warn(_("*** (%s)\n") % inst.hint)
+                ui.traceback()
+
+    log('> loaded %d extensions, total time %s\n',
+        len(_order) - newindex, stats)
     # list of (objname, loadermod, loadername) tuple:
     # - objname is the name of an object in extension module,
     #   from which extra information is loaded
@@ -258,29 +281,53 @@
     earlyextraloaders = [
         ('configtable', configitems, 'loadconfigtable'),
     ]
+
+    log('- loading configtable attributes\n')
     _loadextra(ui, newindex, earlyextraloaders)
 
     broken = set()
-    for name in _order[newindex:]:
-        if not _runuisetup(name, ui):
-            broken.add(name)
+    log('- executing uisetup hooks\n')
+    with util.timedcm('all uisetup') as alluisetupstats:
+        for name in _order[newindex:]:
+            log('  - running uisetup for %r\n', name)
+            with util.timedcm('uisetup %r', name) as stats:
+                if not _runuisetup(name, ui):
+                    log('    - the %r extension uisetup failed\n', name)
+                    broken.add(name)
+            log('  > uisetup for %r took %s\n', name, stats)
+            loadingtime[name] += stats.elapsed
+    log('> all uisetup took %s\n', alluisetupstats)
 
-    for name in _order[newindex:]:
-        if name in broken:
-            continue
-        if not _runextsetup(name, ui):
-            broken.add(name)
+    log('- executing extsetup hooks\n')
+    with util.timedcm('all extsetup') as allextsetupstats:
+        for name in _order[newindex:]:
+            if name in broken:
+                continue
+            log('  - running extsetup for %r\n', name)
+            with util.timedcm('extsetup %r', name) as stats:
+                if not _runextsetup(name, ui):
+                    log('    - the %r extension extsetup failed\n', name)
+                    broken.add(name)
+            log('  > extsetup for %r took %s\n', name, stats)
+            loadingtime[name] += stats.elapsed
+    log('> all extsetup took %s\n', allextsetupstats)
 
     for name in broken:
+        log('    - disabling broken %r extension\n', name)
         _extensions[name] = None
 
     # Call aftercallbacks that were never met.
-    for shortname in _aftercallbacks:
-        if shortname in _extensions:
-            continue
+    log('- executing remaining aftercallbacks\n')
+    with util.timedcm('aftercallbacks') as stats:
+        for shortname in _aftercallbacks:
+            if shortname in _extensions:
+                continue
 
-        for fn in _aftercallbacks[shortname]:
-            fn(loaded=False)
+            for fn in _aftercallbacks[shortname]:
+                log('  - extension %r not loaded, notify callbacks\n',
+                    shortname)
+                fn(loaded=False)
+    log('> remaining aftercallbacks completed in %s\n', stats)
 
     # loadall() is called multiple times and lingering _aftercallbacks
     # entries could result in double execution. See issue4646.
@@ -304,6 +351,7 @@
     # - loadermod is the module where loader is placed
     # - loadername is the name of the function,
     #   which takes (ui, extensionname, extraobj) arguments
+    log('- loading extension registration objects\n')
     extraloaders = [
         ('cmdtable', commands, 'loadcmdtable'),
         ('colortable', color, 'loadcolortable'),
@@ -314,7 +362,16 @@
         ('templatefunc', templatefuncs, 'loadfunction'),
         ('templatekeyword', templatekw, 'loadkeyword'),
     ]
-    _loadextra(ui, newindex, extraloaders)
+    with util.timedcm('load registration objects') as stats:
+        _loadextra(ui, newindex, extraloaders)
+    log('> extension registration object loading took %s\n', stats)
+
+    # Report per extension loading time (except reposetup)
+    for name in sorted(loadingtime):
+        extension_msg = '> extension %s took a total of %s to load\n'
+        log(extension_msg, name, util.timecount(loadingtime[name]))
+
+    log('extension loading complete\n')
 
 def _loadextra(ui, newindex, extraloaders):
     for name in _order[newindex:]:
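
The extensions.py instrumentation leans on `util.timedcm`, a context manager whose result object exposes the elapsed wall time, with per-extension totals accumulated in a `defaultdict`. A rough stand-in for the pattern (this is not Mercurial's actual `timedcm` implementation; the format arguments are kept only to mirror the call shape):

    import collections
    import contextlib
    import time

    @contextlib.contextmanager
    def timedcm(label, *args):
        # Yield a stats object whose `elapsed` is filled in on exit, so
        # the caller can still read it after the with-block finishes.
        class _stats(object):
            elapsed = 0.0
        stats = _stats()
        start = time.time()
        try:
            yield stats
        finally:
            stats.elapsed = time.time() - start

    loadingtime = collections.defaultdict(float)
    with timedcm('load extension %r', 'example') as stats:
        pass  # importing the extension module would happen here
    loadingtime['example'] += stats.elapsed
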
--- a/mercurial/filelog.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/filelog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,6 +7,11 @@
 
 from __future__ import absolute_import
 
+from .i18n import _
+from .node import (
+    nullid,
+    nullrev,
+)
 from . import (
     error,
     repository,
@@ -14,6 +19,7 @@
 )
 from .utils import (
     interfaceutil,
+    storageutil,
 )
 
 @interfaceutil.implementer(repository.ifilestorage)
@@ -22,12 +28,9 @@
         self._revlog = revlog.revlog(opener,
                                      '/'.join(('data', path + '.i')),
                                      censorable=True)
-        # full name of the user visible file, relative to the repository root
-        self.filename = path
-        self.index = self._revlog.index
-        self.version = self._revlog.version
-        self.storedeltachains = self._revlog.storedeltachains
-        self._generaldelta = self._revlog._generaldelta
+        # Full name of the user visible file, relative to the repository root.
+        # Used by LFS.
+        self._revlog.filename = path
 
     def __len__(self):
         return len(self._revlog)
@@ -35,6 +38,16 @@
     def __iter__(self):
         return self._revlog.__iter__()
 
+    def hasnode(self, node):
+        if node in (nullid, nullrev):
+            return False
+
+        try:
+            self._revlog.rev(node)
+            return True
+        except (TypeError, ValueError, IndexError, error.LookupError):
+            return False
+
     def revs(self, start=0, stop=None):
         return self._revlog.revs(start=start, stop=stop)
 
@@ -51,49 +64,39 @@
         return self._revlog.node(rev)
 
     def lookup(self, node):
-        return self._revlog.lookup(node)
+        return storageutil.fileidlookup(self._revlog, node,
+                                        self._revlog.indexfile)
 
     def linkrev(self, rev):
         return self._revlog.linkrev(rev)
 
-    def flags(self, rev):
-        return self._revlog.flags(rev)
-
     def commonancestorsheads(self, node1, node2):
         return self._revlog.commonancestorsheads(node1, node2)
 
+    # Used by dagop.blockdescendants().
     def descendants(self, revs):
         return self._revlog.descendants(revs)
 
-    def headrevs(self):
-        return self._revlog.headrevs()
-
     def heads(self, start=None, stop=None):
         return self._revlog.heads(start, stop)
 
+    # Used by hgweb, children extension.
     def children(self, node):
         return self._revlog.children(node)
 
-    def deltaparent(self, rev):
-        return self._revlog.deltaparent(rev)
-
-    def candelta(self, baserev, rev):
-        return self._revlog.candelta(baserev, rev)
-
     def iscensored(self, rev):
         return self._revlog.iscensored(rev)
 
-    def rawsize(self, rev):
-        return self._revlog.rawsize(rev)
-
-    def checkhash(self, text, node, p1=None, p2=None, rev=None):
-        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
-
     def revision(self, node, _df=None, raw=False):
         return self._revlog.revision(node, _df=_df, raw=raw)
 
-    def revdiff(self, rev1, rev2):
-        return self._revlog.revdiff(rev1, rev2)
+    def emitrevisions(self, nodes, nodesorder=None,
+                      revisiondata=False, assumehaveparentrevisions=False,
+                      deltaprevious=False):
+        return self._revlog.emitrevisions(
+            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltaprevious=deltaprevious)
 
     def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
@@ -102,9 +105,14 @@
                                     p1, p2, node=node, flags=flags,
                                     cachedelta=cachedelta)
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        if maybemissingparents:
+            raise error.Abort(_('revlog storage does not support missing '
+                                'parents write mode'))
+
         return self._revlog.addgroup(deltas, linkmapper, transaction,
-                                 addrevisioncb=addrevisioncb)
+                                     addrevisioncb=addrevisioncb)
 
     def getstrippoint(self, minlink):
         return self._revlog.getstrippoint(minlink)
@@ -112,34 +120,22 @@
     def strip(self, minlink, transaction):
         return self._revlog.strip(minlink, transaction)
 
+    def censorrevision(self, tr, node, tombstone=b''):
+        return self._revlog.censorrevision(tr, node, tombstone=tombstone)
+
     def files(self):
         return self._revlog.files()
 
-    def checksize(self):
-        return self._revlog.checksize()
-
     def read(self, node):
-        t = self.revision(node)
-        if not t.startswith('\1\n'):
-            return t
-        s = t.index('\1\n', 2)
-        return t[s + 2:]
+        return storageutil.filtermetadata(self.revision(node))
 
     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
-            text = revlog.packmeta(meta, text)
+            text = storageutil.packmeta(meta, text)
         return self.addrevision(text, transaction, link, p1, p2)
 
     def renamed(self, node):
-        if self.parents(node)[0] != revlog.nullid:
-            return False
-        t = self.revision(node)
-        m = revlog.parsemeta(t)[0]
-        # copy and copyrev occur in pairs. In rare cases due to bugs,
-        # one can occur without the other.
-        if m and "copy" in m and "copyrev" in m:
-            return (m["copy"], revlog.bin(m["copyrev"]))
-        return False
+        return storageutil.filerevisioncopied(self, node)
 
     def size(self, rev):
         """return the size of a given revision"""
@@ -159,37 +155,23 @@
 
         returns True if text is different than what is stored.
         """
-
-        t = text
-        if text.startswith('\1\n'):
-            t = '\1\n\1\n' + text
+        return not storageutil.filedataequivalent(self, node, text)
 
-        samehashes = not self._revlog.cmp(node, t)
-        if samehashes:
-            return False
-
-        # censored files compare against the empty file
-        if self.iscensored(self.rev(node)):
-            return text != ''
+    def verifyintegrity(self, state):
+        return self._revlog.verifyintegrity(state)
 
-        # renaming a file produces a different hash, even if the data
-        # remains unchanged. Check if it's the case (slow):
-        if self.renamed(node):
-            t2 = self.read(node)
-            return t2 != text
-
-        return True
-
-    @property
-    def filename(self):
-        return self._revlog.filename
-
-    @filename.setter
-    def filename(self, value):
-        self._revlog.filename = value
+    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        return self._revlog.storageinfo(
+            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
+            revisionscount=revisionscount, trackedsize=trackedsize,
+            storedsize=storedsize)
 
     # TODO these aren't part of the interface and aren't internal methods.
     # Callers should be fixed to not use them.
+
+    # Used by bundlefilelog, unionfilelog.
     @property
     def indexfile(self):
         return self._revlog.indexfile
@@ -198,72 +180,60 @@
     def indexfile(self, value):
         self._revlog.indexfile = value
 
-    @property
-    def datafile(self):
-        return self._revlog.datafile
-
-    @property
-    def opener(self):
-        return self._revlog.opener
-
-    @property
-    def _lazydeltabase(self):
-        return self._revlog._lazydeltabase
-
-    @_lazydeltabase.setter
-    def _lazydeltabase(self, value):
-        self._revlog._lazydeltabase = value
-
-    @property
-    def _deltabothparents(self):
-        return self._revlog._deltabothparents
-
-    @_deltabothparents.setter
-    def _deltabothparents(self, value):
-        self._revlog._deltabothparents = value
-
-    @property
-    def _inline(self):
-        return self._revlog._inline
-
-    @property
-    def _withsparseread(self):
-        return getattr(self._revlog, '_withsparseread', False)
-
-    @property
-    def _srmingapsize(self):
-        return self._revlog._srmingapsize
-
-    @property
-    def _srdensitythreshold(self):
-        return self._revlog._srdensitythreshold
-
-    def _deltachain(self, rev, stoprev=None):
-        return self._revlog._deltachain(rev, stoprev)
-
-    def chainbase(self, rev):
-        return self._revlog.chainbase(rev)
-
-    def chainlen(self, rev):
-        return self._revlog.chainlen(rev)
-
+    # Used by repo upgrade.
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, filelog):
             raise error.ProgrammingError('expected filelog to clone()')
 
         return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
 
-    def start(self, rev):
-        return self._revlog.start(rev)
+class narrowfilelog(filelog):
+    """Filelog variation to be used with narrow stores."""
 
-    def end(self, rev):
-        return self._revlog.end(rev)
+    def __init__(self, opener, path, narrowmatch):
+        super(narrowfilelog, self).__init__(opener, path)
+        self._narrowmatch = narrowmatch
+
+    def renamed(self, node):
+        res = super(narrowfilelog, self).renamed(node)
 
-    def length(self, rev):
-        return self._revlog.length(rev)
+        # Renames that come from outside the narrowspec are problematic
+        # because we may lack the base text for the rename. This can result
+        # in code attempting to walk the ancestry or compute a diff
+        # encountering a missing revision. We address this by silently
+        # removing rename metadata if the source file is outside the
+        # narrow spec.
+        #
+        # A better solution would be to see if the base revision is available,
+        # rather than assuming it isn't.
+        #
+        # An even better solution would be to teach all consumers of rename
+        # metadata that the base revision may not be available.
+        #
+        # TODO consider better ways of doing this.
+        if res and not self._narrowmatch(res[0]):
+            return None
+
+        return res
 
-    def compress(self, data):
-        return self._revlog.compress(data)
+    def size(self, rev):
+        # Because we have a custom renamed() that may lie, we need to call
+        # the base renamed() to report accurate results.
+        node = self.node(rev)
+        if super(narrowfilelog, self).renamed(node):
+            return len(self.read(node))
+        else:
+            return super(narrowfilelog, self).size(rev)
 
-    def _addrevision(self, *args, **kwargs):
-        return self._revlog._addrevision(*args, **kwargs)
+    def cmp(self, node, text):
+        different = super(narrowfilelog, self).cmp(node, text)
+
+        # Because renamed() may lie, we may get false positives for
+        # different content. Check for this by comparing against the original
+        # renamed() implementation.
+        if different:
+            if super(narrowfilelog, self).renamed(node):
+                t2 = self.read(node)
+                return t2 != text
+
+        return different
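[Note: read() and add() above now delegate to storageutil helpers. The convention they wrap is visible in the removed inline code: copy metadata is stored as "key: value" lines sandwiched between two \1\n markers ahead of the file text. A self-contained sketch reconstructed from that removed logic -- packmeta/filtermetadata here are stand-ins for the storageutil functions:]

    def packmeta(meta, text):
        # serialize metadata as sorted "key: value" lines between \1\n markers
        metatext = ''.join('%s: %s\n' % (k, meta[k]) for k in sorted(meta))
        return '\x01\n%s\x01\n%s' % (metatext, text)

    def filtermetadata(text):
        # strip a leading metadata block, if present, returning the bare text
        if not text.startswith('\x01\n'):
            return text
        end = text.index('\x01\n', 2)
        return text[end + 2:]

    meta = {'copy': 'old/name', 'copyrev': '0' * 40}
    stored = packmeta(meta, 'file contents\n')
    assert filtermetadata(stored) == 'file contents\n'
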
--- a/mercurial/filemerge.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/filemerge.py	Mon Oct 22 14:46:06 2018 -0400
@@ -56,12 +56,14 @@
 fullmerge = internaltool.fullmerge # both premerge and merge
 
 _localchangedotherdeletedmsg = _(
-    "local%(l)s changed %(fd)s which other%(o)s deleted\n"
+    "file '%(fd)s' was deleted in other%(o)s but was modified in local%(l)s.\n"
+    "What do you want to do?\n"
     "use (c)hanged version, (d)elete, or leave (u)nresolved?"
     "$$ &Changed $$ &Delete $$ &Unresolved")
 
 _otherchangedlocaldeletedmsg = _(
-    "other%(o)s changed %(fd)s which local%(l)s deleted\n"
+    "file '%(fd)s' was deleted in local%(l)s but was modified in other%(o)s.\n"
+    "What do you want to do?\n"
     "use (c)hanged version, leave (d)eleted, or "
     "leave (u)nresolved?"
     "$$ &Changed $$ &Deleted $$ &Unresolved")
@@ -137,6 +139,13 @@
     return procutil.findexe(util.expandpath(exe))
 
 def _picktool(repo, ui, path, binary, symlink, changedelete):
+    strictcheck = ui.configbool('merge', 'strict-capability-check')
+
+    def hascapability(tool, capability, strict=False):
+        if tool in internals:
+            return strict and internals[tool].capabilities.get(capability)
+        return _toolbool(ui, tool, capability)
+
     def supportscd(tool):
         return tool in internals and internals[tool].mergetype == nomerge
 
@@ -149,9 +158,9 @@
                 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
             else: # configured but non-existing tools are more silent
                 ui.note(_("couldn't find merge tool %s\n") % tmsg)
-        elif symlink and not _toolbool(ui, tool, "symlink"):
+        elif symlink and not hascapability(tool, "symlink", strictcheck):
             ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
-        elif binary and not _toolbool(ui, tool, "binary"):
+        elif binary and not hascapability(tool, "binary", strictcheck):
             ui.warn(_("tool %s can't handle binary\n") % tmsg)
         elif changedelete and not supportscd(tool):
             # the nomerge tools are the only tools that support change/delete
@@ -186,9 +195,19 @@
             return (hgmerge, hgmerge)
 
     # then patterns
+
+    # whether binary capability should be checked strictly
+    binarycap = binary and strictcheck
+
     for pat, tool in ui.configitems("merge-patterns"):
         mf = match.match(repo.root, '', [pat])
-        if mf(path) and check(tool, pat, symlink, False, changedelete):
+        if mf(path) and check(tool, pat, symlink, binarycap, changedelete):
+            if binary and not hascapability(tool, "binary", strict=True):
+                ui.warn(_("warning: check merge-patterns configurations,"
+                          " if %r for binary file %r is unintentional\n"
+                          "(see 'hg help merge-tools'"
+                          " for binary files capability)\n")
+                        % (pycompat.bytestr(tool), pycompat.bytestr(path)))
             toolpath = _findtool(ui, tool)
             return (tool, _quotetoolpath(toolpath))
 
@@ -208,9 +227,10 @@
     if uimerge:
         # external tools defined in uimerge won't be able to handle
         # change/delete conflicts
-        if uimerge not in names and not changedelete:
-            return (uimerge, uimerge)
-        tools.insert(0, (None, uimerge)) # highest priority
+        if check(uimerge, path, symlink, binary, changedelete):
+            if uimerge not in names and not changedelete:
+                return (uimerge, uimerge)
+            tools.insert(0, (None, uimerge)) # highest priority
     tools.append((None, "hgmerge")) # the old default, if found
     for p, t in tools:
         if check(t, None, symlink, binary, changedelete):
@@ -469,7 +489,7 @@
     success, status = tagmerge.merge(repo, fcd, fco, fca)
     return success, status, False
 
-@internaltool('dump', fullmerge)
+@internaltool('dump', fullmerge, binary=True, symlink=True)
 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
     """
     Creates three versions of the files to merge, containing the
@@ -495,7 +515,7 @@
     repo.wwrite(fd + ".base", fca.data(), fca.flags())
     return False, 1, False
 
-@internaltool('forcedump', mergeonly)
+@internaltool('forcedump', mergeonly, binary=True, symlink=True)
 def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
                 labels=None):
     """
@@ -916,14 +936,17 @@
         _haltmerge()
     # default action is 'continue', in which case we neither prompt nor halt
 
+def hasconflictmarkers(data):
+    return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
+                          re.MULTILINE))
+
 def _check(repo, r, ui, tool, fcd, files):
     fd = fcd.path()
     unused, unused, unused, back = files
 
     if not r and (_toolbool(ui, tool, "checkconflicts") or
                   'conflicts' in _toollist(ui, tool, "check")):
-        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
-                     re.MULTILINE):
+        if hasconflictmarkers(fcd.data()):
             r = 1
 
     checked = False
@@ -967,6 +990,24 @@
         internals['internal:' + name] = func
         internalsdoc[fullname] = func
 
+        capabilities = sorted([k for k, v in func.capabilities.items() if v])
+        if capabilities:
+            capdesc = "    (actual capabilities: %s)" % ', '.join(capabilities)
+            func.__doc__ = (func.__doc__ +
+                            pycompat.sysstr("\n\n%s" % capdesc))
+
+    # to put i18n comments into hg.pot for automatically generated texts
+
+    # i18n: "binary" and "symlink" are keywords
+    # i18n: this text is added automatically
+    _("    (actual capabilities: binary, symlink)")
+    # i18n: "binary" is keyword
+    # i18n: this text is added automatically
+    _("    (actual capabilities: binary)")
+    # i18n: "symlink" is keyword
+    # i18n: this text is added automatically
+    _("    (actual capabilities: symlink)")
+
 # load built-in merge tools explicitly to setup internalsdoc
 loadinternalmerge(None, None, internaltool)
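[Note: factoring the conflict-marker check into hasconflictmarkers() makes the detection logic reusable and testable on its own. A standalone illustration of what the regular expression accepts, using the same pattern as the hunk above:]

    import re

    def hasconflictmarkers(data):
        # a marker line is <<<<<<< or >>>>>>> followed by a label, or a
        # bare ======= separator, each anchored to a whole line
        return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
                              re.MULTILINE))

    merged = "keep\n<<<<<<< local\nours\n=======\ntheirs\n>>>>>>> other\n"
    assert hasconflictmarkers(merged)
    # indented or decorated separators do not count as markers
    assert not hasconflictmarkers(" =======\nplain text\n")
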
 
--- a/mercurial/fileset.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/fileset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -13,9 +13,9 @@
 from .i18n import _
 from . import (
     error,
+    filesetlang,
     match as matchmod,
     merge,
-    parser,
     pycompat,
     registrar,
     scmutil,
@@ -25,126 +25,28 @@
     stringutil,
 )
 
-elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    ":": (15, None, None, ("kindpat", 15), None),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
-}
-
-keywords = {'and', 'or', 'not'}
-
-globchars = ".*{}[]?/\\_"
+# common weight constants
+_WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME
+_WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS
+_WEIGHT_STATUS = filesetlang.WEIGHT_STATUS
+_WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH
 
-def tokenize(program):
-    pos, l = 0, len(program)
-    program = pycompat.bytestr(program)
-    while pos < l:
-        c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
-            pass
-        elif c in "(),-:|&+!": # handle simple operators
-            yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
-                pos += 1
-                c = program[pos]
-                decode = lambda x: x
-            else:
-                decode = parser.unescapestr
-            pos += 1
-            s = pos
-            while pos < l: # find closing quote
-                d = program[pos]
-                if d == '\\': # skip over escaped characters
-                    pos += 2
-                    continue
-                if d == c:
-                    yield ('string', decode(program[s:pos]), s)
-                    break
-                pos += 1
-            else:
-                raise error.ParseError(_("unterminated string"), s)
-        elif c.isalnum() or c in globchars or ord(c) > 127:
-            # gather up a symbol/keyword
-            s = pos
-            pos += 1
-            while pos < l: # find end of symbol
-                d = program[pos]
-                if not (d.isalnum() or d in globchars or ord(d) > 127):
-                    break
-                pos += 1
-            sym = program[s:pos]
-            if sym in keywords: # operator keywords
-                yield (sym, None, s)
-            else:
-                yield ('symbol', sym, s)
-            pos -= 1
-        else:
-            raise error.ParseError(_("syntax error"), pos)
-        pos += 1
-    yield ('end', None, pos)
-
-def parse(expr):
-    p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(expr))
-    if pos != len(expr):
-        raise error.ParseError(_("invalid token"), pos)
-    return tree
-
-def getsymbol(x):
-    if x and x[0] == 'symbol':
-        return x[1]
-    raise error.ParseError(_('not a symbol'))
-
-def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
-        return x[1]
-    raise error.ParseError(err)
-
-def _getkindpat(x, y, allkinds, err):
-    kind = getsymbol(x)
-    pat = getstring(y, err)
-    if kind not in allkinds:
-        raise error.ParseError(_("invalid pattern kind: %s") % kind)
-    return '%s:%s' % (kind, pat)
-
-def getpattern(x, allkinds, err):
-    if x and x[0] == 'kindpat':
-        return _getkindpat(x[1], x[2], allkinds, err)
-    return getstring(x, err)
-
-def getlist(x):
-    if not x:
-        return []
-    if x[0] == 'list':
-        return getlist(x[1]) + [x[2]]
-    return [x]
-
-def getargs(x, min, max, err):
-    l = getlist(x)
-    if len(l) < min or len(l) > max:
-        raise error.ParseError(err)
-    return l
+# helpers for processing parsed tree
+getsymbol = filesetlang.getsymbol
+getstring = filesetlang.getstring
+_getkindpat = filesetlang.getkindpat
+getpattern = filesetlang.getpattern
+getargs = filesetlang.getargs
 
 def getmatch(mctx, x):
     if not x:
         raise error.ParseError(_("missing argument"))
     return methods[x[0]](mctx, *x[1:])
 
+def getmatchwithstatus(mctx, x, hint):
+    keys = set(getstring(hint, 'status hint must be a string').split())
+    return getmatch(mctx.withstatus(keys), x)
+
 def stringmatch(mctx, x):
     return mctx.matcher([x])
 
@@ -152,15 +54,20 @@
     return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds,
                                          _("pattern must be a string")))
 
+def patternsmatch(mctx, *xs):
+    allkinds = matchmod.allpatternkinds
+    patterns = [getpattern(x, allkinds, _("pattern must be a string"))
+                for x in xs]
+    return mctx.matcher(patterns)
+
 def andmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.intersectmatchers(xm, ym)
 
-def ormatch(mctx, x, y):
-    xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
-    return matchmod.unionmatcher([xm, ym])
+def ormatch(mctx, *xs):
+    ms = [getmatch(mctx, x) for x in xs]
+    return matchmod.unionmatcher(ms)
 
 def notmatch(mctx, x):
     m = getmatch(mctx, x)
@@ -168,15 +75,12 @@
 
 def minusmatch(mctx, x, y):
     xm = getmatch(mctx, x)
-    ym = getmatch(mctx, y)
+    ym = getmatch(mctx.narrowed(xm), y)
     return matchmod.differencematcher(xm, ym)
 
-def negatematch(mctx, x):
-    raise error.ParseError(_("can't use negate operator in this context"))
-
-def listmatch(mctx, x, y):
+def listmatch(mctx, *xs):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "filesets.x or y"'))
+                           hint=_('see \'hg help "filesets.x or y"\''))
 
 def func(mctx, a, b):
     funcname = getsymbol(a)
@@ -193,14 +97,11 @@
 # with:
 #  mctx - current matchctx instance
 #  x - argument in tree form
-symbols = {}
+symbols = filesetlang.symbols
 
-# filesets using matchctx.status()
-_statuscallers = set()
+predicate = registrar.filesetpredicate(symbols)
 
-predicate = registrar.filesetpredicate()
-
-@predicate('modified()', callstatus=True)
+@predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS)
 def modified(mctx, x):
     """File that is modified according to :hg:`status`.
     """
@@ -209,7 +110,7 @@
     s = set(mctx.status().modified)
     return mctx.predicate(s.__contains__, predrepr='modified')
 
-@predicate('added()', callstatus=True)
+@predicate('added()', callstatus=True, weight=_WEIGHT_STATUS)
 def added(mctx, x):
     """File that is added according to :hg:`status`.
     """
@@ -218,7 +119,7 @@
     s = set(mctx.status().added)
     return mctx.predicate(s.__contains__, predrepr='added')
 
-@predicate('removed()', callstatus=True)
+@predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS)
 def removed(mctx, x):
     """File that is removed according to :hg:`status`.
     """
@@ -227,7 +128,7 @@
     s = set(mctx.status().removed)
     return mctx.predicate(s.__contains__, predrepr='removed')
 
-@predicate('deleted()', callstatus=True)
+@predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS)
 def deleted(mctx, x):
     """Alias for ``missing()``.
     """
@@ -236,7 +137,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('missing()', callstatus=True)
+@predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS)
 def missing(mctx, x):
     """File that is missing according to :hg:`status`.
     """
@@ -245,7 +146,7 @@
     s = set(mctx.status().deleted)
     return mctx.predicate(s.__contains__, predrepr='deleted')
 
-@predicate('unknown()', callstatus=True)
+@predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def unknown(mctx, x):
     """File that is unknown according to :hg:`status`."""
     # i18n: "unknown" is a keyword
@@ -253,7 +154,7 @@
     s = set(mctx.status().unknown)
     return mctx.predicate(s.__contains__, predrepr='unknown')
 
-@predicate('ignored()', callstatus=True)
+@predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
 def ignored(mctx, x):
     """File that is ignored according to :hg:`status`."""
     # i18n: "ignored" is a keyword
@@ -261,7 +162,7 @@
     s = set(mctx.status().ignored)
     return mctx.predicate(s.__contains__, predrepr='ignored')
 
-@predicate('clean()', callstatus=True)
+@predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS)
 def clean(mctx, x):
     """File that is clean according to :hg:`status`.
     """
@@ -277,7 +178,7 @@
     getargs(x, 0, 0, _("tracked takes no arguments"))
     return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked')
 
-@predicate('binary()')
+@predicate('binary()', weight=_WEIGHT_READ_CONTENTS)
 def binary(mctx, x):
     """File that appears to be binary (contains NUL bytes).
     """
@@ -304,7 +205,7 @@
     ctx = mctx.ctx
     return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink')
 
-@predicate('resolved()')
+@predicate('resolved()', weight=_WEIGHT_STATUS)
 def resolved(mctx, x):
     """File that is marked resolved according to :hg:`resolve -l`.
     """
@@ -316,7 +217,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'r',
                           predrepr='resolved')
 
-@predicate('unresolved()')
+@predicate('unresolved()', weight=_WEIGHT_STATUS)
 def unresolved(mctx, x):
     """File that is marked unresolved according to :hg:`resolve -l`.
     """
@@ -328,7 +229,7 @@
     return mctx.predicate(lambda f: f in ms and ms[f] == 'u',
                           predrepr='unresolved')
 
-@predicate('hgignore()')
+@predicate('hgignore()', weight=_WEIGHT_STATUS)
 def hgignore(mctx, x):
     """File that matches the active .hgignore pattern.
     """
@@ -336,7 +237,7 @@
     getargs(x, 0, 0, _("hgignore takes no arguments"))
     return mctx.ctx.repo().dirstate._ignore
 
-@predicate('portable()')
+@predicate('portable()', weight=_WEIGHT_CHECK_FILENAME)
 def portable(mctx, x):
     """File that has a portable name. (This doesn't include filenames with case
     collisions.)
@@ -346,7 +247,7 @@
     return mctx.predicate(lambda f: util.checkwinfilename(f) is None,
                           predrepr='portable')
 
-@predicate('grep(regex)')
+@predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS)
 def grep(mctx, x):
     """File contains the given regular expression.
     """
@@ -400,7 +301,7 @@
         b = _sizetomax(expr)
         return lambda x: x >= a and x <= b
 
-@predicate('size(expression)')
+@predicate('size(expression)', weight=_WEIGHT_STATUS)
 def size(mctx, x):
     """File size matches the given expression. Examples:
 
@@ -415,7 +316,7 @@
     return mctx.fpredicate(lambda fctx: m(fctx.size()),
                            predrepr=('size(%r)', expr), cache=True)
 
-@predicate('encoding(name)')
+@predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS)
 def encoding(mctx, x):
     """File can be successfully decoded with the given character
     encoding. May not be useful for encodings other than ASCII and
@@ -437,7 +338,7 @@
 
     return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True)
 
-@predicate('eol(style)')
+@predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS)
 def eol(mctx, x):
     """File contains newlines of the given style (dos, unix, mac). Binary
     files are excluded, files with mixed line endings match multiple
@@ -471,7 +372,7 @@
         return p and p[0].path() != fctx.path()
     return mctx.fpredicate(copiedp, predrepr='copied', cache=True)
 
-@predicate('revs(revs, pattern)')
+@predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS)
 def revs(mctx, x):
     """Evaluate set in the specified revisions. If the revset match multiple
     revs, this will return file matching pattern in any of the revision.
@@ -486,14 +387,15 @@
     matchers = []
     for r in revs:
         ctx = repo[r]
-        matchers.append(getmatch(mctx.switch(ctx, _buildstatus(ctx, x)), x))
+        mc = mctx.switch(ctx.p1(), ctx)
+        matchers.append(getmatch(mc, x))
     if not matchers:
         return mctx.never()
     if len(matchers) == 1:
         return matchers[0]
     return matchmod.unionmatcher(matchers)
 
-@predicate('status(base, rev, pattern)')
+@predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS)
 def status(mctx, x):
     """Evaluate predicate using status change between ``base`` and
     ``rev``. Examples:
@@ -513,7 +415,8 @@
     if not revspec:
         raise error.ParseError(reverr)
     basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec])
-    return getmatch(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x)
+    mc = mctx.switch(basectx, ctx)
+    return getmatch(mc, x)
 
 @predicate('subrepo([pattern])')
 def subrepo(mctx, x):
@@ -539,24 +442,52 @@
         return mctx.predicate(sstate.__contains__, predrepr='subrepo')
 
 methods = {
+    'withstatus': getmatchwithstatus,
     'string': stringmatch,
     'symbol': stringmatch,
     'kindpat': kindpatmatch,
+    'patterns': patternsmatch,
     'and': andmatch,
     'or': ormatch,
     'minus': minusmatch,
-    'negate': negatematch,
     'list': listmatch,
-    'group': getmatch,
     'not': notmatch,
     'func': func,
 }
 
 class matchctx(object):
-    def __init__(self, ctx, status=None, badfn=None):
+    def __init__(self, basectx, ctx, badfn=None):
+        self._basectx = basectx
         self.ctx = ctx
-        self._status = status
         self._badfn = badfn
+        self._match = None
+        self._status = None
+
+    def narrowed(self, match):
+        """Create matchctx for a sub-tree narrowed by the given matcher"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = match
+        # leave the wider status in place; extra entries do no harm
+        mctx._status = self._status
+        return mctx
+
+    def switch(self, basectx, ctx):
+        mctx = matchctx(basectx, ctx, self._badfn)
+        mctx._match = self._match
+        return mctx
+
+    def withstatus(self, keys):
+        """Create matchctx which has precomputed status specified by the keys"""
+        mctx = matchctx(self._basectx, self.ctx, self._badfn)
+        mctx._match = self._match
+        mctx._buildstatus(keys)
+        return mctx
+
+    def _buildstatus(self, keys):
+        self._status = self._basectx.status(self.ctx, self._match,
+                                            listignored='ignored' in keys,
+                                            listclean='clean' in keys,
+                                            listunknown='unknown' in keys)
 
     def status(self):
         return self._status
@@ -612,62 +543,20 @@
         return matchmod.nevermatcher(repo.root, repo.getcwd(),
                                      badfn=self._badfn)
 
-    def switch(self, ctx, status=None):
-        return matchctx(ctx, status, self._badfn)
-
-# filesets using matchctx.switch()
-_switchcallers = [
-    'revs',
-    'status',
-]
-
-def _intree(funcs, tree):
-    if isinstance(tree, tuple):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
-            if tree[1][1] in funcs:
-                return True
-            if tree[1][1] in _switchcallers:
-                # arguments won't be evaluated in the current context
-                return False
-        for s in tree[1:]:
-            if _intree(funcs, s):
-                return True
-    return False
-
 def match(ctx, expr, badfn=None):
     """Create a matcher for a single fileset expression"""
-    tree = parse(expr)
-    mctx = matchctx(ctx, _buildstatus(ctx, tree), badfn=badfn)
+    tree = filesetlang.parse(expr)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
+    mctx = matchctx(ctx.p1(), ctx, badfn=badfn)
     return getmatch(mctx, tree)
 
-def _buildstatus(ctx, tree, basectx=None):
-    # do we need status info?
-
-    if _intree(_statuscallers, tree):
-        unknown = _intree(['unknown'], tree)
-        ignored = _intree(['ignored'], tree)
-
-        r = ctx.repo()
-        if basectx is None:
-            basectx = ctx.p1()
-        return r.status(basectx, ctx,
-                        unknown=unknown, ignored=ignored, clean=True)
-    else:
-        return None
-
-def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
 
 def loadpredicate(ui, extname, registrarobj):
     """Load fileset predicates from specified registrarobj
     """
     for name, func in registrarobj._table.iteritems():
         symbols[name] = func
-        if func._callstatus:
-            _statuscallers.add(name)
-
-# load built-in predicates explicitly to setup _statuscallers
-loadpredicate(None, None, predicate)
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = symbols.values()
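[Note: the weight constants imported at the top of this file feed the static optimizer in the new filesetlang module (introduced next): when two predicates are and-ed, the cheaper one is moved first so the expensive one runs on fewer files. A hand-worked illustration -- the resulting tree shape follows _optimize() below and is an expectation, not captured output:]

    # 'grep(foo) and *.py' -- the filename pattern (weight 0.5) should be
    # checked before file contents are read (weight 30), so the optimizer
    # swaps the operands of 'and':
    tree = ('and',
            ('func', ('symbol', 'grep'), ('string', 'foo')),
            ('symbol', '*.py'))
    # expected result of filesetlang.optimize(tree):
    #   ('and',
    #    ('symbol', '*.py'),
    #    ('func', ('symbol', 'grep'), ('string', 'foo')))
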
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/filesetlang.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,330 @@
+# filesetlang.py - parser, tokenizer and utility for file set language
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+from . import (
+    error,
+    parser,
+    pycompat,
+)
+
+# common weight constants for static optimization
+# (see registrar.filesetpredicate for details)
+WEIGHT_CHECK_FILENAME = 0.5
+WEIGHT_READ_CONTENTS = 30
+WEIGHT_STATUS = 10
+WEIGHT_STATUS_THOROUGH = 50
+
+elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    ":": (15, None, None, ("kindpat", 15), None),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
+}
+
+keywords = {'and', 'or', 'not'}
+
+symbols = {}
+
+globchars = ".*{}[]?/\\_"
+
+def tokenize(program):
+    pos, l = 0, len(program)
+    program = pycompat.bytestr(program)
+    while pos < l:
+        c = program[pos]
+        if c.isspace(): # skip inter-token whitespace
+            pass
+        elif c in "(),-:|&+!": # handle simple operators
+            yield (c, None, pos)
+        elif (c in '"\'' or c == 'r' and
+              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+            if c == 'r':
+                pos += 1
+                c = program[pos]
+                decode = lambda x: x
+            else:
+                decode = parser.unescapestr
+            pos += 1
+            s = pos
+            while pos < l: # find closing quote
+                d = program[pos]
+                if d == '\\': # skip over escaped characters
+                    pos += 2
+                    continue
+                if d == c:
+                    yield ('string', decode(program[s:pos]), s)
+                    break
+                pos += 1
+            else:
+                raise error.ParseError(_("unterminated string"), s)
+        elif c.isalnum() or c in globchars or ord(c) > 127:
+            # gather up a symbol/keyword
+            s = pos
+            pos += 1
+            while pos < l: # find end of symbol
+                d = program[pos]
+                if not (d.isalnum() or d in globchars or ord(d) > 127):
+                    break
+                pos += 1
+            sym = program[s:pos]
+            if sym in keywords: # operator keywords
+                yield (sym, None, s)
+            else:
+                yield ('symbol', sym, s)
+            pos -= 1
+        else:
+            raise error.ParseError(_("syntax error"), pos)
+        pos += 1
+    yield ('end', None, pos)
+
+def parse(expr):
+    p = parser.parser(elements)
+    tree, pos = p.parse(tokenize(expr))
+    if pos != len(expr):
+        raise error.ParseError(_("invalid token"), pos)
+    return parser.simplifyinfixops(tree, {'list', 'or'})
+
+def getsymbol(x):
+    if x and x[0] == 'symbol':
+        return x[1]
+    raise error.ParseError(_('not a symbol'))
+
+def getstring(x, err):
+    if x and (x[0] == 'string' or x[0] == 'symbol'):
+        return x[1]
+    raise error.ParseError(err)
+
+def getkindpat(x, y, allkinds, err):
+    kind = getsymbol(x)
+    pat = getstring(y, err)
+    if kind not in allkinds:
+        raise error.ParseError(_("invalid pattern kind: %s") % kind)
+    return '%s:%s' % (kind, pat)
+
+def getpattern(x, allkinds, err):
+    if x and x[0] == 'kindpat':
+        return getkindpat(x[1], x[2], allkinds, err)
+    return getstring(x, err)
+
+def getlist(x):
+    if not x:
+        return []
+    if x[0] == 'list':
+        return list(x[1:])
+    return [x]
+
+def getargs(x, min, max, err):
+    l = getlist(x)
+    if len(l) < min or len(l) > max:
+        raise error.ParseError(err)
+    return l
+
+def _analyze(x):
+    if x is None:
+        return x
+
+    op = x[0]
+    if op in {'string', 'symbol'}:
+        return x
+    if op == 'kindpat':
+        getsymbol(x[1])  # kind must be a symbol
+        t = _analyze(x[2])
+        return (op, x[1], t)
+    if op == 'group':
+        return _analyze(x[1])
+    if op == 'negate':
+        raise error.ParseError(_("can't use negate operator in this context"))
+    if op == 'not':
+        t = _analyze(x[1])
+        return (op, t)
+    if op == 'and':
+        ta = _analyze(x[1])
+        tb = _analyze(x[2])
+        return (op, ta, tb)
+    if op == 'minus':
+        return _analyze(('and', x[1], ('not', x[2])))
+    if op in {'list', 'or'}:
+        ts = tuple(_analyze(y) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        getsymbol(x[1])  # function name must be a symbol
+        ta = _analyze(x[2])
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _insertstatushints(x):
+    """Insert hint nodes where status should be calculated (first path)
+
+    This works in bottom-up way, summing up status names and inserting hint
+    nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left.
+
+    Returns (status-names, new-tree) at the given subtree, where status-names
+    is a sum of status names referenced in the given subtree.
+    """
+    if x is None:
+        return (), x
+
+    op = x[0]
+    if op in {'string', 'symbol', 'kindpat'}:
+        return (), x
+    if op == 'not':
+        h, t = _insertstatushints(x[1])
+        return h, (op, t)
+    if op == 'and':
+        ha, ta = _insertstatushints(x[1])
+        hb, tb = _insertstatushints(x[2])
+        hr = ha + hb
+        if ha and hb:
+            return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
+        return hr, (op, ta, tb)
+    if op == 'or':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        hr = sum(hs, ())
+        if sum(bool(h) for h in hs) > 1:
+            return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
+        return hr, (op,) + ts
+    if op == 'list':
+        hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
+        return sum(hs, ()), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        # don't propagate 'ha' crossing a function boundary
+        ha, ta = _insertstatushints(x[2])
+        if getattr(symbols.get(f), '_callstatus', False):
+            return (f,), ('withstatus', (op, x[1], ta), ('string', f))
+        return (), (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def _mergestatushints(x, instatus):
+    """Remove redundant status hint nodes (second path)
+
+    This is the top-down path to eliminate inner hint nodes.
+    """
+    if x is None:
+        return x
+
+    op = x[0]
+    if op == 'withstatus':
+        if instatus:
+            # drop redundant hint node
+            return _mergestatushints(x[1], instatus)
+        t = _mergestatushints(x[1], instatus=True)
+        return (op, t, x[2])
+    if op in {'string', 'symbol', 'kindpat'}:
+        return x
+    if op == 'not':
+        t = _mergestatushints(x[1], instatus)
+        return (op, t)
+    if op == 'and':
+        ta = _mergestatushints(x[1], instatus)
+        tb = _mergestatushints(x[2], instatus)
+        return (op, ta, tb)
+    if op in {'list', 'or'}:
+        ts = tuple(_mergestatushints(y, instatus) for y in x[1:])
+        return (op,) + ts
+    if op == 'func':
+        # don't propagate 'instatus' crossing a function boundary
+        ta = _mergestatushints(x[2], instatus=False)
+        return (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def analyze(x):
+    """Transform raw parsed tree to evaluatable tree which can be fed to
+    optimize() or getmatch()
+
+    All pseudo operations should be mapped to real operations or functions
+    defined in methods or symbols table respectively.
+    """
+    t = _analyze(x)
+    _h, t = _insertstatushints(t)
+    return _mergestatushints(t, instatus=False)
+
+def _optimizeandops(op, ta, tb):
+    if tb is not None and tb[0] == 'not':
+        return ('minus', ta, tb[1])
+    return (op, ta, tb)
+
+def _optimizeunion(xs):
+    # collect string patterns so they can be compiled into a single regexp
+    ws, ts, ss = [], [], []
+    for x in xs:
+        w, t = _optimize(x)
+        if t is not None and t[0] in {'string', 'symbol', 'kindpat'}:
+            ss.append(t)
+            continue
+        ws.append(w)
+        ts.append(t)
+    if ss:
+        ws.append(WEIGHT_CHECK_FILENAME)
+        ts.append(('patterns',) + tuple(ss))
+    return ws, ts
+
+def _optimize(x):
+    if x is None:
+        return 0, x
+
+    op = x[0]
+    if op == 'withstatus':
+        w, t = _optimize(x[1])
+        return w, (op, t, x[2])
+    if op in {'string', 'symbol'}:
+        return WEIGHT_CHECK_FILENAME, x
+    if op == 'kindpat':
+        w, t = _optimize(x[2])
+        return w, (op, x[1], t)
+    if op == 'not':
+        w, t = _optimize(x[1])
+        return w, (op, t)
+    if op == 'and':
+        wa, ta = _optimize(x[1])
+        wb, tb = _optimize(x[2])
+        if wa <= wb:
+            return wa, _optimizeandops(op, ta, tb)
+        else:
+            return wb, _optimizeandops(op, tb, ta)
+    if op == 'or':
+        ws, ts = _optimizeunion(x[1:])
+        if len(ts) == 1:
+            return ws[0], ts[0] # 'or' operation is fully optimized out
+        ts = tuple(it[1] for it in sorted(enumerate(ts),
+                                          key=lambda it: ws[it[0]]))
+        return max(ws), (op,) + ts
+    if op == 'list':
+        ws, ts = zip(*(_optimize(y) for y in x[1:]))
+        return sum(ws), (op,) + ts
+    if op == 'func':
+        f = getsymbol(x[1])
+        w = getattr(symbols.get(f), '_weight', 1)
+        wa, ta = _optimize(x[2])
+        return w + wa, (op, x[1], ta)
+    raise error.ProgrammingError('invalid operator %r' % op)
+
+def optimize(x):
+    """Reorder/rewrite evaluatable tree for optimization
+
+    All pseudo operations should be transformed beforehand.
+    """
+    _w, t = _optimize(x)
+    return t
+
+def prettyformat(tree):
+    return parser.prettyformat(tree, ('string', 'symbol'))
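[Note: as a small worked example of this module's front end, tokenizing a simple expression yields position-annotated tuples. This was traced by hand through tokenize() above, so treat the exact output as a sketch:]

    tokens = list(tokenize("clean() and *.py"))
    # expected:
    # [('symbol', 'clean', 0), ('(', None, 5), (')', None, 6),
    #  ('and', None, 8), ('symbol', '*.py', 12), ('end', None, 16)]
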
--- a/mercurial/formatter.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/formatter.py	Mon Oct 22 14:46:06 2018 -0400
@@ -124,13 +124,15 @@
     error,
     pycompat,
     templatefilters,
-    templatefuncs,
     templatekw,
     templater,
     templateutil,
     util,
 )
-from .utils import dateutil
+from .utils import (
+    dateutil,
+    stringutil,
+)
 
 pickle = util.pickle
 
@@ -193,14 +195,16 @@
         # name is mandatory argument for now, but it could be optional if
         # we have default template keyword, e.g. {item}
         return self._converter.formatlist(data, name, fmt, sep)
-    def contexthint(self, datafields):
-        '''set of context object keys to be required given datafields set'''
-        return set()
     def context(self, **ctxs):
         '''insert context objects to be used to render template keywords'''
         ctxs = pycompat.byteskwargs(ctxs)
-        assert all(k in {'ctx', 'fctx'} for k in ctxs)
+        assert all(k in {'repo', 'ctx', 'fctx'} for k in ctxs)
         if self._converter.storecontext:
+            # populate missing resources in fctx -> ctx -> repo order
+            if 'fctx' in ctxs and 'ctx' not in ctxs:
+                ctxs['ctx'] = ctxs['fctx'].changectx()
+            if 'ctx' in ctxs and 'repo' not in ctxs:
+                ctxs['repo'] = ctxs['ctx'].repo()
             self._item.update(ctxs)
     def datahint(self):
         '''set of field names to be referenced'''
@@ -212,7 +216,7 @@
     def write(self, fields, deftext, *fielddata, **opts):
         '''do default text output while assigning data to item'''
         fieldkeys = fields.split()
-        assert len(fieldkeys) == len(fielddata)
+        assert len(fieldkeys) == len(fielddata), (fieldkeys, fielddata)
         self._item.update(zip(fieldkeys, fielddata))
     def condwrite(self, cond, fields, deftext, *fielddata, **opts):
         '''do conditional write (primarily for plain formatter)'''
@@ -320,7 +324,8 @@
         self._out = out
         self._out.write("%s = [\n" % self._topic)
     def _showitem(self):
-        self._out.write('    %s,\n' % pycompat.byterepr(self._item))
+        self._out.write('    %s,\n'
+                        % stringutil.pprint(self._item, indent=4, level=1))
     def end(self):
         baseformatter.end(self)
         self._out.write("]\n")
@@ -422,24 +427,6 @@
     def _symbolsused(self):
         return self._t.symbolsused(self._tref)
 
-    def contexthint(self, datafields):
-        '''set of context object keys to be required by the template, given
-        datafields overridden by immediate values'''
-        requires = set()
-        ksyms, fsyms = self._symbolsused
-        ksyms = ksyms - set(datafields.split())  # exclude immediate fields
-        symtables = [(ksyms, templatekw.keywords),
-                     (fsyms, templatefuncs.funcs)]
-        for syms, table in symtables:
-            for k in syms:
-                f = table.get(k)
-                if not f:
-                    continue
-                requires.update(getattr(f, '_requires', ()))
-        if 'repo' in requires:
-            requires.add('ctx')  # there's no API to pass repo to formatter
-        return requires & {'ctx', 'fctx'}
-
     def datahint(self):
         '''set of field names to be referenced from the template'''
         return self._symbolsused[0]
@@ -538,6 +525,10 @@
         t.cache[''] = tmpl
     return t
 
+# marker to denote a resource to be loaded on demand based on mapping values
+# (e.g. (ctx, path) -> fctx)
+_placeholder = object()
+
 class templateresources(templater.resourcemapper):
     """Resource mapper designed for the default templatekw and function"""
 
@@ -548,61 +539,81 @@
             'ui': ui,
         }
 
-    def availablekeys(self, context, mapping):
-        return {k for k, g in self._gettermap.iteritems()
-                if g(self, context, mapping, k) is not None}
+    def availablekeys(self, mapping):
+        return {k for k in self.knownkeys()
+                if self._getsome(mapping, k) is not None}
 
     def knownkeys(self):
-        return self._knownkeys
+        return {'cache', 'ctx', 'fctx', 'repo', 'revcache', 'ui'}
 
-    def lookup(self, context, mapping, key):
-        get = self._gettermap.get(key)
-        if not get:
+    def lookup(self, mapping, key):
+        if key not in self.knownkeys():
             return None
-        return get(self, context, mapping, key)
+        v = self._getsome(mapping, key)
+        if v is _placeholder:
+            v = mapping[key] = self._loadermap[key](self, mapping)
+        return v
 
     def populatemap(self, context, origmapping, newmapping):
         mapping = {}
-        if self._hasctx(newmapping):
+        if self._hasnodespec(newmapping):
             mapping['revcache'] = {}  # per-ctx cache
-        if (('node' in origmapping or self._hasctx(origmapping))
-            and ('node' in newmapping or self._hasctx(newmapping))):
+        if self._hasnodespec(origmapping) and self._hasnodespec(newmapping):
             orignode = templateutil.runsymbol(context, origmapping, 'node')
             mapping['originalnode'] = orignode
+        # put marker to override 'ctx'/'fctx' in mapping if any, and flag
+        # its existence to be reported by availablekeys()
+        if 'ctx' not in newmapping and self._hasliteral(newmapping, 'node'):
+            mapping['ctx'] = _placeholder
+        if 'fctx' not in newmapping and self._hasliteral(newmapping, 'path'):
+            mapping['fctx'] = _placeholder
         return mapping
 
-    def _getsome(self, context, mapping, key):
+    def _getsome(self, mapping, key):
         v = mapping.get(key)
         if v is not None:
             return v
         return self._resmap.get(key)
 
-    def _hasctx(self, mapping):
-        return 'ctx' in mapping or 'fctx' in mapping
+    def _hasliteral(self, mapping, key):
+        """Test if a literal value is set or unset in the given mapping"""
+        return key in mapping and not callable(mapping[key])
 
-    def _getctx(self, context, mapping, key):
-        ctx = mapping.get('ctx')
-        if ctx is not None:
-            return ctx
-        fctx = mapping.get('fctx')
-        if fctx is not None:
-            return fctx.changectx()
+    def _getliteral(self, mapping, key):
+        """Return value of the given name if it is a literal"""
+        v = mapping.get(key)
+        if callable(v):
+            return None
+        return v
+
+    def _hasnodespec(self, mapping):
+        """Test if context revision is set or unset in the given mapping"""
+        return 'node' in mapping or 'ctx' in mapping
 
-    def _getrepo(self, context, mapping, key):
-        ctx = self._getctx(context, mapping, 'ctx')
-        if ctx is not None:
-            return ctx.repo()
-        return self._getsome(context, mapping, key)
+    def _loadctx(self, mapping):
+        repo = self._getsome(mapping, 'repo')
+        node = self._getliteral(mapping, 'node')
+        if repo is None or node is None:
+            return
+        try:
+            return repo[node]
+        except error.RepoLookupError:
+            return None # maybe hidden/non-existent node
 
-    _gettermap = {
-        'cache': _getsome,
-        'ctx': _getctx,
-        'fctx': _getsome,
-        'repo': _getrepo,
-        'revcache': _getsome,
-        'ui': _getsome,
+    def _loadfctx(self, mapping):
+        ctx = self._getsome(mapping, 'ctx')
+        path = self._getliteral(mapping, 'path')
+        if ctx is None or path is None:
+            return None
+        try:
+            return ctx[path]
+        except error.LookupError:
+            return None # maybe removed file?
+
+    _loadermap = {
+        'ctx': _loadctx,
+        'fctx': _loadfctx,
     }
-    _knownkeys = set(_gettermap.keys())
 
 def formatter(ui, out, topic, opts):
     template = opts.get("template", "")
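[Note: the rewritten templateresources resolves 'ctx' and 'fctx' lazily: populatemap() plants a sentinel, and lookup() replaces it with the real object on first access. A minimal standalone sketch of that sentinel pattern -- the names here are illustrative, not the formatter API:]

    _placeholder = object()

    class lazymapping(object):
        """Resolve expensive values on first lookup and cache them in place."""
        def __init__(self, loaders):
            self._loaders = loaders  # key -> callable(mapping)
        def populatemap(self, mapping):
            for key in self._loaders:
                mapping.setdefault(key, _placeholder)
        def lookup(self, mapping, key):
            v = mapping.get(key)
            if v is _placeholder:
                # load once, then overwrite the sentinel so later
                # lookups are free
                v = mapping[key] = self._loaders[key](mapping)
            return v

    res = lazymapping({'ctx': lambda m: 'changectx for %s' % m['node']})
    mapping = {'node': 'abc123'}
    res.populatemap(mapping)
    assert res.lookup(mapping, 'ctx') == 'changectx for abc123'
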
--- a/mercurial/graphmod.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/graphmod.py	Mon Oct 22 14:46:06 2018 -0400
@@ -22,6 +22,7 @@
 from .node import nullrev
 from . import (
     dagop,
+    pycompat,
     smartset,
     util,
 )
@@ -280,7 +281,7 @@
         line.extend(echars[-(remainder * 2):])
     return line
 
-def _drawendinglines(lines, extra, edgemap, seen):
+def _drawendinglines(lines, extra, edgemap, seen, state):
     """Draw ending lines for missing parent edges
 
     None indicates an edge that ends at between this node and the next
@@ -297,7 +298,8 @@
     while edgechars and edgechars[-1] is None:
         edgechars.pop()
     shift_size = max((edgechars.count(None) * 2) - 1, 0)
-    while len(lines) < 3 + shift_size:
+    minlines = 3 if not state['graphshorten'] else 2
+    while len(lines) < minlines + shift_size:
         lines.append(extra[:])
 
     if shift_size:
@@ -318,7 +320,7 @@
                 positions[i] = max(pos, targets[i])
                 line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
 
-    map = {1: '|', 2: '~'}
+    map = {1: '|', 2: '~'} if not state['graphshorten'] else {1: '~'}
     for i, line in enumerate(lines):
         if None not in line:
             continue
@@ -426,16 +428,16 @@
     # shift_interline is the line containing the non-vertical
     # edges between this entry and the next
     shift_interline = echars[:idx * 2]
-    for i in xrange(2 + coldiff):
+    for i in pycompat.xrange(2 + coldiff):
         shift_interline.append(' ')
     count = ncols - idx - 1
     if coldiff == -1:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['/', ' '])
     elif coldiff == 0:
         shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
     else:
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             shift_interline.extend(['\\', ' '])
 
     # draw edges from the current node to its parents
@@ -462,7 +464,7 @@
         while len(lines) < len(text):
             lines.append(extra_interline[:])
 
-    _drawendinglines(lines, extra_interline, edgemap, seen)
+    _drawendinglines(lines, extra_interline, edgemap, seen, state)
 
     while len(text) < len(lines):
         text.append("")
--- a/mercurial/help.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/help.py	Mon Oct 22 14:46:06 2018 -0400
@@ -25,6 +25,7 @@
     fileset,
     minirst,
     pycompat,
+    registrar,
     revset,
     templatefilters,
     templatefuncs,
@@ -47,6 +48,78 @@
     _("(EXPERIMENTAL)"),
 }
 
+# The order in which command categories will be displayed.
+# Extensions with custom categories should insert them into this list
+# after/before the appropriate item, rather than replacing the list or
+# assuming absolute positions.
+CATEGORY_ORDER = [
+    registrar.command.CATEGORY_REPO_CREATION,
+    registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT,
+    registrar.command.CATEGORY_COMMITTING,
+    registrar.command.CATEGORY_CHANGE_MANAGEMENT,
+    registrar.command.CATEGORY_CHANGE_ORGANIZATION,
+    registrar.command.CATEGORY_FILE_CONTENTS,
+    registrar.command.CATEGORY_CHANGE_NAVIGATION,
+    registrar.command.CATEGORY_WORKING_DIRECTORY,
+    registrar.command.CATEGORY_IMPORT_EXPORT,
+    registrar.command.CATEGORY_MAINTENANCE,
+    registrar.command.CATEGORY_HELP,
+    registrar.command.CATEGORY_MISC,
+    registrar.command.CATEGORY_NONE,
+]
+
+# Human-readable category names. These are translated.
+# Extensions with custom categories should add their names here.
+CATEGORY_NAMES = {
+    registrar.command.CATEGORY_REPO_CREATION: 'Repository creation',
+    registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT:
+        'Remote repository management',
+    registrar.command.CATEGORY_COMMITTING: 'Change creation',
+    registrar.command.CATEGORY_CHANGE_NAVIGATION: 'Change navigation',
+    registrar.command.CATEGORY_CHANGE_MANAGEMENT: 'Change manipulation',
+    registrar.command.CATEGORY_CHANGE_ORGANIZATION: 'Change organization',
+    registrar.command.CATEGORY_WORKING_DIRECTORY:
+        'Working directory management',
+    registrar.command.CATEGORY_FILE_CONTENTS: 'File content management',
+    registrar.command.CATEGORY_IMPORT_EXPORT: 'Change import/export',
+    registrar.command.CATEGORY_MAINTENANCE: 'Repository maintenance',
+    registrar.command.CATEGORY_HELP: 'Help',
+    registrar.command.CATEGORY_MISC: 'Miscellaneous commands',
+    registrar.command.CATEGORY_NONE: 'Uncategorized commands',
+}
+
+# Topic categories.
+TOPIC_CATEGORY_IDS = 'ids'
+TOPIC_CATEGORY_OUTPUT = 'output'
+TOPIC_CATEGORY_CONFIG = 'config'
+TOPIC_CATEGORY_CONCEPTS = 'concepts'
+TOPIC_CATEGORY_MISC = 'misc'
+TOPIC_CATEGORY_NONE = 'none'
+
+# The order in which topic categories will be displayed.
+# Extensions with custom categories should insert them into this list
+# after/before the appropriate item, rather than replacing the list or
+# assuming absolute positions.
+TOPIC_CATEGORY_ORDER = [
+    TOPIC_CATEGORY_IDS,
+    TOPIC_CATEGORY_OUTPUT,
+    TOPIC_CATEGORY_CONFIG,
+    TOPIC_CATEGORY_CONCEPTS,
+    TOPIC_CATEGORY_MISC,
+    TOPIC_CATEGORY_NONE,
+]
+
+# Human-readable topic category names. These are translated.
+TOPIC_CATEGORY_NAMES = {
+    TOPIC_CATEGORY_IDS: 'Mercurial identifiers',
+    TOPIC_CATEGORY_OUTPUT: 'Mercurial output',
+    TOPIC_CATEGORY_CONFIG: 'Mercurial configuration',
+    TOPIC_CATEGORY_CONCEPTS: 'Concepts',
+    TOPIC_CATEGORY_MISC: 'Miscellaneous',
+    TOPIC_CATEGORY_NONE: 'Uncategorized topics',
+}
+
 def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
     rst = []
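
The comments above ask extensions with custom categories to splice them into
CATEGORY_ORDER and CATEGORY_NAMES relative to existing entries. As a rough
sketch only (the category id and names here are invented, not part of this
change), an extension could do:

    # Hypothetical extension code.
    from mercurial import help, registrar

    CATEGORY_SHELVING = 'shelving'  # invented category id

    def extsetup(ui):
        # Insert relative to an existing entry rather than at a fixed index.
        anchor = registrar.command.CATEGORY_WORKING_DIRECTORY
        idx = help.CATEGORY_ORDER.index(anchor)
        help.CATEGORY_ORDER.insert(idx + 1, CATEGORY_SHELVING)
        help.CATEGORY_NAMES[CATEGORY_SHELVING] = 'Shelving commands'
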
@@ -137,7 +210,8 @@
                'extensions': [],
                'extensioncommands': [],
                }
-    for names, header, doc in helptable:
+    for topic in helptable:
+        names, header, doc = topic[0:3]
         # Old extensions may use a str as doc.
         if (sum(map(lowercontains, names))
             or lowercontains(header)
@@ -205,6 +279,8 @@
      loaddoc('bundle2', subdir='internals')),
     (['bundles'], _('Bundles'),
      loaddoc('bundles', subdir='internals')),
+    (['cbor'], _('CBOR'),
+     loaddoc('cbor', subdir='internals')),
     (['censor'], _('Censor'),
      loaddoc('censor', subdir='internals')),
     (['changegroups'], _('Changegroups'),
@@ -217,6 +293,10 @@
      loaddoc('revlogs', subdir='internals')),
     (['wireprotocol'], _('Wire Protocol'),
      loaddoc('wireprotocol', subdir='internals')),
+    (['wireprotocolrpc'], _('Wire Protocol RPC'),
+     loaddoc('wireprotocolrpc', subdir='internals')),
+    (['wireprotocolv2'], _('Wire Protocol Version 2'),
+     loaddoc('wireprotocolv2', subdir='internals')),
 ])
 
 def internalshelp(ui):
@@ -229,36 +309,47 @@
     return ''.join(lines)
 
 helptable = sorted([
-    (['bundlespec'], _("Bundle File Formats"), loaddoc('bundlespec')),
-    (['color'], _("Colorizing Outputs"), loaddoc('color')),
-    (["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
-    (['deprecated'], _("Deprecated Features"), loaddoc('deprecated')),
-    (["dates"], _("Date Formats"), loaddoc('dates')),
-    (["flags"], _("Command-line flags"), loaddoc('flags')),
-    (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
+    (['bundlespec'], _("Bundle File Formats"), loaddoc('bundlespec'),
+     TOPIC_CATEGORY_CONCEPTS),
+    (['color'], _("Colorizing Outputs"), loaddoc('color'),
+     TOPIC_CATEGORY_OUTPUT),
+    (["config", "hgrc"], _("Configuration Files"), loaddoc('config'),
+     TOPIC_CATEGORY_CONFIG),
+    (['deprecated'], _("Deprecated Features"), loaddoc('deprecated'),
+     TOPIC_CATEGORY_MISC),
+    (["dates"], _("Date Formats"), loaddoc('dates'), TOPIC_CATEGORY_OUTPUT),
+    (["flags"], _("Command-line flags"), loaddoc('flags'),
+     TOPIC_CATEGORY_CONFIG),
+    (["patterns"], _("File Name Patterns"), loaddoc('patterns'),
+     TOPIC_CATEGORY_IDS),
     (['environment', 'env'], _('Environment Variables'),
-     loaddoc('environment')),
+     loaddoc('environment'), TOPIC_CATEGORY_CONFIG),
     (['revisions', 'revs', 'revsets', 'revset', 'multirevs', 'mrevs'],
-      _('Specifying Revisions'), loaddoc('revisions')),
-    (['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets')),
-    (['diffs'], _('Diff Formats'), loaddoc('diffs')),
+      _('Specifying Revisions'), loaddoc('revisions'), TOPIC_CATEGORY_IDS),
+    (['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets'),
+     TOPIC_CATEGORY_IDS),
+    (['diffs'], _('Diff Formats'), loaddoc('diffs'), TOPIC_CATEGORY_OUTPUT),
     (['merge-tools', 'mergetools', 'mergetool'], _('Merge Tools'),
-     loaddoc('merge-tools')),
+     loaddoc('merge-tools'), TOPIC_CATEGORY_CONFIG),
     (['templating', 'templates', 'template', 'style'], _('Template Usage'),
-     loaddoc('templates')),
-    (['urls'], _('URL Paths'), loaddoc('urls')),
-    (["extensions"], _("Using Additional Features"), extshelp),
-    (["subrepos", "subrepo"], _("Subrepositories"), loaddoc('subrepos')),
-    (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')),
-    (["glossary"], _("Glossary"), loaddoc('glossary')),
+     loaddoc('templates'), TOPIC_CATEGORY_OUTPUT),
+    (['urls'], _('URL Paths'), loaddoc('urls'), TOPIC_CATEGORY_IDS),
+    (["extensions"], _("Using Additional Features"), extshelp,
+     TOPIC_CATEGORY_CONFIG),
+    (["subrepos", "subrepo"], _("Subrepositories"), loaddoc('subrepos'),
+     TOPIC_CATEGORY_CONCEPTS),
+    (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb'),
+     TOPIC_CATEGORY_CONFIG),
+    (["glossary"], _("Glossary"), loaddoc('glossary'), TOPIC_CATEGORY_CONCEPTS),
     (["hgignore", "ignore"], _("Syntax for Mercurial Ignore Files"),
-     loaddoc('hgignore')),
-    (["phases"], _("Working with Phases"), loaddoc('phases')),
+     loaddoc('hgignore'), TOPIC_CATEGORY_IDS),
+    (["phases"], _("Working with Phases"), loaddoc('phases'),
+     TOPIC_CATEGORY_CONCEPTS),
     (['scripting'], _('Using Mercurial from scripts and automation'),
-     loaddoc('scripting')),
-    (['internals'], _("Technical implementation topics"),
-     internalshelp),
-    (['pager'], _("Pager Support"), loaddoc('pager')),
+     loaddoc('scripting'), TOPIC_CATEGORY_MISC),
+    (['internals'], _("Technical implementation topics"), internalshelp,
+     TOPIC_CATEGORY_MISC),
+    (['pager'], _("Pager Support"), loaddoc('pager'), TOPIC_CATEGORY_CONFIG),
 ])
 
 # Maps topics with sub-topics to a list of their sub-topics.
@@ -332,7 +423,7 @@
             aliases, entry = cmdutil.findcmd(name, commands.table,
                                              strict=unknowncmd)
         except error.AmbiguousCommand as inst:
-            # py3k fix: except vars can't be used outside the scope of the
+            # py3 fix: except vars can't be used outside the scope of the
             # except block, nor can be used inside a lambda. python issue4617
             prefix = inst.args[0]
             select = lambda c: cmdutil.parsealiases(c)[0].startswith(prefix)
@@ -413,39 +504,37 @@
 
         return rst
 
-
     def helplist(select=None, **opts):
-        # list of commands
-        if name == "shortlist":
-            header = _('basic commands:\n\n')
-        elif name == "debug":
-            header = _('debug commands (internal and unsupported):\n\n')
-        else:
-            header = _('list of commands:\n\n')
-
+        # Category -> list of commands
+        cats = {}
+        # Command -> short description
         h = {}
-        cmds = {}
+        # Command -> string showing synonyms
+        syns = {}
         for c, e in commands.table.iteritems():
             fs = cmdutil.parsealiases(c)
             f = fs[0]
-            p = ''
-            if c.startswith("^"):
-                p = '^'
-            if select and not select(p + f):
+            syns[f] = ', '.join(fs)
+            func = e[0]
+            if select and not select(f):
                 continue
             if (not select and name != 'shortlist' and
-                e[0].__module__ != commands.__name__):
+                func.__module__ != commands.__name__):
                 continue
-            if name == "shortlist" and not p:
-                continue
-            doc = pycompat.getdoc(e[0])
+            if name == "shortlist":
+                if not getattr(func, 'helpbasic', False):
+                    continue
+            doc = pycompat.getdoc(func)
             if filtercmd(ui, f, name, doc):
                 continue
             doc = gettext(doc)
             if not doc:
                 doc = _("(no help text available)")
             h[f] = doc.splitlines()[0].rstrip()
-            cmds[f] = '|'.join(fs)
+
+            cat = getattr(func, 'helpcategory', None) or (
+                registrar.command.CATEGORY_NONE)
+            cats.setdefault(cat, []).append(f)
 
         rst = []
         if not h:
@@ -453,15 +542,42 @@
                 rst.append(_('no commands defined\n'))
             return rst
 
+        # Output top header.
         if not ui.quiet:
-            rst.append(header)
-        fns = sorted(h)
-        for f in fns:
-            if ui.verbose:
-                commacmds = cmds[f].replace("|",", ")
-                rst.append(" :%s: %s\n" % (commacmds, h[f]))
+            if name == "shortlist":
+                rst.append(_('basic commands:\n\n'))
+            elif name == "debug":
+                rst.append(_('debug commands (internal and unsupported):\n\n'))
             else:
-                rst.append(' :%s: %s\n' % (f, h[f]))
+                rst.append(_('list of commands:\n'))
+
+        def appendcmds(cmds):
+            cmds = sorted(cmds)
+            for c in cmds:
+                if ui.verbose:
+                    rst.append(" :%s: %s\n" % (syns[c], h[c]))
+                else:
+                    rst.append(' :%s: %s\n' % (c, h[c]))
+
+        if name in ('shortlist', 'debug'):
+            # List without categories.
+            appendcmds(h)
+        else:
+            # Check that all categories have an order.
+            missing_order = set(cats.keys()) - set(CATEGORY_ORDER)
+            if missing_order:
+                ui.develwarn('help categories missing from CATEGORY_ORDER: %s' %
+                             missing_order)
+
+            # List per category.
+            for cat in CATEGORY_ORDER:
+                catfns = cats.get(cat, [])
+                if catfns:
+                    if len(cats) > 1:
+                        catname = gettext(CATEGORY_NAMES[cat])
+                        rst.append("\n%s:\n" % catname)
+                    rst.append("\n")
+                    appendcmds(catfns)
 
         ex = opts.get
         anyopts = (ex(r'keyword') or not (ex(r'command') or ex(r'extension')))
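
For context, the ``helpcategory`` and ``helpbasic`` attributes consulted via
getattr() above are attached to command functions by the registrar.command
decorator. A minimal sketch, assuming a hypothetical extension:

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'hello', [], b'hg hello',
             helpcategory=registrar.command.CATEGORY_MISC,
             helpbasic=True)  # helpbasic lists it under 'basic commands'
    def hello(ui, repo, **opts):
        ui.write(b'hello\n')
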
@@ -471,12 +587,35 @@
                 rst.append('\n')
                 rst.extend(exts)
 
-            rst.append(_("\nadditional help topics:\n\n"))
-            topics = []
-            for names, header, doc in helptable:
-                topics.append((names[0], header))
-            for t, desc in topics:
-                rst.append(" :%s: %s\n" % (t, desc))
+            rst.append(_("\nadditional help topics:\n"))
+            # Group commands by category.
+            topiccats = {}
+            for topic in helptable:
+                names, header, doc = topic[0:3]
+                if len(topic) > 3 and topic[3]:
+                    category = topic[3]
+                else:
+                    category = TOPIC_CATEGORY_NONE
+
+                topiccats.setdefault(category, []).append((names[0], header))
+
+            # Check that all categories have an order.
+            missing_order = set(topiccats.keys()) - set(TOPIC_CATEGORY_ORDER)
+            if missing_order:
+                ui.develwarn(
+                    'help categories missing from TOPIC_CATEGORY_ORDER: %s' %
+                    missing_order)
+
+            # Output topics per category.
+            for cat in TOPIC_CATEGORY_ORDER:
+                topics = topiccats.get(cat, [])
+                if topics:
+                    if len(topiccats) > 1:
+                        catname = gettext(TOPIC_CATEGORY_NAMES[cat])
+                        rst.append("\n%s:\n" % catname)
+                    rst.append("\n")
+                    for t, desc in topics:
+                        rst.append(" :%s: %s\n" % (t, desc))
 
         if ui.quiet:
             pass
@@ -493,7 +632,7 @@
             elif name and not full:
                 rst.append(_("\n(use 'hg help %s' to show the full help "
                              "text)\n") % name)
-            elif name and cmds and name in cmds.keys():
+            elif name and syns and name in syns.keys():
                 rst.append(_("\n(use 'hg help -v -e %s' to show built-in "
                              "aliases and global options)\n") % name)
             else:
@@ -511,7 +650,8 @@
                     break
 
         if not header:
-            for names, header, doc in helptable:
+            for topic in helptable:
+                names, header, doc = topic[0:3]
                 if name in names:
                     break
             else:
@@ -642,8 +782,8 @@
 
     return ''.join(rst)
 
-def formattedhelp(ui, commands, name, keep=None, unknowncmd=False, full=True,
-                  **opts):
+def formattedhelp(ui, commands, fullname, keep=None, unknowncmd=False,
+                  full=True, **opts):
     """get help for a given topic (as a dotted name) as rendered rst
 
     Either returns the rendered help text or raises an exception.
@@ -652,19 +792,17 @@
         keep = []
     else:
         keep = list(keep) # make a copy so we can mutate this later
-    fullname = name
-    section = None
-    subtopic = None
-    if name and '.' in name:
-        name, remaining = name.split('.', 1)
-        remaining = encoding.lower(remaining)
-        if '.' in remaining:
-            subtopic, section = remaining.split('.', 1)
-        else:
-            if name in subtopics:
-                subtopic = remaining
-            else:
-                section = remaining
+
+    # <fullname> := <name>[.<subtopic>][.<section>]
+    name = subtopic = section = None
+    if fullname is not None:
+        nameparts = fullname.split('.')
+        name = nameparts.pop(0)
+        if nameparts and name in subtopics:
+            subtopic = nameparts.pop(0)
+        if nameparts:
+            section = encoding.lower('.'.join(nameparts))
+
     textwidth = ui.configint('ui', 'textwidth')
     termwidth = ui.termwidth() - 2
     if textwidth <= 0 or termwidth < textwidth:
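
To make the new grammar concrete, a small standalone sketch of the parsing
above (using a stand-in subtopics table and plain .lower() in place of
encoding.lower):

    subtopics = {'internals': []}  # stand-in for the real table

    def parse(fullname):
        name = subtopic = section = None
        nameparts = fullname.split('.')
        name = nameparts.pop(0)
        if nameparts and name in subtopics:
            subtopic = nameparts.pop(0)
        if nameparts:
            section = '.'.join(nameparts).lower()
        return name, subtopic, section

    parse('internals.changegroups.deltas')
    # -> ('internals', 'changegroups', 'deltas')
    parse('config.ui')
    # -> ('config', None, 'ui')
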
@@ -672,19 +810,19 @@
     text = help_(ui, commands, name,
                  subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
 
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
+    blocks, pruned = minirst.parse(text, keep=keep)
+    if 'verbose' in pruned:
+        keep.append('omitted')
+    else:
+        keep.append('notomitted')
+    blocks, pruned = minirst.parse(text, keep=keep)
+    if section:
+        blocks = minirst.filtersections(blocks, section)
 
     # We could have been given a weird ".foo" section without a name
     # to look for, or we could have simply failed to found "foo.bar"
     # because bar isn't a section of foo
-    if section and not (formatted and name):
+    if section and not (blocks and name):
         raise error.Abort(_("help section not found: %s") % fullname)
 
-    if 'verbose' in pruned:
-        keep.append('omitted')
-    else:
-        keep.append('notomitted')
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
-    return formatted
+    return minirst.formatplain(blocks, textwidth)
--- a/mercurial/help/config.txt	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/help/config.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -438,6 +438,25 @@
 ``commands``
 ------------
 
+``resolve.confirm``
+    Confirm before performing an action if no filename is passed.
+    (default: False)
+
+``resolve.explicit-re-merge``
+    Require that uses of ``hg resolve`` specify which action to perform,
+    instead of re-merging files by default.
+    (default: False)
+
+``resolve.mark-check``
+    Determines what level of checking :hg:`resolve --mark` will perform before
+    marking files as resolved. Valid values are ``none``, ``warn``, and
+    ``abort``. ``warn`` will output a warning listing the file(s) that still
+    have conflict markers in them, but will still mark everything resolved.
+    ``abort`` will output the same warning but will not mark things as resolved.
+    If ``--all`` is passed and this is set to ``abort``, only a warning will be
+    shown (an error will not be raised).
+    (default: ``none``)
+
 ``status.relative``
     Make paths in :hg:`status` output relative to the current directory.
     (default: False)
@@ -1303,6 +1322,15 @@
     Optional. Always use the proxy, even for localhost and any entries
     in ``http_proxy.no``. (default: False)
 
+``http``
+--------
+
+Used to configure access to Mercurial repositories via HTTP.
+
+``timeout``
+    If set, blocking operations will time out after that many seconds.
+    (default: None)
+
 ``merge``
 ---------
 
@@ -1333,6 +1361,11 @@
    halted, the repository is left in a normal ``unresolved`` merge state.
    (default: ``continue``)
 
+``strict-capability-check``
+   Whether capabilities of internal merge tools are checked strictly
+   when examining the rules that decide which merge tool to use.
+   (default: False)
+
 ``merge-patterns``
 ------------------
 
@@ -1903,6 +1936,10 @@
     repositories to the exchange format required by the bundle1 data
     format can consume a lot of CPU.
 
+``bundle2.stream``
+    Whether to allow clients to pull using the bundle2 streaming protocol.
+    (default: True)
+
 ``zliblevel``
     Integer between ``-1`` and ``9`` that controls the zlib compression level
     for wire protocol commands that send zlib compressed output (notably the
@@ -2330,7 +2367,7 @@
     to release, but over time the recommended config settings
     shift. Enable this config to opt in to get automatic tweaks to
     Mercurial's behavior over time. This config setting will have no
-    effet if ``HGPLAIN` is set or ``HGPLAINEXCEPT`` is set and does
+    effect if ``HGPLAIN`` is set or ``HGPLAINEXCEPT`` is set and does
     not include ``tweakdefaults``. (default: False)
 
 ``username``
@@ -2598,6 +2635,9 @@
 ``server-header``
     Value for HTTP ``Server`` response header.
 
+``static``
+    Directory where static files are served from.
+
 ``staticurl``
     Base URL to use for static files. If unset, static files (e.g. the
     hgicon.png favicon) will be served by the CGI script itself. Use
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/cbor.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,130 @@
+Mercurial uses Concise Binary Object Representation (CBOR)
+(RFC 7049) for various data formats.
+
+This document describes the subset of CBOR that Mercurial uses and
+gives recommendations for appropriate use of CBOR within Mercurial.
+
+Type Limitations
+================
+
+Major types 0 and 1 (unsigned integers and negative integers) MUST be
+fully supported.
+
+Major type 2 (byte strings) MUST be fully supported. However, there
+are limitations around the use of indefinite-length byte strings.
+(See below.)
+
+Major type 3 (text strings) is NOT supported.
+
+Major type 4 (arrays) MUST be supported. However, values are limited
+to the set of types described in the "Container Types" section below.
+And indefinite-length arrays are NOT supported.
+
+Major type 5 (maps) MUST be supported. However, key values are limited
+to the set of types described in the "Container Types" section below.
+And indefinite-length maps are NOT supported.
+
+Major type 6 (semantic tagging of major types) can be used with the
+following semantic tag values:
+
+258
+   Mathematical finite set. Suitable for representing Python's
+   ``set`` type.
+
+All other semantic tag values are not allowed.
+
+Major type 7 (simple data types) can be used with the following
+type values:
+
+20
+   False
+21
+   True
+22
+   Null
+31
+   Break stop code (for indefinite-length items).
+
+All other simple data type values (including every value requiring the
+1 byte extension) are disallowed.
+
+Indefinite-Length Byte Strings
+==============================
+
+Indefinite-length byte strings (major type 2) are allowed. However,
+they MUST NOT occur inside a container type (such as an array or map).
+That is, they can only occur as the "top-most" element in a stream of
+values.
+
+Encoders and decoders SHOULD *stream* indefinite-length byte strings.
+That is, an encoder or decoder SHOULD NOT buffer the entirety of a long
+byte string value when indefinite-length byte strings are being used
+if it can be avoided. Mercurial MAY use extremely long indefinite-length
+byte strings, and buffering the source or destination value COULD lead to
+memory exhaustion.
+
+Chunks in an indefinite-length byte string SHOULD NOT exceed 2^20
+bytes.
+
+Container Types
+===============
+
+Mercurial may use the array (major type 4), map (major type 5), and
+set (semantic tag 258 plus major type 4 array) container types.
+
+An array may contain any supported type as values.
+
+A map MUST only use the following types as keys:
+
+* unsigned integers (major type 0)
+* negative integers (major type 1)
+* byte strings (major type 2) (but not indefinite-length byte strings)
+* false (simple type 20)
+* true (simple type 21)
+* null (simple type 22)
+
+A map MUST only use the following types as values:
+
+* all types supported as map keys
+* arrays
+* maps
+* sets
+
+A set may only use the following types as values:
+
+* all types supported as map keys
+
+It is recommended that keys in maps and values in sets and arrays all
+be of a uniform type.
+
+Avoiding Large Byte Strings
+===========================
+
+The use of large byte strings is discouraged, especially in scenarios where
+the total size of the byte string may be unbounded for some inputs (e.g. when
+representing the content of a tracked file). It is highly recommended to use
+indefinite-length byte strings for these purposes.
+
+Since indefinite-length byte strings cannot be nested within an outer
+container (such as an array or map), to associate a large byte string
+with another data structure, it is recommended to use an array or
+map followed immediately by an indefinite-length byte string. For example,
+instead of the following map::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "long_value": "some very large value...",
+   }
+
+Use a map followed by a byte string::
+
+   {
+      "key1": "value1",
+      "key2": "value2",
+      "value_follows": True,
+   }
+   <BEGIN INDEFINITE-LENGTH BYTE STRING>
+   "some very large value"
+   "..."
+   <END INDEFINITE-LENGTH BYTE STRING>
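+
+For illustration only, a hand-rolled sketch (this is not Mercurial's CBOR
+encoder) of emitting that shape as a stream of chunks::
+
+   import struct
+
+   def bytestring(b):
+       # Definite-length byte string (major type 2).
+       if len(b) < 24:
+           return bytes([0x40 | len(b)]) + b
+       elif len(b) < 2**8:
+           return b'\x58' + bytes([len(b)]) + b
+       elif len(b) < 2**16:
+           return b'\x59' + struct.pack('>H', len(b)) + b
+       return b'\x5a' + struct.pack('>I', len(b)) + b
+
+   def smallmap(pairs):
+       # Definite-length map (major type 5), fewer than 24 entries.
+       out = bytes([0xa0 | len(pairs)])
+       for k, v in pairs:
+           out += bytestring(k) + v
+       return out
+
+   def stream(chunks):
+       yield smallmap([(b'key1', bytestring(b'value1')),
+                       (b'value_follows', b'\xf5')])  # 0xf5 is true
+       yield b'\x5f'                  # begin indefinite-length byte string
+       for chunk in chunks:           # keep each chunk <= 2**20 bytes
+           yield bytestring(chunk)
+       yield b'\xff'                  # break stop code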
--- a/mercurial/help/internals/changegroups.txt	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/help/internals/changegroups.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -4,8 +4,8 @@
 
 There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
 high-level, versions ``1`` and ``2`` are almost exactly the same, with the
-only difference being an additional item in the *delta header*.  Version
-``3`` adds support for revlog flags in the *delta header* and optionally
+only difference being an additional item in the *delta header*. Version
+``3`` adds support for storage flags in the *delta header* and optionally
 exchanging treemanifests (enabled by setting an option on the
 ``changegroup`` part in the bundle2).
 
@@ -127,6 +127,25 @@
 changegroup. This allows the delta to be expressed against any parent,
 which can result in smaller deltas and more efficient encoding of data.
 
+The *flags* field holds bitwise flags affecting the processing of revision
+data. The following flags are defined:
+
+32768
+   Censored revision. The revision's fulltext has been replaced by censor
+   metadata. May only occur on file revisions.
+16384
+   Ellipsis revision. Revision hash does not match data (likely due to rewritten
+   parents).
+8192
+   Externally stored. The revision fulltext contains ``key:value`` ``\n``
+   delimited metadata defining an object stored elsewhere. Used by the LFS
+   extension.
+
+For historical reasons, the integer values are identical to revlog version 1
+per-revision storage flags and correspond to bits being set in this 2-byte
+field. Bits were allocated starting from the most-significant bit, hence the
+reverse ordering and allocation of these flags.
+
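+For illustration, the flags above as bit constants (the names follow
+Mercurial's revlog flag constants; shown only to make the "allocated from
+the most-significant bit" layout explicit)::
+
+   REVIDX_ISCENSORED = 1 << 15   # 32768
+   REVIDX_ELLIPSIS = 1 << 14     # 16384
+   REVIDX_EXTSTORED = 1 << 13    # 8192
+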
 Changeset Segment
 =================
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/linelog.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,302 @@
+linelog is a storage format inspired by the "Interleaved deltas" idea. See
+https://en.wikipedia.org/wiki/Interleaved_deltas for its introduction.
+
+0. SCCS Weave
+
+  To understand what linelog is, we first take a quick look at a simplified
+  SCCS weave format (with the header removed), which is an implementation of
+  the "Interleaved deltas" idea.
+
+0.1 Basic SCCS Weave File Format
+
+  An SCCS weave file consists of plain text lines. Each line is either a
+  special instruction starting with "^A" or part of the content of the real
+  file the weave tracks. There are 3 important operations, where REV denotes
+  the revision number:
+
+    ^AI REV, marking the beginning of an insertion block introduced by REV
+    ^AD REV, marking the beginning of a deletion block introduced by REV
+    ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV"
+
+  Note on revision numbers: For any two different revision numbers, one must
+  be an ancestor of the other to make them comparable. This enforces linear
+  history. Besides, the comparison functions (">=", "<") should be efficient.
+  This means, if revisions are strings like git or hg, an external map is
+  required to convert them into integers.
+
+  For example, to represent the following changes:
+
+    REV 1 | REV 2 | REV 3
+    ------+-------+-------
+    a     | a     | a
+    b     | b     | 2
+    c     | 1     | c
+          | 2     |
+          | c     |
+
+  A possible weave file looks like:
+
+    ^AI 1
+    a
+    ^AD 3
+    b
+    ^AI 2
+    1
+    ^AE 3
+    2
+    ^AE 2
+    c
+    ^AE 1
+
+  An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In
+  the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3".
+  Therefore we need some extra information for "^AE". The SCCS weave uses a
+  revision number. It could also be a boolean value about whether it is an
+  insertion or a deletion (see section 0.4).
+
+0.2 Checkout
+
+  The "checkout" operation is to retrieve file content at a given revision,
+  say X. It's doable by going through the file line by line (a rough Python
+  sketch follows the list) and:
+
+    - If we meet ^AI rev and rev > X, find the corresponding ^AE and jump there
+    - If we meet ^AD rev and rev <= X, find the corresponding ^AE and jump there
+    - Ignore ^AE
+    - For normal lines, just output them
+
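+  A rough Python sketch of this procedure over a pre-parsed weave (the tuple
+  encoding is invented for illustration):
+
+    def checkout(weave, x):
+        # weave: list of ('I', rev), ('D', rev), ('E', rev) or ('L', line)
+        out = []
+        skiprev = None            # revision whose ^AE we are skipping to
+        for item in weave:
+            if skiprev is not None:
+                if item == ('E', skiprev):
+                    skiprev = None
+                continue
+            kind, val = item
+            if kind == 'I' and val > x:
+                skiprev = val     # lines not yet inserted at X
+            elif kind == 'D' and val <= x:
+                skiprev = val     # lines already deleted at X
+            elif kind == 'L':
+                out.append(val)
+        return out
+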
+0.3 Annotate
+
+  The "annotate" operation is to show extra metadata like the revision number
+  and the original line number a line comes from.
+
+  It's basically just a "Checkout". For the extra metadata, they can be stored
+  side by side with the line contents. Alternatively, we can infer the
+  revision number from "^AI"s.
+
+  Some SCM tools have to calculate diffs on the fly and thus are much slower
+  on this operation.
+
+0.4 Tree Structure
+
+  The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE"
+  blocks can be interleaved.
+
+  If we consider insertions and deletions separately, they can form tree
+  structures, respectively.
+
+    +--- ^AI 1        +--- ^AD 3
+    | +- ^AI 2        | +- ^AD 2
+    | |               | |
+    | +- ^AE 2        | +- ^AE 2
+    |                 |
+    +--- ^AE 1        +--- ^AE 3
+
+  More specifically, it's possible to build a tree for all insertions, where
+  the tree node has the structure "(rev, startline, endline)". "startline" is
+  the line number of "^AI" and "endline" is the line number of the matched
+  "^AE".  The tree will have these properties:
+
+    1. child.rev > parent.rev
+    2. child.startline > parent.startline
+    3. child.endline < parent.endline
+
+  A similar tree for all deletions can also be built with the first property
+  changed to:
+
+    1. child.rev < parent.rev
+
+0.5 Malformed Cases
+
+  The following cases are considered malformed in our implementation:
+
+    1. Interleaved insertions, or interleaved deletions.
+       It can be rewritten to a non-interleaved tree structure.
+
+       Take insertions as an example; deletions are similar:
+
+       ^AI x         ^AI x
+       a             a
+       ^AI x + 1  -> ^AI x + 1
+       b             b
+       ^AE x         ^AE x + 1
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    2. Nested insertions, where the inner one has a smaller revision number.
+       Or nested deletions, where the inner one has a larger revision number.
+       It can be rewritten to a non-nested form.
+
+       Take insertions as an example; deletions are similar:
+
+       ^AI x + 1     ^AI x + 1
+       a             a
+       ^AI x      -> ^AE x + 1
+       b             ^AI x
+       ^AE x         b
+       c             ^AE x
+       ^AE x + 1     ^AI x + 1
+                     c
+                     ^AE x + 1
+
+    3. Insertion inside deletion with a smaller revision number.
+
+       Rewrite by duplicating the content inserted:
+
+       ^AD x          ^AD x
+       a              a
+       ^AI x + 1  ->  b
+       b              c
+       ^AE x + 1      ^AE x
+       c              ^AI x + 1
+       ^AE x          b
+                      ^AE x + 1
+
+       Note: If "annotate" purely depends on "^AI" information, then the
+       duplicated content will lose track of where "b" originally came from.
+
+  Some of them may be valid in other implementations for special purposes. For
+  example, to "revive" a previously deleted block in a newer revision.
+
+0.6 Cases Can Be Optimized
+
+  It's always better to get things nested. For example, the left is more
+  efficient than the right, although they represent the same content:
+
+    +--- ^AD 2          +- ^AD 1
+    | +- ^AD 1          |   LINE A
+    | |   LINE A        +- ^AE 1
+    | +- ^AE 1          +- ^AD 2
+    |     LINE B        |   LINE B
+    +--- ^AE 2          +- ^AE 2
+
+  Our implementation sometimes generates the less efficient form. Always
+  producing the optimal form would require extra code complexity that seems
+  unjustified.
+
+0.7 Inefficiency
+
+  The file format can be slow because:
+
+  - Inserting a new line at position P requires rewriting all data after P.
+  - Finding "^AE" requires walking through the content (O(N), where N is the
+    number of lines between "^AI/D" and "^AE").
+
+1. Linelog
+
+  The linelog is a binary format dedicated to speeding up the "annotate"
+  operation of mercurial (or git). It's designed to avoid issues mentioned in
+  section 0.7.
+
+1.1 Content Stored
+
+  Linelog is not another storage for file contents. It only stores line
+  numbers and corresponding revision numbers, instead of actual line content.
+  This is okay for the "annotate" operation because the external source is
+  usually fast at checking out the content of a file at a specific revision.
+
+  A typical SCCS weave is also fast on the "grep" operation, which needs
+  random accesses to line contents from different revisions of a file. This
+  can be slow with linelog's no-line-content design. However, we could use
+  an extra map ((rev, line num) -> line content) to speed it up.
+
+  Note that the revision numbers in linelog should be independent of mercurial
+  integer revision numbers. There should be some mapping between linelog rev
+  and hg hash stored side by side, to make the files reusable after being
+  copied to another machine.
+
+1.2 Basic Format
+
+  A linelog file consists of "instruction"s. An "instruction" can be either:
+
+    - JGE  REV ADDR     # jump to ADDR if rev >= REV
+    - JL   REV ADDR     # jump to ADDR if rev < REV
+    - LINE REV LINENUM  # append the (LINENUM+1)-th line in revision REV
+
+  For example, here is the linelog representing the same file with
+  3 revisions mentioned in section 0.1:
+
+    SCCS  |    Linelog
+    Weave | Addr : Instruction
+    ------+------+-------------
+    ^AI 1 |    0 : JL   1 8
+    a     |    1 : LINE 1 0
+    ^AD 3 |    2 : JGE  3 6
+    b     |    3 : LINE 1 1
+    ^AI 2 |    4 : JL   2 7
+    1     |    5 : LINE 2 2
+    ^AE 3 |
+    2     |    6 : LINE 2 3
+    ^AE 2 |
+    c     |    7 : LINE 1 2
+    ^AE 1 |
+          |    8 : END
+
+  This way, "find ^AE" is O(1) because we just jump there. And we can insert
+  new lines without rewriting most of the file by appending new lines and
+  changing a single instruction to jump to them.
+
+  The current implementation uses 64 bits for an instruction: The opcode (JGE,
+  JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32
+  bits. It also stores the max revision number and buffer size at the first
+  64 bits for quick access to these values.
+
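+  For illustration, a rough Python sketch (not the real implementation, which
+  operates on the packed 64-bit encoding) that executes such a program for a
+  given revision:
+
+    def execute(program, rev):
+        # program: list of ('JGE', rev, addr), ('JL', rev, addr) or
+        # ('LINE', rev, linenum) tuples; returns (rev, linenum) pairs.
+        out = []
+        pc = 0
+        while pc < len(program):   # END (address 8 above) falls off the end
+            op, a, b = program[pc]
+            if (op == 'JGE' and rev >= a) or (op == 'JL' and rev < a):
+                pc = b
+                continue
+            if op == 'LINE':
+                out.append((a, b))
+            pc += 1
+        return out
+
+    program = [('JL', 1, 8), ('LINE', 1, 0), ('JGE', 3, 6), ('LINE', 1, 1),
+               ('JL', 2, 7), ('LINE', 2, 2), ('LINE', 2, 3), ('LINE', 1, 2)]
+    execute(program, 2)  # [(1, 0), (1, 1), (2, 2), (2, 3), (1, 2)]
+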
+1.3 Comparing with Mercurial's revlog format
+
+  On the surface, linelog is very different from revlog: it stores rev and
+  line numbers, while revlog has line contents and other metadata (like
+  parents, flags). However, the revlog format could also be used to store rev
+  and line numbers. For example, to speed up the annotate operation, we could
+  also pre-calculate annotate results and just store them using the revlog
+  format.
+
+  Therefore, linelog is actually somewhat similar to revlog, with the important
+  trade-off that it only supports linear history (mentioned in section 0.1).
+  Essentially, the differences are:
+
+    a) Linelog is full of deltas, while revlog could contain full file
+       contents sometimes. So linelog is smaller. Revlog could trade
+       reconstruction speed for file size - best case, revlog is as small as
+       linelog.
+    b) The interleaved delta structure allows skipping large portion of
+       uninteresting deltas so linelog's content reconstruction is faster than
+       the delta-only version of revlog (however it's possible to construct
+       a case where interleaved deltas degrade to plain deltas, so linelog
+       worst case would be delta-only revlog). Revlog could trade file size
+       for reconstruction speed.
+    c) Linelog implicitly maintains the order of all lines it stores. So it
+       could dump all the lines from all revisions, with a reasonable order.
+       While revlog could also dump all line additions, it requires extra
+       computation to figure out the order in which to put those lines -
+       that's some kind of "merge".
+
+  "c" makes "hg absorb" easier to implement and makes it possible to do
+  "annotate --deleted".
+
+1.4 Malformed Cases Handling
+
+  The following "case 1", "case 2", and "case 3" refer to cases mentioned
+  in section 0.5.
+
+  Using the exposed API (replacelines), case 1 is impossible to generate,
+  although it's possible to generate it by constructing rawdata and loading
+  that via linelog.fromdata.
+
+  Doing annotate(maxrev) before replacelines (i.e. the a1, a2 passed to
+  replacelines refer to the latest revision) eliminates the possibility
+  of case 3. That makes sense since usually you'd like to make edits on top of
+  the latest revision. Practically, both absorb and fastannotate do this.
+
+  Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev
+  eliminates the possibility of case 2. That makes sense since usually the
+  edits belong to "new revisions", not "old revisions". Practically,
+  fastannotate does this. Absorb calls replacelines with rev < maxrev to edit
+  past revisions. So it needs some extra care to not generate case 2.
+
+  If case 1 occurs, that probably means linelog file corruption (assuming
+  linelog is edited via public APIs). The checkout or annotate result could
+  be less meaningful or even error out, but linelog wouldn't enter an infinite
+  loop.
+
+  If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE"
+  operations on the left side are silently ignored.
--- a/mercurial/help/internals/wireprotocol.txt	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/help/internals/wireprotocol.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -220,9 +220,10 @@
 Requests to unknown commands or URLS result in an HTTP 404.
 TODO formally define response type, how error is communicated, etc.
 
-HTTP request and response bodies use the *Unified Frame-Based Protocol*
-(defined below) for media exchange. The entirety of the HTTP message
-body is 0 or more frames as defined by this protocol.
+HTTP request and response bodies use the ``hgrpc`` protocol for media
+exchange. (See :hg:`help internals.wireprotocolrpc` for details of
+the protocol.) The entirety of the HTTP message body is 0 or more frames
+as defined by this protocol.
 
 Clients and servers MUST advertise the ``TBD`` media type via the
 ``Content-Type`` request and response headers. In addition, clients MUST
@@ -236,11 +237,10 @@
 Servers receiving requests with an invalid ``Content-Type`` header SHOULD
 respond with an HTTP 415.
 
-The command to run is specified in the POST payload as defined by the
-*Unified Frame-Based Protocol*. This is redundant with data already
-encoded in the URL. This is by design, so server operators can have
-better understanding about server activity from looking merely at
-HTTP access logs.
+The command to run is specified in the POST payload as defined by ``hgrpc``.
+This is redundant with data already encoded in the URL. This is by design,
+so server operators can have better understanding about server activity from
+looking merely at HTTP access logs.
 
 In most circumstances, the command specified in the URL MUST match
 the command specified in the frame-based payload or the server will
@@ -254,9 +254,9 @@
 *any* command and allow the execution of multiple commands. If the
 HTTP request issues multiple commands across multiple frames, all
 issued commands will be processed by the server. Per the defined
-behavior of the *Unified Frame-Based Protocol*, commands may be
-issued interleaved and responses may come back in a different order
-than they were issued. Clients MUST be able to deal with this.
+behavior of ``hgrpc``, commands may be issued interleaved and responses
+may come back in a different order than they were issued. Clients MUST
+be able to deal with this.
 
 SSH Protocol
 ============
@@ -513,503 +513,6 @@
 Following capabilities advertisement, the peers communicate using version
 1 of the SSH transport.
 
-Unified Frame-Based Protocol
-============================
-
-**Experimental and under development**
-
-The *Unified Frame-Based Protocol* is a communications protocol between
-Mercurial peers. The protocol aims to be mostly transport agnostic
-(works similarly on HTTP, SSH, etc).
-
-To operate the protocol, a bi-directional, half-duplex pipe supporting
-ordered sends and receives is required. That is, each peer has one pipe
-for sending data and another for receiving.
-
-All data is read and written in atomic units called *frames*. These
-are conceptually similar to TCP packets. Higher-level functionality
-is built on the exchange and processing of frames.
-
-All frames are associated with a *stream*. A *stream* provides a
-unidirectional grouping of frames. Streams facilitate two goals:
-content encoding and parallelism. There is a dedicated section on
-streams below.
-
-The protocol is request-response based: the client issues requests to
-the server, which issues replies to those requests. Server-initiated
-messaging is not currently supported, but this specification carves
-out room to implement it.
-
-All frames are associated with a numbered request. Frames can thus
-be logically grouped by their request ID.
-
-Frames begin with an 8 octet header followed by a variable length
-payload::
-
-    +------------------------------------------------+
-    |                 Length (24)                    |
-    +--------------------------------+---------------+
-    |         Request ID (16)        | Stream ID (8) |
-    +------------------+-------------+---------------+
-    | Stream Flags (8) |
-    +-----------+------+
-    | Type (4)  |
-    +-----------+
-    | Flags (4) |
-    +===========+===================================================|
-    |                     Frame Payload (0...)                    ...
-    +---------------------------------------------------------------+
-
-The length of the frame payload is expressed as an unsigned 24 bit
-little endian integer. Values larger than 65535 MUST NOT be used unless
-given permission by the server as part of the negotiated capabilities
-during the handshake. The frame header is not part of the advertised
-frame length. The payload length is the over-the-wire length. If there
-is content encoding applied to the payload as part of the frame's stream,
-the length is the output of that content encoding, not the input.
-
-The 16-bit ``Request ID`` field denotes the integer request identifier,
-stored as an unsigned little endian integer. Odd numbered requests are
-client-initiated. Even numbered requests are server-initiated. This
-refers to where the *request* was initiated - not where the *frame* was
-initiated, so servers will send frames with odd ``Request ID`` in
-response to client-initiated requests. Implementations are advised to
-start ordering request identifiers at ``1`` and ``0``, increment by
-``2``, and wrap around if all available numbers have been exhausted.
-
-The 8-bit ``Stream ID`` field denotes the stream that the frame is
-associated with. Frames belonging to a stream may have content
-encoding applied and the receiver may need to decode the raw frame
-payload to obtain the original data. Odd numbered IDs are
-client-initiated. Even numbered IDs are server-initiated.
-
-The 8-bit ``Stream Flags`` field defines stream processing semantics.
-See the section on streams below.
-
-The 4-bit ``Type`` field denotes the type of frame being sent.
-
-The 4-bit ``Flags`` field defines special, per-type attributes for
-the frame.
-
-The sections below define the frame types and their behavior.
-
-Command Request (``0x01``)
---------------------------
-
-This frame contains a request to run a command.
-
-The payload consists of a CBOR map defining the command request. The
-bytestring keys of that map are:
-
-name
-   Name of the command that should be executed (bytestring).
-args
-   Map of bytestring keys to various value types containing the named
-   arguments to this command.
-
-   Each command defines its own set of argument names and their expected
-   types.
-
-This frame type MUST ONLY be sent from clients to servers: it is illegal
-for a server to send this frame to a client.
-
-The following flag values are defined for this type:
-
-0x01
-   New command request. When set, this frame represents the beginning
-   of a new request to run a command. The ``Request ID`` attached to this
-   frame MUST NOT be active.
-0x02
-   Command request continuation. When set, this frame is a continuation
-   from a previous command request frame for its ``Request ID``. This
-   flag is set when the CBOR data for a command request does not fit
-   in a single frame.
-0x04
-   Additional frames expected. When set, the command request didn't fit
-   into a single frame and additional CBOR data follows in a subsequent
-   frame.
-0x08
-   Command data frames expected. When set, command data frames are
-   expected to follow the final command request frame for this request.
-
-``0x01`` MUST be set on the initial command request frame for a
-``Request ID``.
-
-``0x01`` or ``0x02`` MUST be set to indicate this frame's role in
-a series of command request frames.
-
-If command data frames are to be sent, ``0x08`` MUST be set on ALL
-command request frames.
-
-Command Data (``0x02``)
------------------------
-
-This frame contains raw data for a command.
-
-Most commands can be executed by specifying arguments. However,
-arguments have an upper bound to their length. For commands that
-accept data that is beyond this length or whose length isn't known
-when the command is initially sent, they will need to stream
-arbitrary data to the server. This frame type facilitates the sending
-of this data.
-
-The payload of this frame type consists of a stream of raw data to be
-consumed by the command handler on the server. The format of the data
-is command specific.
-
-The following flag values are defined for this type:
-
-0x01
-   Command data continuation. When set, the data for this command
-   continues into a subsequent frame.
-
-0x02
-   End of data. When set, command data has been fully sent to the
-   server. The command has been fully issued and no new data for this
-   command will be sent. The next frame will belong to a new command.
-
-Command Response Data (``0x03``)
---------------------------------
-
-This frame contains response data to an issued command.
-
-Response data ALWAYS consists of a series of 1 or more CBOR encoded
-values. A CBOR value may be using indefinite length encoding. And the
-bytes constituting the value may span several frames.
-
-The following flag values are defined for this type:
-
-0x01
-   Data continuation. When set, an additional frame containing response data
-   will follow.
-0x02
-   End of data. When set, the response data has been fully sent and
-   no additional frames for this response will be sent.
-
-The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
-
-Error Occurred (``0x05``)
--------------------------
-
-Some kind of error occurred.
-
-There are 3 general kinds of failures that can occur:
-
-* Command error encountered before any response issued
-* Command error encountered after a response was issued
-* Protocol or stream level error
-
-This frame type is used to capture the latter cases. (The general
-command error case is handled by the leading CBOR map in
-``Command Response`` frames.)
-
-The payload of this frame contains a CBOR map detailing the error. That
-map has the following bytestring keys:
-
-type
-   (bytestring) The overall type of error encountered. Can be one of the
-   following values:
-
-   protocol
-      A protocol-level error occurred. This typically means someone
-      is violating the framing protocol semantics and the server is
-      refusing to proceed.
-
-   server
-      A server-level error occurred. This typically indicates some kind of
-      logic error on the server, likely the fault of the server.
-
-   command
-      A command-level error, likely the fault of the client.
-
-message
-   (array of maps) A richly formatted message that is intended for
-   human consumption. See the ``Human Output Side-Channel`` frame
-   section for a description of the format of this data structure.
-
-Human Output Side-Channel (``0x06``)
-------------------------------------
-
-This frame contains a message that is intended to be displayed to
-people. Whereas most frames communicate machine readable data, this
-frame communicates textual data that is intended to be shown to
-humans.
-
-The frame consists of a series of *formatting requests*. Each formatting
-request consists of a formatting string, arguments for that formatting
-string, and labels to apply to that formatting string.
-
-A formatting string is a printf()-like string that allows variable
-substitution within the string. Labels allow the rendered text to be
-*decorated*. Assuming use of the canonical Mercurial code base, a
-formatting string can be the input to the ``i18n._`` function. This
-allows messages emitted from the server to be localized. So even if
-the server has different i18n settings, people could see messages in
-their *native* settings. Similarly, the use of labels allows
-decorations like coloring and underlining to be applied using the
-client's configured rendering settings.
-
-Formatting strings are similar to ``printf()`` strings or how
-Python's ``%`` operator works. The only supported formatting sequences
-are ``%s`` and ``%%``. ``%s`` will be replaced by whatever the string
-at that position resolves to. ``%%`` will be replaced by ``%``. All
-other 2-byte sequences beginning with ``%`` represent a literal
-``%`` followed by that character. However, future versions of the
-wire protocol reserve the right to allow clients to opt in to receiving
-formatting strings with additional formatters, hence why ``%%`` is
-required to represent the literal ``%``.
-
-The frame payload consists of a CBOR array of CBOR maps. Each map
-defines an *atom* of text data to print. Each *atom* has the following
-bytestring keys:
-
-msg
-   (bytestring) The formatting string. Content MUST be ASCII.
-args (optional)
-   Array of bytestrings defining arguments to the formatting string.
-labels (optional)
-   Array of bytestrings defining labels to apply to this atom.
-
-All data to be printed MUST be encoded into a single frame: this frame
-does not support spanning data across multiple frames.
-
-All textual data encoded in these frames is assumed to be line delimited.
-The last atom in the frame SHOULD end with a newline (``\n``). If it
-doesn't, clients MAY add a newline to facilitate immediate printing.
-
-Progress Update (``0x07``)
---------------------------
-
-This frame holds the progress of an operation on the peer. Consumption
-of these frames allows clients to display progress bars, estimated
-completion times, etc.
-
-Each frame defines the progress of a single operation on the peer. The
-payload consists of a CBOR map with the following bytestring keys:
-
-topic
-   Topic name (string)
-pos
-   Current numeric position within the topic (integer)
-total
-   Total/end numeric position of this topic (unsigned integer)
-label (optional)
-   Unit label (string)
-item (optional)
-   Item name (string)
-
-Progress state is created when a frame is received referencing a
-*topic* that isn't currently tracked. Progress tracking for that
-*topic* is finished when a frame is received reporting the current
-position of that topic as ``-1``.
-
-Multiple *topics* may be active at any given time.
-
-Rendering of progress information is not mandated or governed by this
-specification: implementations MAY render progress information however
-they see fit, including not at all.
-
-The string data describing the topic SHOULD be static strings to
-facilitate receivers localizing that string data. The emitter
-MUST normalize all string data to valid UTF-8 and receivers SHOULD
-validate that received data conforms to UTF-8. The topic name
-SHOULD be ASCII.
-
-Stream Encoding Settings (``0x08``)
------------------------------------
-
-This frame type holds information defining the content encoding
-settings for a *stream*.
-
-This frame type is likely consumed by the protocol layer and is not
-passed on to applications.
-
-This frame type MUST ONLY occur on frames having the *Beginning of Stream*
-``Stream Flag`` set.
-
-The payload of this frame defines what content encoding has (possibly)
-been applied to the payloads of subsequent frames in this stream.
-
-The payload begins with an 8-bit integer defining the length of the
-encoding *profile*, followed by the string name of that profile, which
-must be an ASCII string. All bytes that follow can be used by that
-profile for supplemental settings definitions. See the section below
-on defined encoding profiles.
-
-Stream States and Flags
------------------------
-
-Streams can be in two states: *open* and *closed*. An *open* stream
-is active and frames attached to that stream could arrive at any time.
-A *closed* stream is not active. If a frame attached to a *closed*
-stream arrives, that frame MUST have an appropriate stream flag
-set indicating beginning of stream. All streams are in the *closed*
-state by default.
-
-The ``Stream Flags`` field denotes a set of bit flags for defining
-the relationship of this frame within a stream. The following flags
-are defined:
-
-0x01
-   Beginning of stream. The first frame in the stream MUST set this
-   flag. When received, the ``Stream ID`` this frame is attached to
-   becomes ``open``.
-
-0x02
-   End of stream. The last frame in a stream MUST set this flag. When
-   received, the ``Stream ID`` this frame is attached to becomes
-   ``closed``. Any content encoding context associated with this stream
-   can be destroyed after processing the payload of this frame.
-
-0x04
-   Apply content encoding. When set, any content encoding settings
-   defined by the stream should be applied when attempting to read
-   the frame. When not set, the frame payload isn't encoded.
-
-Streams
--------
-
-Streams - along with ``Request IDs`` - facilitate grouping of frames.
-But the purpose of each is quite different and the groupings they
-constitute are independent.
-
-A ``Request ID`` is essentially a tag. It tells you which logical
-request a frame is associated with.
-
-A *stream* is a sequence of frames grouped for the express purpose
-of applying a stateful encoding or for denoting sub-groups of frames.
-
-Unlike ``Request ID``s which span the request and response, a stream
-is unidirectional and stream IDs are independent from client to
-server.
-
-There is no strict hierarchical relationship between ``Request IDs``
-and *streams*. A stream can contain frames having multiple
-``Request IDs``. Frames belonging to the same ``Request ID`` can
-span multiple streams.
-
-One goal of streams is to facilitate content encoding. A stream can
-define an encoding to be applied to frame payloads. For example, the
-payload transmitted over the wire may contain output from a
-zstandard compression operation and the receiving end may decompress
-that payload to obtain the original data.
-
-The other goal of streams is to facilitate concurrent execution. For
-example, a server could spawn 4 threads to service a request that can
-be easily parallelized. Each of those 4 threads could write into its
-own stream. Those streams could then in turn be delivered to 4 threads
-on the receiving end, with each thread consuming its stream in near
-isolation. The *main* thread on both ends merely does I/O and
-encodes/decodes frame headers: the bulk of the work is done by worker
-threads.
-
-In addition, since content encoding is defined per stream, each
-*worker thread* could perform potentially CPU bound work concurrently
-with other threads. This approach of applying encoding at the
-sub-protocol / stream level eliminates a potential resource constraint
-on the protocol stream as a whole (it is common for the throughput of
-a compression engine to be smaller than the throughput of a network).
-
-Having multiple streams - each with their own encoding settings - also
-facilitates the use of advanced data compression techniques. For
-example, a transmitter could see that it is generating data faster
-and slower than the receiving end is consuming it and adjust its
-compression settings to trade CPU for compression ratio accordingly.
-
-While streams can define a content encoding, not all frames within
-that stream must use that content encoding. This can be useful when
-data is being served from caches and being derived dynamically. A
-cache could pre-compressed data so the server doesn't have to
-recompress it. The ability to pick and choose which frames are
-compressed allows servers to easily send data to the wire without
-involving potentially expensive encoding overhead.
-
-Content Encoding Profiles
--------------------------
-
-Streams can have named content encoding *profiles* associated with
-them. A profile defines a shared understanding of content encoding
-settings and behavior.
-
-The following profiles are defined:
-
-TBD
-
-Command Protocol
-----------------
-
-A client can request that a remote run a command by sending it
-frames defining that command. This logical stream is composed of
-1 or more ``Command Request`` frames and and 0 or more ``Command Data``
-frames.
-
-All frames composing a single command request MUST be associated with
-the same ``Request ID``.
-
-Clients MAY send additional command requests without waiting on the
-response to a previous command request. If they do so, they MUST ensure
-that the ``Request ID`` field of outbound frames does not conflict
-with that of an active ``Request ID`` whose response has not yet been
-fully received.
-
-Servers MAY respond to commands in a different order than they were
-sent over the wire. Clients MUST be prepared to deal with this. Servers
-also MAY start executing commands in a different order than they were
-received, or MAY execute multiple commands concurrently.
-
-If there is a dependency between commands or a race condition between
-commands executing (e.g. a read-only command that depends on the results
-of a command that mutates the repository), then clients MUST NOT send
-frames issuing a command until a response to all dependent commands has
-been received.
-TODO think about whether we should express dependencies between commands
-to avoid roundtrip latency.
-
-A command is defined by a command name, 0 or more command arguments,
-and optional command data.
-
-Arguments are the recommended mechanism for transferring fixed sets of
-parameters to a command. Data is appropriate for transferring variable
-data. Thinking in terms of HTTP, arguments would be headers and data
-would be the message body.
-
-It is recommended for servers to delay the dispatch of a command
-until all argument have been received. Servers MAY impose limits on the
-maximum argument size.
-TODO define failure mechanism.
-
-Servers MAY dispatch to commands immediately once argument data
-is available or delay until command data is received in full.
-
-Once a ``Command Request`` frame is sent, a client must be prepared to
-receive any of the following frames associated with that request:
-``Command Response``, ``Error Response``, ``Human Output Side-Channel``,
-``Progress Update``.
-
-The *main* response for a command will be in ``Command Response`` frames.
-The payloads of these frames consist of 1 or more CBOR encoded values.
-The first CBOR value on the first ``Command Response`` frame is special
-and denotes the overall status of the command. This CBOR map contains
-the following bytestring keys:
-
-status
-   (bytestring) A well-defined message containing the overall status of
-   this command request. The following values are defined:
-
-   ok
-      The command was received successfully and its response follows.
-   error
-      There was an error processing the command. More details about the
-      error are encoded in the ``error`` key.
-
-error (optional)
-   A map containing information about an encountered error. The map has the
-   following keys:
-
-   message
-      (array of maps) A message describing the error. The message uses the
-      same format as those in the ``Human Output Side-Channel`` frame.
-
 Capabilities
 ============
 
@@ -1199,6 +702,25 @@
 
 This capability/command was introduced in Mercurial 1.9 (released July 2011).
 
+lfs
+---
+
+Indicates that the LFS extension is enabled on the server.  It makes no claims
+about the repository actually having LFS blobs committed to it.
+
+This capability was introduced by the LFS extension in Mercurial 4.5 (released
+Feb 2018).
+
+lfs-serve
+---------
+
+Indicates that the LFS extension is enabled on the server, and LFS blobs are
+committed to the remote repository.  (Specifically, it indicates that the 'lfs'
+requirement is present in the remote repository.)
+
+This capability was introduced by the LFS extension in Mercurial 4.8 (released
+Nov 2018).
+
 lookup
 ------
 
@@ -1379,6 +901,9 @@
 This section contains a list of all wire protocol commands implemented by
 the canonical Mercurial server.
 
+See :hg:`help internals.wireprotocolv2` for information on commands exposed
+to the frame-based protocol.
+
 batch
 -----
 
@@ -1750,164 +1275,3 @@
 
 The server may also respond with a generic error type, which contains a string
 indicating the failure.
-
-Frame-Based Protocol Commands
-=============================
-
-**Experimental and under active development**
-
-This section documents the wire protocol commands exposed to transports
-using the frame-based protocol. The set of commands exposed through
-these transports is distinct from the set of commands exposed to legacy
-transports.
-
-The frame-based protocol uses CBOR to encode command execution requests.
-All command arguments must be mapped to a specific or set of CBOR data
-types.
-
-The response to many commands is also CBOR. There is no common response
-format: each command defines its own response format.
-
-TODO require node type be specified, as N bytes of binary node value
-could be ambiguous once SHA-1 is replaced.
-
-branchmap
----------
-
-Obtain heads in named branches.
-
-Receives no arguments.
-
-The response is a map with bytestring keys defining the branch name.
-Values are arrays of bytestring defining raw changeset nodes.
-
-capabilities
-------------
-
-Obtain the server's capabilities.
-
-Receives no arguments.
-
-This command is typically called only as part of the handshake during
-initial connection establishment.
-
-The response is a map with bytestring keys defining server information.
-
-The defined keys are:
-
-commands
-   A map defining available wire protocol commands on this server.
-
-   Keys in the map are the names of commands that can be invoked. Values
-   are maps defining information about that command. The bytestring keys
-   are:
-
-      args
-         A map of argument names and their expected types.
-
-         Types are defined as a representative value for the expected type.
-         e.g. an argument expecting a boolean type will have its value
-         set to true. An integer type will have its value set to 42. The
-         actual values are arbitrary and may not have meaning.
-      permissions
-         An array of permissions required to execute this command.
-
-compression
-   An array of maps defining available compression format support.
-
-   The array is sorted from most preferred to least preferred.
-
-   Each entry has the following bytestring keys:
-
-      name
-         Name of the compression engine. e.g. ``zstd`` or ``zlib``.
-
-framingmediatypes
-   An array of bytestrings defining the supported framing protocol
-   media types. Servers will not accept media types not in this list.
-
-rawrepoformats
-   An array of storage formats the repository is using. This set of
-   requirements can be used to determine whether a client can read a
-   *raw* copy of file data available.
-
-heads
------
-
-Obtain DAG heads in the repository.
-
-The command accepts the following arguments:
-
-publiconly (optional)
-   (boolean) If set, operate on the DAG for public phase changesets only.
-   Non-public (i.e. draft) phase DAG heads will not be returned.
-
-The response is a CBOR array of bytestrings defining changeset nodes
-of DAG heads. The array can be empty if the repository is empty or no
-changesets satisfied the request.
-
-TODO consider exposing phase of heads in response
-
-known
------
-
-Determine whether a series of changeset nodes is known to the server.
-
-The command accepts the following arguments:
-
-nodes
-   (array of bytestrings) List of changeset nodes whose presence to
-   query.
-
-The response is a bytestring where each byte contains a 0 or 1 for the
-corresponding requested node at the same index.
-
-TODO use a bit array for even more compact response
-
-listkeys
---------
-
-List values in a specified ``pushkey`` namespace.
-
-The command receives the following arguments:
-
-namespace
-   (bytestring) Pushkey namespace to query.
-
-The response is a map with bytestring keys and values.
-
-TODO consider using binary to represent nodes in certain pushkey namespaces.
-
-lookup
-------
-
-Try to resolve a value to a changeset revision.
-
-Unlike ``known`` which operates on changeset nodes, lookup operates on
-node fragments and other names that a user may use.
-
-The command receives the following arguments:
-
-key
-   (bytestring) Value to try to resolve.
-
-On success, returns a bytestring containing the resolved node.
-
-pushkey
--------
-
-Set a value using the ``pushkey`` protocol.
-
-The command receives the following arguments:
-
-namespace
-   (bytestring) Pushkey namespace to operate on.
-key
-   (bytestring) The pushkey key to set.
-old
-   (bytestring) Old value for this key.
-new
-   (bytestring) New value for this key.
-
-TODO consider using binary to represent nodes is certain pushkey namespaces.
-TODO better define response type and meaning.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/wireprotocolrpc.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,740 @@
+**Experimental and under development**
+
+This document describes Mercurial's transport-agnostic remote procedure
+call (RPC) protocol, which is used to perform interactions with remote
+servers. This protocol is also referred to as ``hgrpc``.
+
+The protocol has the following high-level features:
+
+* Concurrent request and response support (multiple commands can be issued
+  simultaneously and responses can be streamed simultaneously).
+* Supports half-duplex and full-duplex connections.
+* All data is transmitted within *frames*, which have a well-defined
+  header and encode their length.
+* Side-channels for sending progress updates and printing output. Text
+  output from the remote can be localized locally.
+* Support for simultaneous and long-lived compression streams, even across
+  requests.
+* Uses CBOR for data exchange.
+
+The protocol is not specific to Mercurial and could be used by other
+applications.
+
+High-level Overview
+===================
+
+To operate the protocol, a bi-directional, half-duplex pipe supporting
+ordered sends and receives is required. That is, each peer has one pipe
+for sending data and another for receiving. Full-duplex pipes are also
+supported.
+
+All data is read and written in atomic units called *frames*. These
+are conceptually similar to TCP packets. Higher-level functionality
+is built on the exchange and processing of frames.
+
+All frames are associated with a *stream*. A *stream* provides a
+unidirectional grouping of frames. Streams facilitate two goals:
+content encoding and parallelism. There is a dedicated section on
+streams below.
+
+The protocol is request-response based: the client issues requests to
+the server, which issues replies to those requests. Server-initiated
+messaging is not currently supported, but this specification carves
+out room to implement it.
+
+All frames are associated with a numbered request. Frames can thus
+be logically grouped by their request ID.
+
+Frames
+======
+
+Frames begin with an 8 octet header followed by a variable length
+payload::
+
+    +------------------------------------------------+
+    |                 Length (24)                    |
+    +--------------------------------+---------------+
+    |         Request ID (16)        | Stream ID (8) |
+    +------------------+-------------+---------------+
+    | Stream Flags (8) |
+    +-----------+------+
+    | Type (4)  |
+    +-----------+
+    | Flags (4) |
+    +===========+===================================================+
+    |                     Frame Payload (0...)                    ...
+    +---------------------------------------------------------------+
+
+The length of the frame payload is expressed as an unsigned 24 bit
+little endian integer. Values larger than 65535 MUST NOT be used unless
+given permission by the server as part of the negotiated capabilities
+during the handshake. The frame header is not part of the advertised
+frame length. The payload length is the over-the-wire length. If there
+is content encoding applied to the payload as part of the frame's stream,
+the length is the output of that content encoding, not the input.
+
+The 16-bit ``Request ID`` field denotes the integer request identifier,
+stored as an unsigned little endian integer. Odd numbered requests are
+client-initiated. Even numbered requests are server-initiated. This
+refers to where the *request* was initiated - not where the *frame* was
+initiated, so servers will send frames with odd ``Request ID`` in
+response to client-initiated requests. Implementations are advised to
+start ordering request identifiers at ``1`` and ``0``, increment by
+``2``, and wrap around if all available numbers have been exhausted.
+
+The 8-bit ``Stream ID`` field denotes the stream that the frame is
+associated with. Frames belonging to a stream may have content
+encoding applied and the receiver may need to decode the raw frame
+payload to obtain the original data. Odd numbered IDs are
+client-initiated. Even numbered IDs are server-initiated.
+
+The 8-bit ``Stream Flags`` field defines stream processing semantics.
+See the section on streams below.
+
+The 4-bit ``Type`` field denotes the type of frame being sent.
+
+The 4-bit ``Flags`` field defines special, per-type attributes for
+the frame.
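+
+As a non-normative illustration, the header could be decoded in Python
+as follows. ``parseframeheader`` is a hypothetical helper (the canonical
+framing implementation lives in ``mercurial/wireprotoframing.py``); the
+``Type`` field is taken to occupy the high-order bits of the final
+octet, matching the field order in the diagram above::
+
+  import struct
+
+  def parseframeheader(header):
+      """Decode the fixed 8 octet frame header."""
+      assert len(header) == 8
+      # 24-bit little endian payload length.
+      length = header[0] | header[1] << 8 | header[2] << 16
+      # 16-bit little endian request ID, then stream ID and stream flags.
+      requestid = struct.unpack_from('<H', header, 3)[0]
+      streamid = header[5]
+      streamflags = header[6]
+      # Frame type and per-type flags share the final octet.
+      frametype = header[7] >> 4
+      frameflags = header[7] & 0x0f
+      return (length, requestid, streamid, streamflags, frametype,
+              frameflags)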
+
+The sections below define the frame types and their behavior.
+
+Command Request (``0x01``)
+--------------------------
+
+This frame contains a request to run a command.
+
+The payload consists of a CBOR map defining the command request. The
+bytestring keys of that map are:
+
+name
+   Name of the command that should be executed (bytestring).
+args
+   Map of bytestring keys to various value types containing the named
+   arguments to this command.
+
+   Each command defines its own set of argument names and their expected
+   types.
+
+redirect (optional)
+   (map) Advertises client support for following response *redirects*.
+
+   This map has the following bytestring keys:
+
+   targets
+      (array of bytestring) List of named redirect targets supported by
+      this client. The names come from the targets advertised by the
+      server's *capabilities* message.
+
+   hashes
+      (array of bytestring) List of preferred hashing algorithms that can
+      be used for content integrity verification.
+
+   See the *Content Redirects* section below for more on content redirects.
+
+This frame type MUST ONLY be sent from clients to servers: it is illegal
+for a server to send this frame to a client.
+
+The following flag values are defined for this type:
+
+0x01
+   New command request. When set, this frame represents the beginning
+   of a new request to run a command. The ``Request ID`` attached to this
+   frame MUST NOT be active.
+0x02
+   Command request continuation. When set, this frame is a continuation
+   from a previous command request frame for its ``Request ID``. This
+   flag is set when the CBOR data for a command request does not fit
+   in a single frame.
+0x04
+   Additional frames expected. When set, the command request didn't fit
+   into a single frame and additional CBOR data follows in a subsequent
+   frame.
+0x08
+   Command data frames expected. When set, command data frames are
+   expected to follow the final command request frame for this request.
+
+``0x01`` MUST be set on the initial command request frame for a
+``Request ID``.
+
+``0x01`` or ``0x02`` MUST be set to indicate this frame's role in
+a series of command request frames.
+
+If command data frames are to be sent, ``0x08`` MUST be set on ALL
+command request frames.
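+
+As a sketch, the payload for a simple command request could be encoded
+like this, using the third-party ``cbor2`` package for illustration
+(Mercurial itself ships an internal CBOR encoder)::
+
+  import cbor2
+
+  # Hypothetical request to run the "heads" command with one argument.
+  payload = cbor2.dumps({
+      b'name': b'heads',
+      b'args': {b'publiconly': True},
+  })
+
+A frame carrying this payload on a new request would set the ``0x01``
+(new command request) flag and, since the payload fits in one frame,
+would set neither ``0x04`` nor ``0x08``.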
+
+Command Data (``0x02``)
+-----------------------
+
+This frame contains raw data for a command.
+
+Most commands can be executed by specifying arguments. However,
+arguments have an upper bound to their length. Commands that accept
+data beyond this length, or whose length isn't known when the command
+is initially sent, need to stream arbitrary data to the server. This
+frame type facilitates the sending of that data.
+
+The payload of this frame type consists of a stream of raw data to be
+consumed by the command handler on the server. The format of the data
+is command specific.
+
+The following flag values are defined for this type:
+
+0x01
+   Command data continuation. When set, the data for this command
+   continues into a subsequent frame.
+
+0x02
+   End of data. When set, command data has been fully sent to the
+   server. The command has been fully issued and no new data for this
+   command will be sent. The next frame will belong to a new command.
+
+Command Response Data (``0x03``)
+--------------------------------
+
+This frame contains response data to an issued command.
+
+Response data ALWAYS consists of a series of 1 or more CBOR encoded
+values. A CBOR value may use indefinite length encoding, and the
+bytes constituting the value may span several frames.
+
+The following flag values are defined for this type:
+
+0x01
+   Data continuation. When set, an additional frame containing response data
+   will follow.
+0x02
+   End of data. When set, the response data has been fully sent and
+   no additional frames for this response will be sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Error Occurred (``0x05``)
+-------------------------
+
+Some kind of error occurred.
+
+There are 3 general kinds of failures that can occur:
+
+* Command error encountered before any response issued
+* Command error encountered after a response was issued
+* Protocol or stream level error
+
+This frame type is used to capture the latter two cases. (The general
+command error case is handled by the leading CBOR map in
+``Command Response`` frames.)
+
+The payload of this frame contains a CBOR map detailing the error. That
+map has the following bytestring keys:
+
+type
+   (bytestring) The overall type of error encountered. Can be one of the
+   following values:
+
+   protocol
+      A protocol-level error occurred. This typically means someone
+      is violating the framing protocol semantics and the server is
+      refusing to proceed.
+
+   server
+      A server-level error occurred. This typically indicates some kind of
+      logic error on the server, likely the fault of the server.
+
+   command
+      A command-level error, likely the fault of the client.
+
+message
+   (array of maps) A richly formatted message that is intended for
+   human consumption. See the ``Human Output Side-Channel`` frame
+   section for a description of the format of this data structure.
+
+Human Output Side-Channel (``0x06``)
+------------------------------------
+
+This frame contains a message that is intended to be displayed to
+people. Whereas most frames communicate machine readable data, this
+frame communicates textual data that is intended to be shown to
+humans.
+
+The frame consists of a series of *formatting requests*. Each formatting
+request consists of a formatting string, arguments for that formatting
+string, and labels to apply to that formatting string.
+
+A formatting string is a printf()-like string that allows variable
+substitution within the string. Labels allow the rendered text to be
+*decorated*. Assuming use of the canonical Mercurial code base, a
+formatting string can be the input to the ``i18n._`` function. This
+allows messages emitted from the server to be localized. So even if
+the server has different i18n settings, people could see messages in
+their *native* settings. Similarly, the use of labels allows
+decorations like coloring and underlining to be applied using the
+client's configured rendering settings.
+
+Formatting strings are similar to ``printf()`` strings or how
+Python's ``%`` operator works. The only supported formatting sequences
+are ``%s`` and ``%%``. ``%s`` will be replaced by whatever the string
+at that position resolves to. ``%%`` will be replaced by ``%``. All
+other 2-byte sequences beginning with ``%`` represent a literal
+``%`` followed by that character. However, future versions of the
+wire protocol reserve the right to allow clients to opt in to receiving
+formatting strings with additional formatters, which is why ``%%`` is
+required to represent the literal ``%``.
+
+The frame payload consists of a CBOR array of CBOR maps. Each map
+defines an *atom* of text data to print. Each *atom* has the following
+bytestring keys:
+
+msg
+   (bytestring) The formatting string. Content MUST be ASCII.
+args (optional)
+   Array of bytestrings defining arguments to the formatting string.
+labels (optional)
+   Array of bytestrings defining labels to apply to this atom.
+
+All data to be printed MUST be encoded into a single frame: this frame
+does not support spanning data across multiple frames.
+
+All textual data encoded in these frames is assumed to be line delimited.
+The last atom in the frame SHOULD end with a newline (``\n``). If it
+doesn't, clients MAY add a newline to facilitate immediate printing.
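+
+A client could render these atoms along the following lines (a minimal
+sketch; it ignores ``labels`` and elides translating ``msg`` through
+``i18n._``)::
+
+  def renderatoms(atoms):
+      """Render the atoms from a Human Output Side-Channel frame."""
+      out = []
+      for atom in atoms:
+          # Only %s and %% formatting sequences are defined by this
+          # protocol, so Python's % operator suffices here.
+          msg = atom[b'msg'].decode('ascii')
+          args = tuple(a.decode('utf-8') for a in atom.get(b'args', ()))
+          out.append(msg % args)
+      return ''.join(out)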
+
+Progress Update (``0x07``)
+--------------------------
+
+This frame holds the progress of an operation on the peer. Consumption
+of these frames allows clients to display progress bars, estimated
+completion times, etc.
+
+Each frame defines the progress of a single operation on the peer. The
+payload consists of a CBOR map with the following bytestring keys:
+
+topic
+   Topic name (string)
+pos
+   Current numeric position within the topic (integer)
+total
+   Total/end numeric position of this topic (unsigned integer)
+label (optional)
+   Unit label (string)
+item (optional)
+   Item name (string)
+
+Progress state is created when a frame is received referencing a
+*topic* that isn't currently tracked. Progress tracking for that
+*topic* is finished when a frame is received reporting the current
+position of that topic as ``-1``.
+
+Multiple *topics* may be active at any given time.
+
+Rendering of progress information is not mandated or governed by this
+specification: implementations MAY render progress information however
+they see fit, including not at all.
+
+The string data describing the topic SHOULD be static strings to
+facilitate receivers localizing that string data. The emitter
+MUST normalize all string data to valid UTF-8 and receivers SHOULD
+validate that received data conforms to UTF-8. The topic name
+SHOULD be ASCII.
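+
+For example, a progress frame might carry the following payload, shown
+in decoded Python form (the values are purely illustrative)::
+
+  {b'topic': b'files', b'pos': 3, b'total': 10, b'item': b'foo.txt'}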
+
+Sender Protocol Settings (``0x08``)
+-----------------------------------
+
+This frame type advertises the sender's support for various protocol and
+stream level features. The data advertised in this frame is used to influence
+subsequent behavior of the current frame exchange channel.
+
+The frame payload consists of a CBOR map. It may contain the following
+bytestring keys:
+
+contentencodings
+   (array of bytestring) A list of content encodings supported by the
+   sender, in order of most to least preferred.
+
+   Peers are allowed to encode stream data using any of the listed
+   encodings.
+
+   See the ``Content Encoding Profiles`` section for an enumeration
+   of supported content encodings.
+
+   If not defined, the value is assumed to be a list with the single value
+   ``identity``, meaning only the no-op encoding is supported.
+
+   Senders MAY filter the set of advertised encodings against what they
+   know the receiver supports (e.g. if the receiver advertised encodings
+   via the capabilities descriptor). However, doing so will prevent
+   servers from gaining an understanding of the aggregate capabilities
+   of clients, so clients are discouraged from doing so.
+
+When this frame is not sent/received, the receiver assumes default values
+for all keys.
+
+If encountered, this frame type MUST be sent before any other frame type
+in a channel.
+
+The following flag values are defined for this frame type:
+
+0x01
+   Data continuation. When set, an additional frame containing more protocol
+   settings immediately follows.
+0x02
+   End of data. When set, the protocol settings data has been completely
+   sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Stream Encoding Settings (``0x09``)
+-----------------------------------
+
+This frame type holds information defining the content encoding
+settings for a *stream*.
+
+This frame type is likely consumed by the protocol layer and is not
+passed on to applications.
+
+This frame type MUST ONLY occur on frames having the *Beginning of Stream*
+``Stream Flag`` set.
+
+The payload of this frame defines what content encoding has (possibly)
+been applied to the payloads of subsequent frames in this stream.
+
+The payload consists of a series of CBOR values. The first value is a
+bytestring denoting the content encoding profile of the data in this
+stream. Subsequent CBOR values supplement this simple value in a
+profile-specific manner. See the ``Content Encoding Profiles`` section
+for more.
+
+In the absence of this frame on a stream, it is assumed the stream is
+using the ``identity`` content encoding.
+
+The following flag values are defined for this frame type:
+
+0x01
+   Data continuation. When set, an additional frame containing more encoding
+   settings immediately follows.
+0x02
+   End of data. When set, the encoding settings data has been completely
+   sent.
+
+The ``0x01`` flag is mutually exclusive with the ``0x02`` flag.
+
+Stream States and Flags
+=======================
+
+Streams can be in two states: *open* and *closed*. An *open* stream
+is active and frames attached to that stream could arrive at any time.
+A *closed* stream is not active. If a frame attached to a *closed*
+stream arrives, that frame MUST have an appropriate stream flag
+set indicating beginning of stream. All streams are in the *closed*
+state by default.
+
+The ``Stream Flags`` field denotes a set of bit flags for defining
+the relationship of this frame within a stream. The following flags
+are defined:
+
+0x01
+   Beginning of stream. The first frame in the stream MUST set this
+   flag. When received, the ``Stream ID`` this frame is attached to
+   becomes ``open``.
+
+0x02
+   End of stream. The last frame in a stream MUST set this flag. When
+   received, the ``Stream ID`` this frame is attached to becomes
+   ``closed``. Any content encoding context associated with this stream
+   can be destroyed after processing the payload of this frame.
+
+0x04
+   Apply content encoding. When set, any content encoding settings
+   defined by the stream should be applied when attempting to read
+   the frame. When not set, the frame payload isn't encoded.
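+
+A receiver might track stream state along these lines (a sketch;
+``makedecoder`` is a hypothetical factory returning a content decoder
+for the stream)::
+
+  STREAM_FLAG_BEGIN = 0x01
+  STREAM_FLAG_END = 0x02
+  STREAM_FLAG_ENCODED = 0x04
+
+  def onframe(streams, streamid, streamflags, payload):
+      if streamflags & STREAM_FLAG_BEGIN:
+          streams[streamid] = makedecoder()
+      # A frame on a closed stream without the begin flag is an error.
+      decoder = streams[streamid]
+      if streamflags & STREAM_FLAG_ENCODED:
+          payload = decoder.decode(payload)
+      if streamflags & STREAM_FLAG_END:
+          del streams[streamid]
+      return payload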
+
+TODO consider making stream opening and closing communicated via
+explicit frame types (e.g. a "stream state change" frame) rather than
+flags on all frames. This would make stream state changes more explicit,
+as they could only occur on specific frame types.
+
+Streams
+=======
+
+Streams - along with ``Request IDs`` - facilitate grouping of frames.
+But the purpose of each is quite different and the groupings they
+constitute are independent.
+
+A ``Request ID`` is essentially a tag. It tells you which logical
+request a frame is associated with.
+
+A *stream* is a sequence of frames grouped for the express purpose
+of applying a stateful encoding or for denoting sub-groups of frames.
+
+Unlike ``Request ID``s which span the request and response, a stream
+is unidirectional and stream IDs are independent from client to
+server.
+
+There is no strict hierarchical relationship between ``Request IDs``
+and *streams*. A stream can contain frames having multiple
+``Request IDs``. Frames belonging to the same ``Request ID`` can
+span multiple streams.
+
+One goal of streams is to facilitate content encoding. A stream can
+define an encoding to be applied to frame payloads. For example, the
+payload transmitted over the wire may contain output from a
+zstandard compression operation and the receiving end may decompress
+that payload to obtain the original data.
+
+The other goal of streams is to facilitate concurrent execution. For
+example, a server could spawn 4 threads to service a request that can
+be easily parallelized. Each of those 4 threads could write into its
+own stream. Those streams could then in turn be delivered to 4 threads
+on the receiving end, with each thread consuming its stream in near
+isolation. The *main* thread on both ends merely does I/O and
+encodes/decodes frame headers: the bulk of the work is done by worker
+threads.
+
+In addition, since content encoding is defined per stream, each
+*worker thread* could perform potentially CPU bound work concurrently
+with other threads. This approach of applying encoding at the
+sub-protocol / stream level eliminates a potential resource constraint
+on the protocol stream as a whole (it is common for the throughput of
+a compression engine to be smaller than the throughput of a network).
+
+Having multiple streams - each with their own encoding settings - also
+facilitates the use of advanced data compression techniques. For
+example, a transmitter could see that it is generating data faster
+or slower than the receiving end is consuming it and adjust its
+compression settings to trade CPU for compression ratio accordingly.
+
+While streams can define a content encoding, not all frames within
+that stream must use that content encoding. This can be useful when
+some data is served from caches and other data is derived dynamically. A
+cache could hold pre-compressed data so the server doesn't have to
+recompress it. The ability to pick and choose which frames are
+compressed allows servers to easily send data over the wire without
+incurring potentially expensive encoding overhead.
+
+Content Encoding Profiles
+=========================
+
+Streams can have named content encoding *profiles* associated with
+them. A profile defines a shared understanding of content encoding
+settings and behavior.
+
+Profiles are described in the following sections.
+
+identity
+--------
+
+The ``identity`` profile is a no-op encoding: the encoded bytes are
+exactly the input bytes.
+
+This profile MUST be supported by all peers.
+
+In the absence of an identified profile, the ``identity`` profile is
+assumed.
+
+zstd-8mb
+--------
+
+Zstandard encoding (RFC 8478). Zstandard is a fast and effective lossless
+compression format.
+
+This profile allows decompressor window sizes of up to 8 MB.
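+
+With the third-party ``zstandard`` Python package, for example, a
+receiver could enforce this limit as follows (a sketch; the parameter
+name is that package's, not part of this specification)::
+
+  import zstandard
+
+  # Reject payloads requiring a decompression window larger than 8 MB.
+  dctx = zstandard.ZstdDecompressor(max_window_size=8 * 1048576)
+  decoder = dctx.decompressobj()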
+
+zlib
+----
+
+zlib compressed data (RFC 1950). zlib is a widely-used and supported
+lossless compression format.
+
+It isn't as fast as zstandard and it is recommended to use zstandard instead,
+if possible.
+
+Command Protocol
+================
+
+A client can request that a remote run a command by sending it
+frames defining that command. This logical stream is composed of
+1 or more ``Command Request`` frames and 0 or more ``Command Data``
+frames.
+
+All frames composing a single command request MUST be associated with
+the same ``Request ID``.
+
+Clients MAY send additional command requests without waiting on the
+response to a previous command request. If they do so, they MUST ensure
+that the ``Request ID`` field of outbound frames does not conflict
+with that of an active ``Request ID`` whose response has not yet been
+fully received.
+
+Servers MAY respond to commands in a different order than they were
+sent over the wire. Clients MUST be prepared to deal with this. Servers
+also MAY start executing commands in a different order than they were
+received, or MAY execute multiple commands concurrently.
+
+If there is a dependency between commands or a race condition between
+commands executing (e.g. a read-only command that depends on the results
+of a command that mutates the repository), then clients MUST NOT send
+frames issuing a command until a response to all dependent commands has
+been received.
+TODO think about whether we should express dependencies between commands
+to avoid roundtrip latency.
+
+A command is defined by a command name, 0 or more command arguments,
+and optional command data.
+
+Arguments are the recommended mechanism for transferring fixed sets of
+parameters to a command. Data is appropriate for transferring variable
+data. Thinking in terms of HTTP, arguments would be headers and data
+would be the message body.
+
+It is recommended for servers to delay the dispatch of a command
+until all arguments have been received. Servers MAY impose limits on the
+maximum argument size.
+TODO define failure mechanism.
+
+Servers MAY dispatch to commands immediately once argument data
+is available or delay until command data is received in full.
+
+Once a ``Command Request`` frame is sent, a client must be prepared to
+receive any of the following frames associated with that request:
+``Command Response``, ``Error Response``, ``Human Output Side-Channel``,
+``Progress Update``.
+
+The *main* response for a command will be in ``Command Response`` frames.
+The payloads of these frames consist of 1 or more CBOR encoded values.
+The first CBOR value on the first ``Command Response`` frame is special
+and denotes the overall status of the command. This CBOR map contains
+the following bytestring keys:
+
+status
+   (bytestring) A well-defined message containing the overall status of
+   this command request. The following values are defined:
+
+   ok
+      The command was received successfully and its response follows.
+   error
+      There was an error processing the command. More details about the
+      error are encoded in the ``error`` key.
+   redirect
+      The response for this command is available elsewhere. Details on
+      where are in the ``location`` key.
+
+error (optional)
+   A map containing information about an encountered error. The map has the
+   following keys:
+
+   message
+      (array of maps) A message describing the error. The message uses the
+      same format as those in the ``Human Output Side-Channel`` frame.
+
+location (optional)
+   (map) Presence indicates that a *content redirect* has occurred. The map
+   provides the external location of the content.
+
+   This map contains the following bytestring keys:
+
+   url
+      (bytestring) URL from which this content may be requested.
+
+   mediatype
+      (bytestring) The media type for the fetched content. e.g.
+      ``application/mercurial-*``.
+
+      In some transports, this value is also advertised by the transport.
+      e.g. as the ``Content-Type`` HTTP header.
+
+   size (optional)
+      (unsigned integer) Total size of remote object in bytes. This is
+      the raw size of the entity that will be fetched, minus any
+      non-Mercurial protocol encoding (e.g. HTTP content or transfer
+      encoding).
+
+   fullhashes (optional)
+      (array of arrays) Content hashes for the entire payload. Each entry
+      is an array of bytestrings containing the hash name and the hash value.
+
+   fullhashseed (optional)
+      (bytestring) Optional seed value to feed into hasher for full content
+      hash verification.
+
+   serverdercerts (optional)
+      (array of bytestring) DER encoded x509 certificates for the server. When
+      defined, clients MAY validate that the x509 certificate on the target
+      server exactly matches the certificate used here.
+
+   servercadercerts (optional)
+      (array of bytestring) DER encoded x509 certificates for the certificate
+      authority of the target server. When defined, clients MAY validate that
+      the x509 certificate on the target server was signed by a CA
+      certificate in this set.
+
+   # TODO support for giving client an x509 certificate pair to be used as a
+   # client certificate.
+
+   # TODO support common authentication mechanisms (e.g. HTTP basic/digest
+   # auth).
+
+   # TODO support custom authentication mechanisms. This likely requires
+   # server to advertise required auth mechanism so client can filter.
+
+   # TODO support chained hashes. e.g. hash for each 1MB segment so client
+   # can iteratively validate data without having to consume all of it first.
+
+TODO formalize when error frames can be seen and how errors can be
+recognized midway through a command response.
+
+Content Redirects
+=================
+
+Servers have the ability to respond to ANY command request with a
+*redirect* to another location. Such a response is referred to as a *redirect
+response*. (This feature is conceptually similar to HTTP redirects, but is
+more powerful.)
+
+A *redirect response* MUST ONLY be issued if the client advertises support
+for a redirect *target*.
+
+Clients advertise support for *redirect responses* after looking at the server's
+*capabilities* data, which is fetched during initial server connection
+handshake. The server's capabilities data advertises named *targets* for
+potential redirects.
+
+Each target is described by a protocol name, connection and protocol features,
+etc. The server also advertises target-agnostic redirect settings, such as
+which hash algorithms are supported for content integrity checking. (See
+the documentation for the *capabilities* command for more.)
+
+Clients examine the set of advertised redirect targets for compatibility.
+When sending a command request, the client advertises the set of redirect
+target names it is willing to follow, along with some other settings influencing
+behavior.
+
+For example, say the server is advertising a ``cdn`` redirect target that
+requires SNI and TLS 1.2. If the client supports those features, it will
+send command requests stating that the ``cdn`` target is acceptable to use.
+But if the client doesn't support SNI or TLS 1.2 (or maybe it encountered an
+error using this target from a previous request), then it omits this target
+name.
+
+If the client advertises support for a redirect target, the server MAY
+substitute the normal, inline response data for a *redirect response* -
+one where the initial CBOR map has a ``status`` key with value ``redirect``.
+
+The *redirect response* at a minimum advertises the URL where the response
+can be retrieved.
+
+The *redirect response* MAY also advertise additional details about that
+content and how to retrieve it. Notably, the response may contain the
+x509 public certificates for the server being redirected to or the
+certificate authority that signed that server's certificate. Unless the
+client has existing settings that offer stronger trust validation than what
+the server advertises, the client SHOULD use the server-provided certificates
+when validating the connection to the remote server in place of any default
+connection verification checks. This is because certificates coming from
+the server SHOULD establish a stronger chain of trust than what the default
+certificate validation mechanism in most environments provides. (By default,
+certificate validation ensures the signer of the cert chains up to a set of
+trusted root certificates. If an explicit certificate or CA certificate
+is presented, that greatly reduces the set of certificates that will be
+recognized as valid, thus reducing the potential for a "bad" certificate
+to be used and trusted.)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/wireprotocolv2.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,722 @@
+**Experimental and under active development**
+
+This section documents the wire protocol commands exposed to transports
+using the frame-based protocol. The set of commands exposed through
+these transports is distinct from the set of commands exposed to legacy
+transports.
+
+The frame-based protocol uses CBOR to encode command execution requests.
+All command arguments must be mapped to a specific or set of CBOR data
+types.
+
+The response to many commands is also CBOR. There is no common response
+format: each command defines its own response format.
+
+TODOs
+=====
+
+* Add "node namespace" support to each command. In order to support
+  SHA-1 hash transition, we want servers to be able to expose different
+  "node namespaces" for the same data. Every command operating on nodes
+  should specify which "node namespace" it is operating on and responses
+  should encode the "node namespace" accordingly.
+
+Commands
+========
+
+The sections below detail all commands available to wire protocol version
+2.
+
+branchmap
+---------
+
+Obtain heads in named branches.
+
+Receives no arguments.
+
+The response is a map with bytestring keys defining the branch name.
+Values are arrays of bytestring defining raw changeset nodes.
+
+capabilities
+------------
+
+Obtain the server's capabilities.
+
+Receives no arguments.
+
+This command is typically called only as part of the handshake during
+initial connection establishment.
+
+The response is a map with bytestring keys defining server information.
+
+The defined keys are:
+
+commands
+   A map defining available wire protocol commands on this server.
+
+   Keys in the map are the names of commands that can be invoked. Values
+   are maps defining information about that command. The bytestring keys
+   are:
+
+      args
+         (map) Describes arguments accepted by the command.
+
+         Keys are bytestrings denoting the argument name.
+
+         Values are maps describing the argument. The map has the following
+         bytestring keys:
+
+         default
+            (varied) The default value for this argument if not specified. Only
+            present if ``required`` is not true.
+
+         required
+            (boolean) Whether the argument must be specified. Failure to send
+            required arguments will result in an error executing the command.
+
+         type
+            (bytestring) The type of the argument. e.g. ``bytes`` or ``bool``.
+
+         validvalues
+            (set) Values that are recognized for this argument. Some arguments
+            only allow a fixed set of values to be specified. These arguments
+            may advertise that set in this key. If this set is advertised and
+            a value not in this set is specified, the command should result
+            in error.
+
+      permissions
+         An array of permissions required to execute this command.
+
+      *
+         (various) Individual commands may define extra keys that supplement
+         generic command metadata. See the command definition for more.
+
+framingmediatypes
+   An array of bytestrings defining the supported framing protocol
+   media types. Servers will not accept media types not in this list.
+
+pathfilterprefixes
+   (set of bytestring) Matcher prefixes that are recognized when performing
+   path filtering. Specifying a path filter whose type/prefix does not
+   match one in this set will likely be rejected by the server.
+
+rawrepoformats
+   An array of storage formats the repository is using. This set of
+   requirements can be used to determine whether a client can read a
+   *raw* copy of file data available.
+
+redirect
+   A map declaring potential *content redirects* that may be used by this
+   server. Contains the following bytestring keys:
+
+   targets
+      (array of maps) Potential redirect targets. Values are maps describing
+      this target in more detail. Each map has the following bytestring keys:
+
+      name
+         (bytestring) Identifier for this target. The identifier will be used
+         by clients to uniquely identify this target.
+
+      protocol
+         (bytestring) High-level network protocol. Values can be
+         ``http``, ``https``, ``ssh``, etc.
+
+      uris
+          (array of bytestrings) Representative URIs for this target.
+
+      snirequired (optional)
+          (boolean) Indicates whether Server Name Indication is required
+          to use this target. Defaults to False.
+
+      tlsversions (optional)
+          (array of bytestring) Indicates which TLS versions are supported by
+          this target. Values are ``1.1``, ``1.2``, ``1.3``, etc.
+
+   hashes
+      (array of bytestring) Indicates support for hashing algorithms that are
+      used to ensure content integrity. Values include ``sha1``, ``sha256``,
+      etc.
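+
+As an illustration, a heavily trimmed, decoded capabilities response
+might look like the following (all values are examples, not a normative
+listing)::
+
+  {
+    b'commands': {
+      b'heads': {
+        b'args': {
+          b'publiconly': {
+            b'default': False,
+            b'required': False,
+            b'type': b'bool',
+          },
+        },
+        b'permissions': [b'pull'],
+      },
+    },
+    b'framingmediatypes': [b'application/mercurial-exp-framing-0006'],
+    b'rawrepoformats': [b'generaldelta', b'revlogv1'],
+  }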
+
+changesetdata
+-------------
+
+Obtain various data related to changesets.
+
+The command accepts the following arguments:
+
+revisions
+   (array of maps) Specifies revisions whose data is being requested. Each
+   value in the array is a map describing revisions. See the
+   *Revisions Specifiers* section below for the format of this map.
+
+   Data will be sent for the union of all revisions resolved by all
+   revision specifiers.
+
+   Only revision specifiers operating on changeset revisions are allowed.
+
+fields
+   (set of bytestring) Which data associated with changelog revisions to
+   fetch. The following values are recognized:
+
+   bookmarks
+      Bookmarks associated with a revision.
+
+   parents
+      Parent revisions.
+
+   phase
+      The phase state of a revision.
+
+   revision
+      The raw, revision data for the changelog entry. The hash of this data
+      will match the revision's node value.
+
+The response bytestream starts with a CBOR map describing the data that follows.
+This map has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of changelog revisions whose data is being
+   transferred. This maps to the set of revisions in the requested node
+   range, not the total number of records that follow (see below for why).
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single changeset
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node value for this revision. This is the SHA-1 hash of
+   the raw revision data.
+
+bookmarks (optional)
+   (array of bytestrings) Bookmarks attached to this revision. Only present
+   if ``bookmarks`` data is being requested and the revision has bookmarks
+   attached.
+
+fieldsfollowing (optional)
+   (array of 2-array) Denotes what fields immediately follow this map. Each
+   value is an array with 2 elements: the bytestring field name and an unsigned
+   integer describing the length of the data, in bytes.
+
+   If this key isn't present, no special fields will follow this map.
+
+   The following fields may be present:
+
+   revision
+      Raw, revision data for the changelog entry. Contains a serialized form
+      of the changeset data, including the author, date, commit message, set
+      of changed files, manifest node, and other metadata.
+
+      Only present if the ``revision`` field was requested.
+
+parents (optional)
+   (array of bytestrings) The nodes representing the parent revisions of this
+   revision. Only present if ``parents`` data is being requested.
+
+phase (optional)
+   (bytestring) The phase that a revision is in. Recognized values are
+   ``secret``, ``draft``, and ``public``. Only present if ``phase`` data
+   is being requested.
+
+The set of changeset revisions emitted may not match the exact set of
+changesets requested. Furthermore, the set of keys present on each
+map may vary. This is to facilitate emitting changeset updates as well
+as new revisions.
+
+For example, if the request wants ``phase`` and ``revision`` data,
+the response may contain entries for each changeset in the common nodes
+set with the ``phase`` key and without the ``revision`` key in order
+to reflect a phase-only update.
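+
+A consumer might walk this bytestream along the following lines (a
+sketch; ``values`` is assumed to be an iterator of decoded CBOR values
+from the response)::
+
+  def readrevisions(values):
+      """Yield (descriptor, fields) pairs from a changesetdata response."""
+      header = next(values)  # e.g. {b'totalitems': 42}
+      for rev in values:
+          fields = {}
+          for name, size in rev.get(b'fieldsfollowing', []):
+              # The next CBOR value is a bytestring of ``size`` bytes
+              # holding the named field's data.
+              fields[name] = next(values)
+          yield rev, fields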
+
+TODO support different revision selection mechanisms (e.g. non-public, specific
+revisions)
+TODO support different hash "namespaces" for revisions (e.g. sha-1 versus other)
+TODO support emitting obsolescence data
+TODO support filtering based on relevant paths (narrow clone)
+TODO support hgtagsfnodes cache / tags data
+TODO support branch heads cache
+TODO consider unify query mechanism. e.g. as an array of "query descriptors"
+rather than a set of top-level arguments that have semantics when combined.
+
+filedata
+--------
+
+Obtain various data related to an individual tracked file.
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with a file to fetch.
+   The following values are recognized:
+
+   linknode
+      The changeset node introducing this revision.
+
+   parents
+      Parent nodes for the revision.
+
+   revision
+      The raw revision data for a file.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes. If set, the server may emit revision data as deltas against
+   any parent revision. If not set, the server MUST only emit deltas for
+   revisions previously emitted by this command.
+
+   False is assumed in the absence of any value.
+
+nodes
+   (array of bytestrings) File nodes whose data to retrieve.
+
+path
+   (bytestring) Path of the tracked file whose data to retrieve.
+
+TODO allow specifying revisions via alternate means (such as from
+changeset revisions or ranges)
+
+The response bytestream starts with a CBOR map describing the data that
+follows. It has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is
+   being returned.
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single file
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node of the file revision whose data is represented.
+
+deltabasenode
+   (bytestring) Node of the file revision the following delta is against.
+
+   Only present if the ``revision`` field is requested and delta data
+   follows this map.
+
+fieldsfollowing
+   (array of 2-array) Denotes extra bytestring fields that follow this map.
+   See the documentation for ``changesetdata`` for semantics.
+
+   The following named fields may be present:
+
+   ``delta``
+      The delta data to use to construct the fulltext revision.
+
+      Only present if the ``revision`` field is requested and a delta is
+      being emitted. The ``deltabasenode`` top-level key will also be
+      present if this field is being emitted.
+
+   ``revision``
+      The fulltext revision data for this file. Only present if the
+      ``revision`` field is requested and a fulltext revision is being emitted.
+
+parents
+   (array of bytestring) The nodes of the parents of this file revision.
+
+   Only present if the ``parents`` field is requested.
+
+When ``revision`` data is requested, the server chooses to emit either fulltext
+revision data or a delta. What the server decides can be inferred by looking
+for the presence of the ``delta`` or ``revision`` keys in the
+``fieldsfollowing`` array.
+
+filesdata
+---------
+
+Obtain various data related to multiple tracked files for specific changesets.
+
+This command is similar to ``filedata`` with the main difference being that
+individual requests operate on multiple file paths. This allows clients to
+request data for multiple paths by issuing a single command.
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with a file to fetch.
+   The following values are recognized:
+
+   linknode
+      The changeset node introducing this revision.
+
+   parents
+      Parent nodes for the revision.
+
+   revision
+      The raw revision data for a file.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes.
+
+pathfilter
+   (map) Defines a filter that determines what file paths are relevant.
+
+   See the *Path Filters* section for more.
+
+   If the argument is omitted, it is assumed that all paths are relevant.
+
+revisions
+   (array of maps) Specifies revisions whose data is being requested. Each value
+   in the array is a map describing revisions. See the *Revisions Specifiers*
+   section below for the format of this map.
+
+   Data will be sent for the union of all revisions resolved by all revision
+   specifiers.
+
+   Only revision specifiers operating on changeset revisions are allowed.
+
+The response bytestream starts with a CBOR map describing the data that
+follows. This map has the following bytestring keys:
+
+totalpaths
+   (unsigned integer) Total number of paths whose data is being transferred.
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is being
+   transferred.
+
+Following the map header are 0 or more sequences of CBOR values. Each sequence
+represents data for a specific tracked path. Each sequence begins with a CBOR
+map describing the file data that follows. Following that map are N CBOR values
+describing file revision data. The format of this data is identical to that
+returned by the ``filedata`` command.
+
+Each sequence's map header has the following bytestring keys:
+
+path
+   (bytestring) The tracked file path whose data follows.
+
+totalitems
+   (unsigned integer) Total number of file revisions whose data is being
+   transferred.
+
+The ``haveparents`` argument has significant implications on the data
+transferred.
+
+When ``haveparents`` is true, the command MAY only emit data for file
+revisions introduced by the set of changeset revisions whose data is being
+requested. In other words, the command may assume that all file revisions
+for all relevant paths for ancestors of the requested changeset revisions
+are present on the receiver.
+
+When ``haveparents`` is false, the command MUST assume that the receiver
+has no file revisions data. This means that all referenced file revisions
+in the queried set of changeset revisions will be sent.
+
+TODO we'll probably want a more complicated mechanism for the client to
+specify which ancestor revisions are known.
+TODO we may want to make linknodes an array so multiple changesets can be
+marked as introducing a file revision, since this can occur with e.g. hidden
+changesets.
+
+heads
+-----
+
+Obtain DAG heads in the repository.
+
+The command accepts the following arguments:
+
+publiconly (optional)
+   (boolean) If set, operate on the DAG for public phase changesets only.
+   Non-public (i.e. draft) phase DAG heads will not be returned.
+
+The response is a CBOR array of bytestrings defining changeset nodes
+of DAG heads. The array can be empty if the repository is empty or no
+changesets satisfied the request.
+
+TODO consider exposing phase of heads in response
+
+known
+-----
+
+Determine whether a series of changeset nodes is known to the server.
+
+The command accepts the following arguments:
+
+nodes
+   (array of bytestrings) List of changeset nodes whose presence to
+   query.
+
+The response is a bytestring where each byte contains a 0 or 1 for the
+corresponding requested node at the same index.
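+
+For example, assuming ASCII ``0`` and ``1`` bytes as in the legacy
+``known`` command, a client could decode the response as follows::
+
+  # nodes is the list sent in the request; response is the bytestring.
+  known = {node: response[i:i + 1] == b'1'
+           for i, node in enumerate(nodes)}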
+
+TODO use a bit array for even more compact response
+
+listkeys
+--------
+
+List values in a specified ``pushkey`` namespace.
+
+The command receives the following arguments:
+
+namespace
+   (bytestring) Pushkey namespace to query.
+
+The response is a map with bytestring keys and values.
+
+TODO consider using binary to represent nodes in certain pushkey namespaces.
+
+lookup
+------
+
+Try to resolve a value to a changeset revision.
+
+Unlike ``known`` which operates on changeset nodes, lookup operates on
+node fragments and other names that a user may use.
+
+The command receives the following arguments:
+
+key
+   (bytestring) Value to try to resolve.
+
+On success, returns a bytestring containing the resolved node.
+
+manifestdata
+------------
+
+Obtain various data related to manifests (which are lists of files in
+a revision).
+
+The command accepts the following arguments:
+
+fields
+   (set of bytestring) Which data associated with manifests to fetch.
+   The following values are recognized:
+
+   parents
+      Parent nodes for the manifest.
+
+   revision
+      The raw revision data for the manifest.
+
+haveparents
+   (bool) Whether the client has the parent revisions of all requested
+   nodes. If set, the server may emit revision data as deltas against
+   any parent revision. If not set, the server MUST only emit deltas for
+   revisions previously emitted by this command.
+
+   False is assumed in the absence of any value.
+
+nodes
+   (array of bytestring) Manifest nodes whose data to retrieve.
+
+tree
+   (bytestring) Path to manifest to retrieve. The empty bytestring represents
+   the root manifest. All other values represent directories/trees within
+   the repository.
+
+TODO allow specifying revisions via alternate means (such as from changeset
+revisions or ranges)
+TODO consider recursive expansion of manifests (with path filtering for
+narrow use cases)
+
+The response bytestream starts with a CBOR map describing the data that
+follows. It has the following bytestring keys:
+
+totalitems
+   (unsigned integer) Total number of manifest revisions whose data is
+   being returned.
+
+Following the map header is a series of 0 or more CBOR values. If values
+are present, the first value will always be a map describing a single manifest
+revision.
+
+If the ``fieldsfollowing`` key is present, the map will immediately be followed
+by N CBOR bytestring values, where N is the number of elements in
+``fieldsfollowing``. Each bytestring value corresponds to a field denoted
+by ``fieldsfollowing``.
+
+Following the optional bytestring field values is the next revision descriptor
+map, or end of stream.
+
+Each revision descriptor map has the following bytestring keys:
+
+node
+   (bytestring) The node of the manifest revision whose data is represented.
+
+deltabasenode
+   (bytestring) The node that the delta representation of this revision is
+   computed against. Only present if the ``revision`` field is requested and
+   a delta is being emitted.
+
+fieldsfollowing
+   (array of 2-array) Denotes extra bytestring fields that follow this map.
+   See the documentation for ``changesetdata`` for semantics.
+
+   The following named fields may be present:
+
+   ``delta``
+      The delta data to use to construct the fulltext revision.
+
+      Only present if the ``revision`` field is requested and a delta is
+      being emitted. The ``deltabasenode`` top-level key will also be
+      present if this field is being emitted.
+
+   ``revision``
+      The fulltext revision data for this manifest. Only present if the
+      ``revision`` field is requested and a fulltext revision is being emitted.
+
+parents
+   (array of bytestring) The nodes of the parents of this manifest revision.
+   Only present if the ``parents`` field is requested.
+
+When ``revision`` data is requested, the server chooses to emit either fulltext
+revision data or a delta. What the server decides can be inferred by looking
+for the presence of ``delta`` or ``revision`` in the ``fieldsfollowing`` array.
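+
+For example, a sketch of consuming one revision descriptor and its
+trailing field values (``readcbor`` and ``readbytestring`` are
+hypothetical helpers that read from the response stream)::
+
+  descriptor = readcbor(stream)
+  fields = {}
+  # Each ``fieldsfollowing`` entry is assumed to be a (name, size) pair.
+  for name, size in descriptor.get(b'fieldsfollowing', []):
+      fields[name] = readbytestring(stream, size)
+  if b'delta' in fields:
+      # Apply against the fulltext of descriptor[b'deltabasenode'].
+      delta = fields[b'delta']
+  elif b'revision' in fields:
+      fulltext = fields[b'revision']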
+
+Servers MAY advertise the following extra fields in the capabilities
+descriptor for this command:
+
+recommendedbatchsize
+   (unsigned integer) Number of revisions the server recommends as a batch
+   query size. If defined, clients needing to issue multiple ``manifestdata``
+   commands to obtain needed data SHOULD construct their commands to have
+   this many revisions per request.
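+
+For example, a client honoring this hint might split its node list
+like so (a sketch)::
+
+  def batches(nodes, size):
+      # Yield chunks of at most ``size`` nodes, one per command.
+      for i in range(0, len(nodes), size):
+          yield nodes[i:i + size]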
+
+pushkey
+-------
+
+Set a value using the ``pushkey`` protocol.
+
+The command receives the following arguments:
+
+namespace
+   (bytestring) Pushkey namespace to operate on.
+key
+   (bytestring) The pushkey key to set.
+old
+   (bytestring) Old value for this key.
+new
+   (bytestring) New value for this key.
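+
+For example, moving a bookmark via the command executor (a sketch; the
+old and new hex node values are hypothetical)::
+
+  with peer.commandexecutor() as e:
+      e.callcommand(b'pushkey', {
+          b'namespace': b'bookmarks',
+          b'key': b'mybook',
+          b'old': b'<old hex node>',
+          b'new': b'<new hex node>',
+      }).result()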
+
+TODO consider using binary to represent nodes in certain pushkey namespaces.
+TODO better define response type and meaning.
+
+rawstorefiledata
+----------------
+
+Allows retrieving raw files used to store repository data.
+
+The command accepts the following arguments:
+
+files
+   (array of bytestring) Describes the files that should be retrieved.
+
+   The meaning of values in this array is dependent on the storage backend used
+   by the server.
+
+The response bytestream starts with a CBOR map describing the data that follows.
+This map has the following bytestring keys:
+
+filecount
+   (unsigned integer) Total number of files whose data is being transferred.
+
+totalsize
+   (unsigned integer) Total size in bytes of the file data that will be
+   transferred. This is the on-disk file size, not the wire size.
+
+Following the map header are N file segments. Each file segment consists of a
+CBOR map followed by an indefinite-length bytestring. Each map has the
+following bytestring keys:
+
+location
+   (bytestring) Denotes the location in the repository where the file should be
+   written. Values map to vfs instances to use for writing.
+
+path
+   (bytestring) Path of file being transferred. Path is the raw store
+   path and can be any sequence of bytes that can be tracked in a Mercurial
+   manifest.
+
+size
+   (unsigned integer) Size of the file data. This will be the final written
+   file size. The total size of the data that follows the CBOR map
+   will be greater due to the encoding overhead of CBOR.
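+
+For example, a sketch of consuming the file segments (``readcbor`` and
+``iterbytestringchunks`` are hypothetical helpers; the mapping from
+``location`` values to vfs instances is specific to the storage
+backend)::
+
+  header = readcbor(stream)
+  for _ in range(header[b'filecount']):
+      meta = readcbor(stream)
+      vfs = vfsbylocation[meta[b'location']]
+      with vfs(meta[b'path'], b'wb') as fh:
+          for chunk in iterbytestringchunks(stream):
+              fh.write(chunk)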
+
+TODO this command is woefully incomplete. If we are to move forward with a
+stream clone analog, it needs a lot more metadata around how to describe what
+files are available to retrieve, and other semantics.
+
+Revision Specifiers
+===================
+
+A *revision specifier* is a map that evaluates to a set of revisions.
+
+A *revision specifier* has a ``type`` key that defines the revision
+selection type to perform. Other keys in the map are used in a
+type-specific manner.
+
+The following types are defined:
+
+changesetexplicit
+   An explicit set of enumerated changeset revisions.
+
+   The ``nodes`` key MUST contain an array of full binary nodes, expressed
+   as bytestrings.
+
+changesetexplicitdepth
+   Like ``changesetexplicit``, but contains a ``depth`` key defining the
+   unsigned integer number of ancestor revisions to also resolve. For each
+   value in ``nodes``, DAG ancestors will be walked until up to ``depth``
+   total revisions from that ancestry walk are present in the final
+   resolved set.
+
+changesetdagrange
+   Defines revisions via a DAG range of changesets on the changelog.
+
+   The ``roots`` key MUST contain an array of full, binary node values
+   representing the *root* revisions.
+
+   The ``heads`` key MUST contain an array of full, binary node values
+   representing the *head* revisions.
+
+   The DAG range between ``roots`` and ``heads`` will be resolved and all
+   revisions between will be used. Nodes in ``roots`` are not part of the
+   resolved set. Nodes in ``heads`` are. The ``roots`` array may be empty.
+   The ``heads`` array MUST be defined.
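+
+For example, a ``changesetdagrange`` specifier rendered as a Python
+literal (a sketch; ``root`` and ``head`` stand for full binary node
+bytestrings)::
+
+  {
+      b'type': b'changesetdagrange',
+      b'roots': [root],
+      b'heads': [head],
+  }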
+
+Path Filters
+============
+
+Various commands accept a *path filter* argument that defines the set of file
+paths relevant to the request.
+
+A *path filter* is defined as a map with the bytestring keys ``include`` and
+``exclude``. Each is an array of bytestring values. Each value defines a pattern
+rule (see :hg:`help patterns`) that is used to match file paths.
+
+A path matches the path filter if it is matched by a rule in the ``include``
+set but doesn't match a rule in the ``exclude`` set. In other words, a path
+matcher takes the union of all ``include`` patterns and then subtracts the
+union of all ``exclude`` patterns.
+
+Patterns MUST be prefixed with their pattern type. Only the following pattern
+types are allowed: ``path:``, ``rootfilesin:``.
+
+If the ``include`` key is omitted, it is assumed that all paths are
+relevant. The patterns from ``exclude`` will still be used, if defined.
+
+An example value is ``path:tests/foo``, which would match a file named
+``tests/foo`` or a directory ``tests/foo`` and all files under it.
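+
+A complete *path filter* rendered as a Python literal might look like
+this (a sketch)::
+
+  {
+      b'include': [b'path:tests'],
+      b'exclude': [b'path:tests/artifacts'],
+  }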
--- a/mercurial/help/merge-tools.txt	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/help/merge-tools.txt	Mon Oct 22 14:46:06 2018 -0400
@@ -36,8 +36,9 @@
 
 .. internaltoolsmarker
 
-Internal tools are always available and do not require a GUI but will by default
-not handle symlinks or binary files.
+Internal tools are always available and do not require a GUI but will
+by default not handle symlinks or binary files. See the next section
+for details about the "actual capabilities" described above.
 
 Choosing a merge tool
 =====================
@@ -54,8 +55,7 @@
 
 3. If the filename of the file to be merged matches any of the patterns in the
    merge-patterns configuration section, the first usable merge tool
-   corresponding to a matching pattern is used. Here, binary capabilities of the
-   merge tool are not considered.
+   corresponding to a matching pattern is used.
 
 4. If ui.merge is set it will be considered next. If the value is not the name
    of a configured tool, the specified value is used and must be executable by
@@ -72,6 +72,30 @@
 
 8. Otherwise, ``:prompt`` is used.
 
+For historical reasons, Mercurial treats merge tools as follows while
+examining the rules above.
+
+==== =============== ====== =======
+step specified via   binary symlink
+==== =============== ====== =======
+1.   --tool          o/o    o/o
+2.   HGMERGE         o/o    o/o
+3.   merge-patterns  o/o(*) x/?(*)
+4.   ui.merge        x/?(*) x/?(*)
+==== =============== ====== =======
+
+Each capability column indicates Mercurial behavior for
+internal/external merge tools when examining each rule.
+
+- "o": "assume that a tool has capability"
+- "x": "assume that a tool does not have capability"
+- "?": "check actual capability of a tool"
+
+If the ``merge.strict-capability-check`` configuration is true, Mercurial
+checks capabilities of merge tools strictly in the (*) cases above (i.e.
+each capability column becomes "?/?"). It is false by default for
+backward compatibility.
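+
+For example, to opt into strict checking (a minimal configuration
+sketch)::
+
+  [merge]
+  strict-capability-check = true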
+
 .. note::
 
    After selecting a merge program, Mercurial will by default attempt
--- a/mercurial/hg.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hg.py	Mon Oct 22 14:46:06 2018 -0400
@@ -35,6 +35,7 @@
     logcmdutil,
     logexchange,
     merge as mergemod,
+    narrowspec,
     node,
     phases,
     scmutil,
@@ -48,10 +49,6 @@
     vfs as vfsmod,
 )
 
-from .utils import (
-    stringutil,
-)
-
 release = lock.release
 
 # shared features
@@ -158,35 +155,49 @@
 wirepeersetupfuncs = []
 
 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
-                intents=None):
+                intents=None, createopts=None):
     """return a repository object for the specified path"""
-    obj = _peerlookup(path).instance(ui, path, create, intents=intents)
+    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
+                                     createopts=createopts)
     ui = getattr(obj, "ui", ui)
+    if ui.configbool('devel', 'debug.extensions'):
+        log = lambda msg, *values: ui.debug('debug.extensions: ',
+            msg % values, label='debug.extensions')
+    else:
+        log = lambda *a, **kw: None
     for f in presetupfuncs or []:
         f(ui, obj)
-    for name, module in extensions.extensions(ui):
-        hook = getattr(module, 'reposetup', None)
-        if hook:
-            hook(ui, obj)
+    log('- executing reposetup hooks\n')
+    with util.timedcm('all reposetup') as allreposetupstats:
+        for name, module in extensions.extensions(ui):
+            log('  - running reposetup for %s\n' % (name,))
+            hook = getattr(module, 'reposetup', None)
+            if hook:
+                with util.timedcm('reposetup %r', name) as stats:
+                    hook(ui, obj)
+                log('  > reposetup for %r took %s\n', name, stats)
+    log('> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj
 
-def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
+def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
+               createopts=None):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
-                       intents=intents)
+                       intents=intents, createopts=createopts)
     repo = peer.local()
     if not repo:
         raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
     return repo.filtered('visible')
 
-def peer(uiorrepo, opts, path, create=False, intents=None):
+def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
-    return _peerorrepo(rui, path, create, intents=intents).peer()
+    return _peerorrepo(rui, path, create, intents=intents,
+                       createopts=createopts).peer()
 
 def defaultdest(source):
     '''return default destination of clone if none is given
@@ -246,45 +257,19 @@
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
-        origsource = source = srcrepo.url()
         checkout = None
 
-    sharedpath = srcrepo.sharedpath # if our source is already sharing
-
-    destwvfs = vfsmod.vfs(dest, realpath=True)
-    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
-
-    if destvfs.lexists():
-        raise error.Abort(_('destination already exists'))
-
-    if not destwvfs.isdir():
-        destwvfs.mkdir()
-    destvfs.makedir()
+    shareditems = set()
+    if bookmarks:
+        shareditems.add(sharedbookmarks)
 
-    requirements = ''
-    try:
-        requirements = srcrepo.vfs.read('requires')
-    except IOError as inst:
-        if inst.errno != errno.ENOENT:
-            raise
+    r = repository(ui, dest, create=True, createopts={
+        'sharedrepo': srcrepo,
+        'sharedrelative': relative,
+        'shareditems': shareditems,
+    })
 
-    if relative:
-        try:
-            sharedpath = os.path.relpath(sharedpath, destvfs.base)
-            requirements += 'relshared\n'
-        except (IOError, ValueError) as e:
-            # ValueError is raised on Windows if the drive letters differ on
-            # each path
-            raise error.Abort(_('cannot calculate relative path'),
-                              hint=stringutil.forcebytestr(e))
-    else:
-        requirements += 'shared\n'
-
-    destvfs.write('requires', requirements)
-    destvfs.write('sharedpath', sharedpath)
-
-    r = repository(ui, destwvfs.base)
-    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
+    postshare(srcrepo, r, defaultpath=defaultpath)
     _postshareupdate(r, update, checkout=checkout)
     return r
 
@@ -292,6 +277,11 @@
     """convert a shared repository to a normal one
 
     Copy the store data to the repo and remove the sharedpath data.
+
+    Returns a new repository object representing the unshared repository.
+
+    The passed repository object is not usable after this function is
+    called.
     """
 
     destlock = lock = None
@@ -314,17 +304,23 @@
         destlock and destlock.release()
         lock and lock.release()
 
-    # update store, spath, svfs and sjoin of repo
-    repo.unfiltered().__init__(repo.baseui, repo.root)
+    # Removing share changes some fundamental properties of the repo instance.
+    # So we instantiate a new repo object and operate on it rather than
+    # try to keep the existing repo usable.
+    newrepo = repository(repo.baseui, repo.root, create=False)
 
     # TODO: figure out how to access subrepos that exist, but were previously
     #       removed from .hgsub
-    c = repo['.']
+    c = newrepo['.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()
 
-def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
+    localrepo.poisonrepository(repo)
+
+    return newrepo
+
+def postshare(sourcerepo, destrepo, defaultpath=None):
     """Called after a new shared repo is created.
 
     The new repo only has a requirements file and pointer to the source.
@@ -339,10 +335,6 @@
                     'default = %s\n')
         destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
 
-    with destrepo.wlock():
-        if bookmarks:
-            destrepo.vfs.write('shared', sharedbookmarks + '\n')
-
 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.
 
@@ -373,31 +365,30 @@
     try:
         hardlink = None
         topic = _('linking') if hardlink else _('copying')
-        progress = ui.makeprogress(topic)
-        num = 0
-        srcpublishing = srcrepo.publishing()
-        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
-        dstvfs = vfsmod.vfs(destpath)
-        for f in srcrepo.store.copylist():
-            if srcpublishing and f.endswith('phaseroots'):
-                continue
-            dstbase = os.path.dirname(f)
-            if dstbase and not dstvfs.exists(dstbase):
-                dstvfs.mkdir(dstbase)
-            if srcvfs.exists(f):
-                if f.endswith('data'):
-                    # 'dstbase' may be empty (e.g. revlog format 0)
-                    lockfile = os.path.join(dstbase, "lock")
-                    # lock to avoid premature writing to the target
-                    destlock = lock.lock(dstvfs, lockfile)
-                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
-                                             hardlink, progress)
-                num += n
-        if hardlink:
-            ui.debug("linked %d files\n" % num)
-        else:
-            ui.debug("copied %d files\n" % num)
-        progress.complete()
+        with ui.makeprogress(topic) as progress:
+            num = 0
+            srcpublishing = srcrepo.publishing()
+            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
+            dstvfs = vfsmod.vfs(destpath)
+            for f in srcrepo.store.copylist():
+                if srcpublishing and f.endswith('phaseroots'):
+                    continue
+                dstbase = os.path.dirname(f)
+                if dstbase and not dstvfs.exists(dstbase):
+                    dstvfs.mkdir(dstbase)
+                if srcvfs.exists(f):
+                    if f.endswith('data'):
+                        # 'dstbase' may be empty (e.g. revlog format 0)
+                        lockfile = os.path.join(dstbase, "lock")
+                        # lock to avoid premature writing to the target
+                        destlock = lock.lock(dstvfs, lockfile)
+                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
+                                                 hardlink, progress)
+                    num += n
+            if hardlink:
+                ui.debug("linked %d files\n" % num)
+            else:
+                ui.debug("copied %d files\n" % num)
         return destlock
     except: # re-raises
         release(destlock)
@@ -487,7 +478,8 @@
         util.copyfile(srcbranchcache, dstbranchcache)
 
 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
-          update=True, stream=False, branch=None, shareopts=None):
+          update=True, stream=False, branch=None, shareopts=None,
+          storeincludepats=None, storeexcludepats=None, depth=None):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory.  The
@@ -529,6 +521,13 @@
     repository. "identity" means the name is derived from the node of the first
     changeset in the repository. "remote" means the name is derived from the
     remote's path/URL. Defaults to "identity."
+
+    storeincludepats and storeexcludepats: sets of file patterns to include and
+    exclude in the repository copy, respectively. If not defined, all files
+    will be included (a "full" clone). Otherwise a "narrow" clone containing
+    only the requested files will be performed. If ``storeincludepats`` is not
+    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
+    ``path:.``. If both are empty sets, no files will be cloned.
     """
 
     if isinstance(source, bytes):
@@ -561,6 +560,44 @@
         elif destvfs.listdir():
             raise error.Abort(_("destination '%s' is not empty") % dest)
 
+    createopts = {}
+    narrow = False
+
+    if storeincludepats is not None:
+        narrowspec.validatepatterns(storeincludepats)
+        narrow = True
+
+    if storeexcludepats is not None:
+        narrowspec.validatepatterns(storeexcludepats)
+        narrow = True
+
+    if narrow:
+        # Include everything by default if only exclusion patterns defined.
+        if storeexcludepats and not storeincludepats:
+            storeincludepats = {'path:.'}
+
+        createopts['narrowfiles'] = True
+
+    if depth:
+        createopts['shallowfilestore'] = True
+
+    if srcpeer.capable(b'lfs-serve'):
+        # Repository creation honors the config if it disabled the extension, so
+        # we can't just announce that lfs will be enabled.  This check avoids
+        # saying that lfs will be enabled, and then saying it's an unknown
+        # feature.  The lfs creation option is set in either case so that a
+        # requirement is added.  If the extension is explicitly disabled but the
+        # requirement is set, the clone aborts early, before transferring any
+        # data.
+        createopts['lfs'] = True
+
+        if extensions.disabledext('lfs'):
+            ui.status(_('(remote is using large file support (lfs), but it is '
+                        'explicitly disabled in the local configuration)\n'))
+        else:
+            ui.status(_('(remote is using large file support (lfs); lfs will '
+                        'be enabled for this repository)\n'))
+
     shareopts = shareopts or {}
     sharepool = shareopts.get('pool')
     sharenamemode = shareopts.get('mode')
@@ -592,6 +629,11 @@
             raise error.Abort(_('unknown share naming mode: %s') %
                               sharenamemode)
 
+        # TODO this is a somewhat arbitrary restriction.
+        if narrow:
+            ui.status(_('(pooled storage not supported for narrow clones)\n'))
+            sharepath = None
+
         if sharepath:
             return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                   dest, pull=pull, rev=revs, update=update,
@@ -612,6 +654,10 @@
             and not phases.hassecret(srcrepo)):
             copy = not pull and not revs
 
+        # TODO this is a somewhat arbitrary restriction.
+        if narrow:
+            copy = False
+
         if copy:
             try:
                 # we use a lock here because if we race with commit, we
@@ -626,7 +672,7 @@
             srcrepo.hook('preoutgoing', throw=True, source='clone')
             hgdir = os.path.realpath(os.path.join(dest, ".hg"))
             if not os.path.exists(dest):
-                os.mkdir(dest)
+                util.makedirs(dest)
             else:
                 # only clean up directories we create ourselves
                 cleandir = hgdir
@@ -658,8 +704,9 @@
                           node=node.hex(node.nullid))
         else:
             try:
-                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
-                                # only pass ui when no srcrepo
+                # only pass ui when no srcrepo
+                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
+                                createopts=createopts)
             except OSError as inst:
                 if inst.errno == errno.EEXIST:
                     cleandir = None
@@ -687,6 +734,10 @@
                 revs = None
             local = destpeer.local()
             if local:
+                if narrow:
+                    with local.lock():
+                        local.setnarrowpats(storeincludepats, storeexcludepats)
+
                 u = util.url(abspath)
                 defaulturl = bytes(u)
                 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
@@ -699,8 +750,17 @@
                 overrides = {('ui', 'quietbookmarkmove'): True}
                 with local.ui.configoverride(overrides, 'clone'):
                     exchange.pull(local, srcpeer, revs,
-                                  streamclonerequested=stream)
+                                  streamclonerequested=stream,
+                                  includepats=storeincludepats,
+                                  excludepats=storeexcludepats,
+                                  depth=depth)
             elif srcrepo:
+                # TODO lift restriction once exchange.push() accepts narrow
+                # push.
+                if narrow:
+                    raise error.Abort(_('narrow clone not available for '
+                                        'remote destinations'))
+
                 exchange.push(srcrepo, destpeer, revs=revs,
                               bookmarks=srcrepo._bookmarks.keys())
             else:
@@ -789,7 +849,7 @@
     When overwrite is set, changes are clobbered, merged else
 
     returns stats (see pydoc mercurial.merge.applyupdates)"""
-    return mergemod.update(repo, node, False, overwrite,
+    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                            labels=['working copy', 'destination'],
                            updatecheck=updatecheck)
 
@@ -892,8 +952,8 @@
     """Branch merge with node, resolving changes. Return true if any
     unresolved conflicts."""
     if not abort:
-        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
-                                labels=labels)
+        stats = mergemod.update(repo, node, branchmerge=True, force=force,
+                                mergeforce=mergeforce, labels=labels)
     else:
         ms = mergemod.mergestate.read(repo)
         if ms.active():
--- a/mercurial/hgweb/common.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/common.py	Mon Oct 22 14:46:06 2018 -0400
@@ -182,7 +182,8 @@
             break
     try:
         os.stat(path)
-        ct = mimetypes.guess_type(pycompat.fsdecode(path))[0] or "text/plain"
+        ct = pycompat.sysbytes(
+            mimetypes.guess_type(pycompat.fsdecode(path))[0] or r"text/plain")
         with open(path, 'rb') as fh:
             data = fh.read()
 
--- a/mercurial/hgweb/hgweb_mod.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/hgweb_mod.py	Mon Oct 22 14:46:06 2018 -0400
@@ -140,11 +140,6 @@
         if not staticurl.endswith('/'):
             staticurl += '/'
 
-        # some functions for the templater
-
-        def motd(**map):
-            yield self.config('web', 'motd')
-
         # figure out which style to use
 
         vars = {}
@@ -177,12 +172,16 @@
             'urlbase': req.advertisedbaseurl,
             'repo': self.reponame,
             'encoding': encoding.encoding,
-            'motd': motd,
             'sessionvars': sessionvars,
             'pathdef': makebreadcrumb(req.apppath),
             'style': style,
             'nonce': self.nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            yield self.config('web', 'motd')
+
         tres = formatter.templateresources(self.repo.ui, self.repo)
         tmpl = templater.templater.frommapfile(mapfile,
                                                filters=filters,
@@ -436,10 +435,14 @@
             res.status = '404 Not Found'
             res.headers['Content-Type'] = ctype
             return rctx.sendtemplate('error', error=msg)
-        except (error.RepoError, error.RevlogError) as e:
+        except (error.RepoError, error.StorageError) as e:
             res.status = '500 Internal Server Error'
             res.headers['Content-Type'] = ctype
             return rctx.sendtemplate('error', error=pycompat.bytestr(e))
+        except error.Abort as e:
+            res.status = '403 Forbidden'
+            res.headers['Content-Type'] = ctype
+            return rctx.sendtemplate('error', error=pycompat.bytestr(e))
         except ErrorResponse as e:
             for k, v in e.headers:
                 res.headers[k] = v
--- a/mercurial/hgweb/hgwebdir_mod.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/hgwebdir_mod.py	Mon Oct 22 14:46:06 2018 -0400
@@ -33,6 +33,7 @@
     hg,
     profiling,
     pycompat,
+    registrar,
     scmutil,
     templater,
     templateutil,
@@ -382,8 +383,7 @@
                     fname = virtual[7:]
                 else:
                     fname = req.qsparams['static']
-                static = self.ui.config("web", "static", None,
-                                        untrusted=False)
+                static = self.ui.config("web", "static", untrusted=False)
                 if not static:
                     tp = self.templatepath or templater.templatepaths()
                     if isinstance(tp, str):
@@ -495,12 +495,6 @@
 
     def templater(self, req, nonce):
 
-        def motd(**map):
-            if self.motd is not None:
-                yield self.motd
-            else:
-                yield config('web', 'motd')
-
         def config(section, name, default=uimod._unset, untrusted=True):
             return self.ui.config(section, name, default, untrusted)
 
@@ -520,7 +514,6 @@
 
         defaults = {
             "encoding": encoding.encoding,
-            "motd": motd,
             "url": req.apppath + '/',
             "logourl": logourl,
             "logoimg": logoimg,
@@ -529,5 +522,13 @@
             "style": style,
             "nonce": nonce,
         }
+        templatekeyword = registrar.templatekeyword(defaults)
+        @templatekeyword('motd', requires=())
+        def motd(context, mapping):
+            if self.motd is not None:
+                yield self.motd
+            else:
+                yield config('web', 'motd')
+
         tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
         return tmpl
--- a/mercurial/hgweb/server.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/server.py	Mon Oct 22 14:46:06 2018 -0400
@@ -101,8 +101,8 @@
         try:
             self.do_write()
         except Exception:
-            self._start_response("500 Internal Server Error", [])
-            self._write("Internal Server Error")
+            self._start_response(r"500 Internal Server Error", [])
+            self._write(b"Internal Server Error")
             self._done()
             tb = r"".join(traceback.format_exception(*sys.exc_info()))
             # We need a native-string newline to poke in the log
@@ -174,8 +174,12 @@
         env[r'wsgi.errors'] = _error_logger(self)
         env[r'wsgi.multithread'] = isinstance(self.server,
                                              socketserver.ThreadingMixIn)
-        env[r'wsgi.multiprocess'] = isinstance(self.server,
-                                              socketserver.ForkingMixIn)
+        if util.safehasattr(socketserver, 'ForkingMixIn'):
+            env[r'wsgi.multiprocess'] = isinstance(self.server,
+                                                   socketserver.ForkingMixIn)
+        else:
+            env[r'wsgi.multiprocess'] = False
+
         env[r'wsgi.run_once'] = 0
 
         wsgiref.validate.check_environ(env)
@@ -201,12 +205,12 @@
         self._chunked = False
         for h in self.saved_headers:
             self.send_header(*h)
-            if h[0].lower() == 'content-length':
+            if h[0].lower() == r'content-length':
                 self.length = int(h[1])
         if (self.length is None and
             saved_status[0] != common.HTTP_NOT_MODIFIED):
             self._chunked = (not self.close_connection and
-                             self.request_version == "HTTP/1.1")
+                             self.request_version == r'HTTP/1.1')
             if self._chunked:
                 self.send_header(r'Transfer-Encoding', r'chunked')
             else:
@@ -219,7 +223,7 @@
         code, msg = http_status.split(None, 1)
         code = int(code)
         self.saved_status = http_status
-        bad_headers = ('connection', 'transfer-encoding')
+        bad_headers = (r'connection', r'transfer-encoding')
         self.saved_headers = [h for h in headers
                               if h[0].lower() not in bad_headers]
         return self._write
--- a/mercurial/hgweb/webcommands.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/webcommands.py	Mon Oct 22 14:46:06 2018 -0400
@@ -123,12 +123,15 @@
     text = fctx.data()
     mt = 'application/binary'
     if guessmime:
-        mt = mimetypes.guess_type(path)[0]
+        mt = mimetypes.guess_type(pycompat.fsdecode(path))[0]
         if mt is None:
             if stringutil.binary(text):
                 mt = 'application/binary'
             else:
                 mt = 'text/plain'
+        else:
+            mt = pycompat.sysbytes(mt)
+
     if mt.startswith('text/'):
         mt += '; charset="%s"' % encoding.encoding
 
@@ -143,10 +146,12 @@
     f = fctx.path()
     text = fctx.data()
     parity = paritygen(web.stripecount)
-    ishead = fctx.filerev() in fctx.filelog().headrevs()
+    ishead = fctx.filenode() in fctx.filelog().heads()
 
     if stringutil.binary(text):
-        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
+        mt = pycompat.sysbytes(
+            mimetypes.guess_type(pycompat.fsdecode(f))[0]
+            or r'application/octet-stream')
         text = '(binary:%s)' % mt
 
     def lines(context):
@@ -215,7 +220,7 @@
 
         def revgen():
             cl = web.repo.changelog
-            for i in xrange(len(web.repo) - 1, 0, -100):
+            for i in pycompat.xrange(len(web.repo) - 1, 0, -100):
                 l = []
                 for j in cl.revs(max(0, i - 99), i):
                     ctx = web.repo[j]
@@ -294,7 +299,7 @@
 
         for ctx in searchfunc[0](funcarg):
             count += 1
-            n = ctx.node()
+            n = scmutil.binnode(ctx)
             showtags = webutil.showtag(web.repo, 'changelogtag', n)
             files = webutil.listfilediffs(ctx.files(), n, web.maxfiles)
 
@@ -521,7 +526,7 @@
         symrev = 'tip'
     path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
     mf = ctx.manifest()
-    node = ctx.node()
+    node = scmutil.binnode(ctx)
 
     files = {}
     dirs = {}
@@ -857,9 +862,9 @@
 
     def filelines(f):
         if f.isbinary():
-            mt = mimetypes.guess_type(f.path())[0]
-            if not mt:
-                mt = 'application/octet-stream'
+            mt = pycompat.sysbytes(
+                mimetypes.guess_type(pycompat.fsdecode(f.path()))[0]
+                or r'application/octet-stream')
             return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
         return f.data().splitlines()
 
@@ -868,7 +873,7 @@
     leftrev = parent.rev()
     leftnode = parent.node()
     rightrev = ctx.rev()
-    rightnode = ctx.node()
+    rightnode = scmutil.binnode(ctx)
     if path in ctx:
         fctx = ctx[path]
         rightlines = filelines(fctx)
@@ -922,7 +927,7 @@
     fctx = webutil.filectx(web.repo, web.req)
     f = fctx.path()
     parity = paritygen(web.stripecount)
-    ishead = fctx.filerev() in fctx.filelog().headrevs()
+    ishead = fctx.filenode() in fctx.filelog().heads()
 
     # parents() is called once per line and several lines likely belong to
     # same revision. So it is worth caching.
@@ -945,8 +950,9 @@
 
     def annotate(context):
         if fctx.isbinary():
-            mt = (mimetypes.guess_type(fctx.path())[0]
-                  or 'application/octet-stream')
+            mt = pycompat.sysbytes(
+                mimetypes.guess_type(pycompat.fsdecode(fctx.path()))[0]
+                or r'application/octet-stream')
             lines = [dagop.annotateline(fctx=fctx.filectx(fctx.filerev()),
                                         lineno=1, text='(binary:%s)' % mt)]
         else:
@@ -1166,7 +1172,7 @@
     key = web.req.qsparams['node']
 
     if type_ not in webutil.archivespecs:
-        msg = 'Unsupported archive type: %s' % type_
+        msg = 'Unsupported archive type: %s' % stringutil.pprint(type_)
         raise ErrorResponse(HTTP_NOT_FOUND, msg)
 
     if not ((type_ in allowed or
@@ -1221,7 +1227,7 @@
     fname = web.req.qsparams['file']
     # a repo owner may set web.static in .hg/hgrc to get any file
     # readable by the user running the CGI script
-    static = web.config("web", "static", None, untrusted=False)
+    static = web.config("web", "static", untrusted=False)
     if not static:
         tp = web.templatepath or templater.templatepaths()
         if isinstance(tp, str):
@@ -1401,7 +1407,8 @@
     topicname = web.req.qsparams.get('node')
     if not topicname:
         def topics(context):
-            for entries, summary, _doc in helpmod.helptable:
+            for h in helpmod.helptable:
+                entries, summary, _doc = h[0:3]
                 yield {'topic': entries[0], 'summary': summary}
 
         early, other = [], []
@@ -1411,8 +1418,8 @@
             if 'DEPRECATED' in doc or c.startswith('debug'):
                 continue
             cmd = primary(c)
-            if cmd.startswith('^'):
-                early.append((cmd[1:], doc))
+            if getattr(e[0], 'helpbasic', False):
+                early.append((cmd, doc))
             else:
                 other.append((cmd, doc))
 
--- a/mercurial/hgweb/webutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hgweb/webutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -320,7 +320,8 @@
 
 def cleanpath(repo, path):
     path = path.lstrip('/')
-    return pathutil.canonpath(repo.root, '', path)
+    auditor = pathutil.pathauditor(repo.root, realfs=False)
+    return pathutil.canonpath(repo.root, '', path, auditor=auditor)
 
 def changectx(repo, req):
     changeid = "tip"
@@ -408,8 +409,14 @@
 
 whyunstable._requires = {'repo', 'ctx'}
 
+# helper to mark a function as a new-style template keyword; can be removed
+# once old-style function gets unsupported and new-style becomes the default
+def _kwfunc(f):
+    f._requires = ()
+    return f
+
 def commonentry(repo, ctx):
-    node = ctx.node()
+    node = scmutil.binnode(ctx)
     return {
         # TODO: perhaps ctx.changectx() should be assigned if ctx is a
         # filectx, but I'm not pretty sure if that would always work because
@@ -432,8 +439,8 @@
         'branches': nodebranchdict(repo, ctx),
         'tags': nodetagsdict(repo, node),
         'bookmarks': nodebookmarksdict(repo, node),
-        'parent': lambda **x: parents(ctx),
-        'child': lambda **x: children(ctx),
+        'parent': _kwfunc(lambda context, mapping: parents(ctx)),
+        'child': _kwfunc(lambda context, mapping: children(ctx)),
     }
 
 def changelistentry(web, ctx):
@@ -444,15 +451,15 @@
     '''
     repo = web.repo
     rev = ctx.rev()
-    n = ctx.node()
+    n = scmutil.binnode(ctx)
     showtags = showtag(repo, 'changelogtag', n)
     files = listfilediffs(ctx.files(), n, web.maxfiles)
 
     entry = commonentry(repo, ctx)
     entry.update(
-        allparents=lambda **x: parents(ctx),
-        parent=lambda **x: parents(ctx, rev - 1),
-        child=lambda **x: children(ctx, rev + 1),
+        allparents=_kwfunc(lambda context, mapping: parents(ctx)),
+        parent=_kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
+        child=_kwfunc(lambda context, mapping: children(ctx, rev + 1)),
         changelogtag=showtags,
         files=files,
     )
@@ -478,7 +485,7 @@
     if 'node' in req.qsparams:
         return templatefilters.revescape(req.qsparams['node'])
     else:
-        return short(ctx.node())
+        return short(scmutil.binnode(ctx))
 
 def _listfilesgen(context, ctx, stripecount):
     parity = paritygen(stripecount)
@@ -494,8 +501,9 @@
 def changesetentry(web, ctx):
     '''Obtain a dictionary to be used to render the "changeset" template.'''
 
-    showtags = showtag(web.repo, 'changesettag', ctx.node())
-    showbookmarks = showbookmark(web.repo, 'changesetbookmark', ctx.node())
+    showtags = showtag(web.repo, 'changesettag', scmutil.binnode(ctx))
+    showbookmarks = showbookmark(web.repo, 'changesetbookmark',
+                                 scmutil.binnode(ctx))
     showbranch = nodebranchnodefault(ctx)
 
     basectx = basechangectx(web.repo, web.req)
@@ -521,7 +529,7 @@
         changesetbranch=showbranch,
         files=templateutil.mappedgenerator(_listfilesgen,
                                            args=(ctx, web.stripecount)),
-        diffsummary=lambda **x: diffsummary(diffstatsgen),
+        diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)),
         diffstat=diffstats,
         archives=web.archivelist(ctx.hex()),
         **pycompat.strkwargs(commonentry(web.repo, ctx)))
@@ -610,24 +618,25 @@
 
 def _getcompblockgen(context, leftlines, rightlines, opcodes):
     for type, llo, lhi, rlo, rhi in opcodes:
+        type = pycompat.sysbytes(type)
         len1 = lhi - llo
         len2 = rhi - rlo
         count = min(len1, len2)
-        for i in xrange(count):
+        for i in pycompat.xrange(count):
             yield _compline(type=type,
                             leftlineno=llo + i + 1,
                             leftline=leftlines[llo + i],
                             rightlineno=rlo + i + 1,
                             rightline=rightlines[rlo + i])
         if len1 > len2:
-            for i in xrange(llo + count, lhi):
+            for i in pycompat.xrange(llo + count, lhi):
                 yield _compline(type=type,
                                 leftlineno=i + 1,
                                 leftline=leftlines[i],
                                 rightlineno=None,
                                 rightline=None)
         elif len2 > len1:
-            for i in xrange(rlo + count, rhi):
+            for i in pycompat.xrange(rlo + count, rhi):
                 yield _compline(type=type,
                                 leftlineno=None,
                                 leftline=None,
--- a/mercurial/hook.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/hook.py	Mon Oct 22 14:46:06 2018 -0400
@@ -150,7 +150,7 @@
     if repo:
         cwd = repo.root
     else:
-        cwd = pycompat.getcwd()
+        cwd = encoding.getcwd()
     r = ui.system(cmd, environ=env, cwd=cwd, blockedtag='exthook-%s' % (name,))
 
     duration = util.timer() - starttime
--- a/mercurial/httppeer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/httppeer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,9 +16,6 @@
 import weakref
 
 from .i18n import _
-from .thirdparty import (
-    cbor,
-)
 from . import (
     bundle2,
     error,
@@ -35,7 +32,9 @@
     wireprotov2server,
 )
 from .utils import (
+    cborutil,
     interfaceutil,
+    stringutil,
 )
 
 httplib = util.httplib
@@ -64,47 +63,12 @@
     result = []
 
     n = 0
-    for i in xrange(0, len(value), valuelen):
+    for i in pycompat.xrange(0, len(value), valuelen):
         n += 1
         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
 
     return result
 
-def _wraphttpresponse(resp):
-    """Wrap an HTTPResponse with common error handlers.
-
-    This ensures that any I/O from any consumer raises the appropriate
-    error and messaging.
-    """
-    origread = resp.read
-
-    class readerproxy(resp.__class__):
-        def read(self, size=None):
-            try:
-                return origread(size)
-            except httplib.IncompleteRead as e:
-                # e.expected is an integer if length known or None otherwise.
-                if e.expected:
-                    msg = _('HTTP request error (incomplete response; '
-                            'expected %d bytes got %d)') % (e.expected,
-                                                           len(e.partial))
-                else:
-                    msg = _('HTTP request error (incomplete response)')
-
-                raise error.PeerTransportError(
-                    msg,
-                    hint=_('this may be an intermittent network failure; '
-                           'if the error persists, consider contacting the '
-                           'network or server operator'))
-            except httplib.HTTPException as e:
-                raise error.PeerTransportError(
-                    _('HTTP request error (%s)') % e,
-                    hint=_('this may be an intermittent network failure; '
-                           'if the error persists, consider contacting the '
-                           'network or server operator'))
-
-    resp.__class__ = readerproxy
-
 class _multifile(object):
     def __init__(self, *fileobjs):
         for f in fileobjs:
@@ -325,7 +289,7 @@
                 % (util.timer() - start, code))
 
     # Insert error handlers for common I/O failures.
-    _wraphttpresponse(res)
+    urlmod.wrapresponse(res)
 
     return res
 
@@ -401,8 +365,8 @@
     elif version_info == (0, 2):
         # application/mercurial-0.2 always identifies the compression
         # engine in the payload header.
-        elen = struct.unpack('B', resp.read(1))[0]
-        ename = resp.read(elen)
+        elen = struct.unpack('B', util.readexactly(resp, 1))[0]
+        ename = util.readexactly(resp, elen)
         engine = util.compengines.forwiretype(ename)
 
         resp = engine.decompressorreader(resp)
@@ -441,7 +405,11 @@
         return True
 
     def close(self):
-        pass
+        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
+                       'received %d bytes in responses)\n') %
+                     (self._urlopener.requestscount,
+                      self._urlopener.sentbytescount,
+                      self._urlopener.receivedbytescount))
 
     # End of ipeerconnection interface.
 
@@ -544,11 +512,33 @@
     def _abort(self, exception):
         raise exception
 
-def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
-    reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
-                                             buffersends=True)
+def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests,
+                  redirect):
+    wireprotoframing.populatestreamencoders()
+
+    uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
+
+    if uiencoders:
+        encoders = []
 
-    handler = wireprotov2peer.clienthandler(ui, reactor)
+        for encoder in uiencoders:
+            if encoder not in wireprotoframing.STREAM_ENCODERS:
+                ui.warn(_(b'wire protocol version 2 encoder referenced in '
+                          b'config (%s) is not known; ignoring\n') % encoder)
+            else:
+                encoders.append(encoder)
+
+    else:
+        encoders = wireprotoframing.STREAM_ENCODERS_ORDER
+
+    reactor = wireprotoframing.clientreactor(ui,
+                                             hasmultiplesend=False,
+                                             buffersends=True,
+                                             clientcontentencoders=encoders)
+
+    handler = wireprotov2peer.clienthandler(ui, reactor,
+                                            opener=opener,
+                                            requestbuilder=requestbuilder)
 
     url = '%s/%s' % (apiurl, permission)
 
@@ -557,8 +547,12 @@
     else:
         url += '/%s' % requests[0][0]
 
+    ui.debug('sending %d commands\n' % len(requests))
     for command, args, f in requests:
-        assert not list(handler.callcommand(command, args, f))
+        ui.debug('sending command %s: %s\n' % (
+            command, stringutil.pprint(args, indent=2)))
+        assert not list(handler.callcommand(command, args, f,
+                                            redirect=redirect))
 
     # TODO stream this.
     body = b''.join(map(bytes, handler.flushcommands()))
@@ -600,12 +594,14 @@
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class httpv2executor(object):
-    def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
+    def __init__(self, ui, opener, requestbuilder, apiurl, descriptor,
+                 redirect):
         self._ui = ui
         self._opener = opener
         self._requestbuilder = requestbuilder
         self._apiurl = apiurl
         self._descriptor = descriptor
+        self._redirect = redirect
         self._sent = False
         self._closed = False
         self._neededpermissions = set()
@@ -705,7 +701,7 @@
 
         handler, resp = sendv2request(
             self._ui, self._opener, self._requestbuilder, self._apiurl,
-            permission, calls)
+            permission, calls, self._redirect)
 
         # TODO we probably want to validate the HTTP code, media type, etc.
 
@@ -724,6 +720,8 @@
         if not self._responsef:
             return
 
+        # TODO ^C here may not result in immediate program termination.
+
         try:
             self._responsef.result()
         finally:
@@ -743,17 +741,15 @@
     def _handleresponse(self, handler, resp):
         # Called in a thread to read the response.
 
-        while handler.readframe(resp):
+        while handler.readdata(resp):
             pass
 
-# TODO implement interface for version 2 peers
-@interfaceutil.implementer(repository.ipeerconnection,
-                           repository.ipeercapabilities,
-                           repository.ipeerrequests)
+@interfaceutil.implementer(repository.ipeerv2)
 class httpv2peer(object):
     def __init__(self, ui, repourl, apipath, opener, requestbuilder,
                  apidescriptor):
         self.ui = ui
+        self.apidescriptor = apidescriptor
 
         if repourl.endswith('/'):
             repourl = repourl[:-1]
@@ -763,7 +759,8 @@
         self._apiurl = '%s/%s' % (repourl, apipath)
         self._opener = opener
         self._requestbuilder = requestbuilder
-        self._descriptor = apidescriptor
+
+        self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)
 
     # Start of ipeerconnection.
 
@@ -781,7 +778,11 @@
         return False
 
     def close(self):
-        pass
+        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
+                       'received %d bytes in responses)\n') %
+                     (self._opener.requestscount,
+                      self._opener.sentbytescount,
+                      self._opener.receivedbytescount))
 
     # End of ipeerconnection.
 
@@ -797,9 +798,13 @@
             return True
 
         # Other concepts.
-        if name in ('bundle2',):
+        if name in ('bundle2',):
             return True
 
+        # Alias command-* to presence of command of that name.
+        if name.startswith('command-'):
+            return name[len('command-'):] in self.apidescriptor['commands']
+
         return False
 
     def requirecap(self, name, purpose):
@@ -818,7 +823,7 @@
 
     def commandexecutor(self):
         return httpv2executor(self.ui, self._opener, self._requestbuilder,
-                              self._apiurl, self._descriptor)
+                              self._apiurl, self.apidescriptor, self._redirect)
 
 # Registry of API service names to metadata about peers that handle it.
 #
@@ -907,8 +912,8 @@
     if advertisev2:
         if ct == 'application/mercurial-cbor':
             try:
-                info = cbor.loads(rawdata)
-            except cbor.CBORDecodeError:
+                info = cborutil.decodeall(rawdata)[0]
+            except cborutil.CBORDecodeError:
                 raise error.Abort(_('error decoding CBOR from remote server'),
                                   hint=_('try again and consider contacting '
                                          'the server operator'))
@@ -977,7 +982,7 @@
     return httppeer(ui, path, respurl, opener, requestbuilder,
                     info['v1capabilities'])
 
-def instance(ui, path, create, intents=None):
+def instance(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_('cannot create new http repository'))
     try:
--- a/mercurial/i18n.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/i18n.py	Mon Oct 22 14:46:06 2018 -0400
@@ -75,7 +75,9 @@
             # goofy unicode docstrings in test
             paragraphs = message.split(u'\n\n')
         else:
-            paragraphs = [p.decode("ascii") for p in message.split('\n\n')]
+            # should be ascii, but we have unicode docstrings in test, which
+            # are converted to utf-8 bytes on Python 3.
+            paragraphs = [p.decode("utf-8") for p in message.split('\n\n')]
         # Be careful not to translate the empty string -- it holds the
         # meta data of the .po file.
         u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
--- a/mercurial/keepalive.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/keepalive.py	Mon Oct 22 14:46:06 2018 -0400
@@ -172,8 +172,11 @@
             return dict(self._hostmap)
 
 class KeepAliveHandler(object):
-    def __init__(self):
+    def __init__(self, timeout=None):
         self._cm = ConnectionManager()
+        self._timeout = timeout
+        self.requestscount = 0
+        self.sentbytescount = 0
 
     #### Connection Management
     def open_connections(self):
@@ -232,7 +235,7 @@
                 h = self._cm.get_ready_conn(host)
             else:
                 # no (working) free connections were found.  Create a new one.
-                h = http_class(host)
+                h = http_class(host, timeout=self._timeout)
                 if DEBUG:
                     DEBUG.info("creating new connection to %s (%d)",
                                host, id(h))
@@ -247,8 +250,10 @@
         except (socket.error, httplib.HTTPException) as err:
             raise urlerr.urlerror(err)
 
-        # if not a persistent connection, don't try to reuse it
-        if r.will_close:
+        # If not a persistent connection, don't try to reuse it. Look
+        # for this using getattr() since vcr doesn't define this
+        # attribute, and in that case always close the connection.
+        if getattr(r, r'will_close', True):
             self._cm.remove(h)
 
         if DEBUG:
@@ -310,6 +315,8 @@
         return r
 
     def _start_transaction(self, h, req):
+        oldbytescount = getattr(h, 'sentbytescount', 0)
+
         # What follows mostly reimplements HTTPConnection.request()
         # except it adds self.parent.addheaders in the mix and sends headers
         # in a deterministic order (to make testing easier).
@@ -344,6 +351,17 @@
         if urllibcompat.hasdata(req):
             h.send(data)
 
+        # This will fail to record events in case of I/O failure. That's OK.
+        self.requestscount += 1
+        self.sentbytescount += getattr(h, 'sentbytescount', 0) - oldbytescount
+
+        try:
+            self.parent.requestscount += 1
+            self.parent.sentbytescount += (
+                getattr(h, 'sentbytescount', 0) - oldbytescount)
+        except AttributeError:
+            pass
+
 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
     pass
 
@@ -376,6 +394,7 @@
                                       method=method, **extrakw)
         self.fileno = sock.fileno
         self.code = None
+        self.receivedbytescount = 0
         self._rbuf = ''
         self._rbufsize = 8096
         self._handler = None # inserted by the handler later
@@ -415,9 +434,21 @@
                 s = self._rbuf[:amt]
                 self._rbuf = self._rbuf[amt:]
                 return s
+        # Careful! http.client.HTTPResponse.read() on Python 3 is
+        # implemented using readinto(), which can duplicate self._rbuf
+        # if it's not empty.
+        s = self._rbuf
+        self._rbuf = ''
+        data = self._raw_read(amt)
 
-        s = self._rbuf + self._raw_read(amt)
-        self._rbuf = ''
+        self.receivedbytescount += len(data)
+        self._connection.receivedbytescount += len(data)
+        try:
+            self._handler.parent.receivedbytescount += len(data)
+        except AttributeError:
+            pass
+
+        s += data
         return s
 
     # stolen from Python SVN #68532 to fix issue1088
@@ -493,6 +524,13 @@
             if not new:
                 break
 
+            self.receivedbytescount += len(new)
+            self._connection.receivedbytescount += len(new)
+            try:
+                self._handler.parent.receivedbytescount += len(new)
+            except AttributeError:
+                pass
+
             chunks.append(new)
             i = new.find('\n')
             if i >= 0:
@@ -538,6 +576,14 @@
             return total
         mv = memoryview(dest)
         got = self._raw_readinto(mv[have:total])
+
+        self.receivedbytescount += got
+        self._connection.receivedbytescount += got
+        try:
+            self._handler.parent.receivedbytescount += got
+        except AttributeError:
+            pass
+
         dest[0:have] = self._rbuf
         got += len(self._rbuf)
         self._rbuf = ''
@@ -580,9 +626,11 @@
             data = read(blocksize)
             while data:
                 self.sock.sendall(data)
+                self.sentbytescount += len(data)
                 data = read(blocksize)
         else:
             self.sock.sendall(str)
+            self.sentbytescount += len(str)
     except socket.error as v:
         reraise = True
         if v[0] == errno.EPIPE:      # Broken pipe
@@ -610,11 +658,19 @@
     return safegetresponse
 
 class HTTPConnection(httplib.HTTPConnection):
+    # url.httpsconnection inherits from this. So when adding/removing
+    # attributes, be sure to audit httpsconnection() for unintended
+    # consequences.
+
     # use the modified response class
     response_class = HTTPResponse
     send = safesend
     getresponse = wrapgetresponse(httplib.HTTPConnection)
 
+    def __init__(self, *args, **kwargs):
+        httplib.HTTPConnection.__init__(self, *args, **kwargs)
+        self.sentbytescount = 0
+        self.receivedbytescount = 0
 
 #########################################################################
 #####   TEST FUNCTIONS
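
The keepalive changes above thread three layers of byte counters (per
response, per connection, and per opener/handler parent) through every
send and read path, using getattr() defaults and AttributeError guards
so that objects created before the counters existed keep working. A
minimal sketch of that propagation pattern, assuming toy stand-ins
(Opener, Connection, Response) rather than the real urllib2 classes:

    class Opener(object):
        def __init__(self):
            self.sentbytescount = 0
            self.receivedbytescount = 0

    class Connection(object):
        def __init__(self):
            self.sentbytescount = 0
            self.receivedbytescount = 0

    class Response(object):
        def __init__(self, connection, opener=None):
            self._connection = connection
            self._opener = opener  # may predate the counters
            self.receivedbytescount = 0

        def _record(self, n):
            # Update every interested layer; tolerate missing
            # attributes the same way the code above does.
            self.receivedbytescount += n
            self._connection.receivedbytescount += n
            try:
                self._opener.receivedbytescount += n
            except AttributeError:
                pass

    conn = Connection()
    resp = Response(conn, opener=Opener())
    resp._record(1024)
    assert resp.receivedbytescount == conn.receivedbytescount == 1024
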
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/linelog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,436 @@
+# linelog - efficient cache for annotate data
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""linelog is an efficient cache for annotate data inspired by SCCS Weaves.
+
+SCCS Weaves are an implementation of
+https://en.wikipedia.org/wiki/Interleaved_deltas. See
+mercurial/help/internals/linelog.txt for an exploration of SCCS weaves
+and how linelog works in detail.
+
+Here's a hacker's summary: a linelog is a program which is executed in
+the context of a revision. Executing the program emits information
+about lines, including the revision that introduced them and the line
+number in the file at the introducing revision. When an insertion or
+deletion is performed on the file, a jump instruction is used to patch
+in a new body of annotate information.
+"""
+from __future__ import absolute_import, print_function
+
+import abc
+import struct
+
+from .thirdparty import (
+    attr,
+)
+from . import (
+    pycompat,
+)
+
+_llentry = struct.Struct('>II')
+
+class LineLogError(Exception):
+    """Error raised when something bad happens internally in linelog."""
+
+@attr.s
+class lineinfo(object):
+    # Introducing revision of this line.
+    rev = attr.ib()
+    # Line number for this line in its introducing revision.
+    linenum = attr.ib()
+    # Private. Offset in the linelog program of this line. Used internally.
+    _offset = attr.ib()
+
+@attr.s
+class annotateresult(object):
+    rev = attr.ib()
+    lines = attr.ib()
+    _eof = attr.ib()
+
+    def __iter__(self):
+        return iter(self.lines)
+
+class _llinstruction(object):
+
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __init__(self, op1, op2):
+        pass
+
+    @abc.abstractmethod
+    def __str__(self):
+        pass
+
+    def __repr__(self):
+        return str(self)
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        pass
+
+    @abc.abstractmethod
+    def encode(self):
+        """Encode this instruction to the binary linelog format."""
+
+    @abc.abstractmethod
+    def execute(self, rev, pc, emit):
+        """Execute this instruction.
+
+        Args:
+          rev: The revision we're annotating.
+          pc: The current offset in the linelog program.
+          emit: A function that accepts a single lineinfo object.
+
+        Returns:
+          The new value of pc. Returns None if execution should stop
+          (that is, we've found the end of the file).
+        """
+
+class _jge(_llinstruction):
+    """If the current rev is greater than or equal to op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JGE %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(self._cmprev << 2, self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev >= self._cmprev:
+            return self._target
+        return pc + 1
+
+class _jump(_llinstruction):
+    """Unconditional jumps are expressed as a JGE with op1 set to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed JUMP, op1 must be 0, got %d" % op1)
+        self._target = op2
+
+    def __str__(self):
+        return r'JUMP %d' % (self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(0, self._target)
+
+    def execute(self, rev, pc, emit):
+        return self._target
+
+class _eof(_llinstruction):
+    """EOF is expressed as a JGE that always jumps to 0."""
+
+    def __init__(self, op1, op2):
+        if op1 != 0:
+            raise LineLogError("malformed EOF, op1 must be 0, got %d" % op1)
+        if op2 != 0:
+            raise LineLogError("malformed EOF, op2 must be 0, got %d" % op2)
+
+    def __str__(self):
+        return r'EOF'
+
+    def __eq__(self, other):
+        return type(self) == type(other)
+
+    def encode(self):
+        return _llentry.pack(0, 0)
+
+    def execute(self, rev, pc, emit):
+        return None
+
+class _jl(_llinstruction):
+    """If the current rev is less than op1, jump to op2."""
+
+    def __init__(self, op1, op2):
+        self._cmprev = op1
+        self._target = op2
+
+    def __str__(self):
+        return r'JL %d %d' % (self._cmprev, self._target)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._cmprev == other._cmprev
+                and self._target == other._target)
+
+    def encode(self):
+        return _llentry.pack(1 | (self._cmprev << 2), self._target)
+
+    def execute(self, rev, pc, emit):
+        if rev < self._cmprev:
+            return self._target
+        return pc + 1
+
+class _line(_llinstruction):
+    """Emit a line."""
+
+    def __init__(self, op1, op2):
+        # This line was introduced by this revision number.
+        self._rev = op1
+        # This line had the specified line number in the introducing revision.
+        self._origlineno = op2
+
+    def __str__(self):
+        return r'LINE %d %d' % (self._rev, self._origlineno)
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._rev == other._rev
+                and self._origlineno == other._origlineno)
+
+    def encode(self):
+        return _llentry.pack(2 | (self._rev << 2), self._origlineno)
+
+    def execute(self, rev, pc, emit):
+        emit(lineinfo(self._rev, self._origlineno, pc))
+        return pc + 1
+
+def _decodeone(data, offset):
+    """Decode a single linelog instruction from an offset in a buffer."""
+    try:
+        op1, op2 = _llentry.unpack_from(data, offset)
+    except struct.error as e:
+        raise LineLogError('reading an instruction failed: %r' % e)
+    opcode = op1 & 0b11
+    op1 = op1 >> 2
+    if opcode == 0:
+        if op1 == 0:
+            if op2 == 0:
+                return _eof(op1, op2)
+            return _jump(op1, op2)
+        return _jge(op1, op2)
+    elif opcode == 1:
+        return _jl(op1, op2)
+    elif opcode == 2:
+        return _line(op1, op2)
+    raise NotImplementedError('Unimplemented opcode %r' % opcode)
+
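
Each linelog entry is a pair of big-endian u32s; the low two bits of
the first word carry the opcode (0b00 for JGE/JUMP/EOF, 0b01 for JL,
0b10 for LINE) and the remaining 30 bits carry the revision operand.
A standalone roundtrip sketch of that layout, mirroring encode() and
_decodeone() above without importing this module:

    import struct

    entry = struct.Struct('>II')

    def encodejge(cmprev, target):
        # opcode 0b00 occupies the low two bits of the first word
        return entry.pack(cmprev << 2, target)

    def decode(data, offset=0):
        op1, op2 = entry.unpack_from(data, offset)
        return op1 & 0b11, op1 >> 2, op2

    assert decode(encodejge(5, 42)) == (0, 5, 42)
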
+class linelog(object):
+    """Efficient cache for per-line history information."""
+
+    def __init__(self, program=None, maxrev=0):
+        if program is None:
+            # We pad the program with an extra leading EOF so that our
+            # offsets will match the C code exactly. This means we can
+            # interoperate with the C code.
+            program = [_eof(0, 0), _eof(0, 0)]
+        self._program = program
+        self._lastannotate = None
+        self._maxrev = maxrev
+
+    def __eq__(self, other):
+        return (type(self) == type(other)
+                and self._program == other._program
+                and self._maxrev == other._maxrev)
+
+    def __repr__(self):
+        return '<linelog at %s: maxrev=%d size=%d>' % (
+            hex(id(self)), self._maxrev, len(self._program))
+
+    def debugstr(self):
+        fmt = r'%%%dd %%s' % len(str(len(self._program)))
+        return pycompat.sysstr('\n').join(
+            fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1))
+
+    @classmethod
+    def fromdata(cls, buf):
+        if len(buf) % _llentry.size != 0:
+            raise LineLogError(
+                "invalid linelog buffer size %d (must be a multiple of %d)" % (
+                    len(buf), _llentry.size))
+        expected = len(buf) // _llentry.size
+        fakejge = _decodeone(buf, 0)
+        if isinstance(fakejge, _jump):
+            maxrev = 0
+        else:
+            maxrev = fakejge._cmprev
+        numentries = fakejge._target
+        if expected != numentries:
+            raise LineLogError("corrupt linelog data: claimed"
+                               " %d entries but given data for %d entries" % (
+                                   numentries, expected))
+        instructions = [_eof(0, 0)]
+        for offset in pycompat.xrange(1, numentries):
+            instructions.append(_decodeone(buf, offset * _llentry.size))
+        return cls(instructions, maxrev=maxrev)
+
+    def encode(self):
+        hdr = _jge(self._maxrev, len(self._program)).encode()
+        return hdr + ''.join(i.encode() for i in self._program[1:])
+
+    def clear(self):
+        self._program = []
+        self._maxrev = 0
+        self._lastannotate = None
+
+    def replacelines_vec(self, rev, a1, a2, blines):
+        return self.replacelines(rev, a1, a2, 0, len(blines),
+                                 _internal_blines=blines)
+
+    def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
+        """Replace lines [a1, a2) with lines [b1, b2)."""
+        if self._lastannotate:
+            # TODO(augie): make replacelines() accept a revision at
+            # which we're editing as well as a revision to mark
+            # responsible for the edits. In hg-experimental it's
+            # stateful like this, so we're doing the same thing to
+            # retain compatibility with absorb until that's imported.
+            ar = self._lastannotate
+        else:
+            ar = self.annotate(rev)
+        if a1 > len(ar.lines):
+            raise LineLogError(
+                '%d contains %d lines, tried to access line %d' % (
+                    rev, len(ar.lines), a1))
+        elif a1 == len(ar.lines):
+            # Simulated EOF instruction since we're at EOF, which
+            # doesn't have a "real" line.
+            a1inst = _eof(0, 0)
+            a1info = lineinfo(0, 0, ar._eof)
+        else:
+            a1info = ar.lines[a1]
+            a1inst = self._program[a1info._offset]
+        programlen = self._program.__len__
+        oldproglen = programlen()
+        appendinst = self._program.append
+
+        # insert
+        blineinfos = []
+        bappend = blineinfos.append
+        if b1 < b2:
+            # Determine the jump target for the JGE at the start of
+            # the new block.
+            tgt = oldproglen + (b2 - b1 + 1)
+            # Jump to skip the insert if we're at an older revision.
+            appendinst(_jl(rev, tgt))
+            for linenum in pycompat.xrange(b1, b2):
+                if _internal_blines is None:
+                    bappend(lineinfo(rev, linenum, programlen()))
+                    appendinst(_line(rev, linenum))
+                else:
+                    newrev, newlinenum = _internal_blines[linenum]
+                    bappend(lineinfo(newrev, newlinenum, programlen()))
+                    appendinst(_line(newrev, newlinenum))
+        # delete
+        if a1 < a2:
+            if a2 > len(ar.lines):
+                raise LineLogError(
+                    '%d contains %d lines, tried to access line %d' % (
+                        rev, len(ar.lines), a2))
+            elif a2 == len(ar.lines):
+                endaddr = ar._eof
+            else:
+                endaddr = ar.lines[a2]._offset
+            if a2 > 0 and rev < self._maxrev:
+                # If we're here, we're deleting a chunk of an old
+                # commit, so we need to be careful and not touch
+                # invisible lines between a2-1 and a2 (IOW, lines that
+                # are added later).
+                endaddr = ar.lines[a2 - 1]._offset + 1
+            appendinst(_jge(rev, endaddr))
+        # copy instruction from a1
+        a1instpc = programlen()
+        appendinst(a1inst)
+        # if a1inst isn't a jump or EOF, then we need to add an unconditional
+        # jump back into the program here.
+        if not isinstance(a1inst, (_jump, _eof)):
+            appendinst(_jump(0, a1info._offset + 1))
+        # Patch instruction at a1, which makes our patch live.
+        self._program[a1info._offset] = _jump(0, oldproglen)
+
+        # Update self._lastannotate in place. This serves as a cache to avoid
+        # expensive "self.annotate" in this function, when "replacelines" is
+        # used continuously.
+        if len(self._lastannotate.lines) > a1:
+            self._lastannotate.lines[a1]._offset = a1instpc
+        else:
+            assert isinstance(a1inst, _eof)
+            self._lastannotate._eof = a1instpc
+        self._lastannotate.lines[a1:a2] = blineinfos
+        self._lastannotate.rev = max(self._lastannotate.rev, rev)
+
+        if rev > self._maxrev:
+            self._maxrev = rev
+
+    def annotate(self, rev):
+        pc = 1
+        lines = []
+        executed = 0
+        # Sanity check: if instructions executed exceeds len(program), we
+        # hit an infinite loop in the linelog program somehow and we
+        # should stop.
+        while pc is not None and executed < len(self._program):
+            inst = self._program[pc]
+            lastpc = pc
+            pc = inst.execute(rev, pc, lines.append)
+            executed += 1
+        if pc is not None:
+            raise LineLogError(
+                r'Probably hit an infinite loop in linelog. Program:\n' +
+                self.debugstr())
+        ar = annotateresult(rev, lines, lastpc)
+        self._lastannotate = ar
+        return ar
+
+    @property
+    def maxrev(self):
+        return self._maxrev
+
+    # Stateful methods which depend on the value of the last
+    # annotation run. This API is for compatibility with the original
+    # linelog, and we should probably consider refactoring it.
+    @property
+    def annotateresult(self):
+        """Return the last annotation result. C linelog code exposed this."""
+        return [(l.rev, l.linenum) for l in self._lastannotate.lines]
+
+    def getoffset(self, line):
+        return self._lastannotate.lines[line]._offset
+
+    def getalllines(self, start=0, end=0):
+        """Get all lines that ever occurred in [start, end).
+
+        Passing start == end == 0 means "all lines ever".
+
+        This works in terms of *internal* program offsets, not line numbers.
+        """
+        pc = start or 1
+        lines = []
+        # only take as many steps as there are instructions in the
+        # program - if we don't find an EOF or our stop-line before
+        # then, something is badly broken.
+        for step in pycompat.xrange(len(self._program)):
+            inst = self._program[pc]
+            nextpc = pc + 1
+            if isinstance(inst, _jump):
+                nextpc = inst._target
+            elif isinstance(inst, _eof):
+                return lines
+            elif isinstance(inst, (_jl, _jge)):
+                pass
+            elif isinstance(inst, _line):
+                lines.append((inst._rev, inst._origlineno))
+            else:
+                raise LineLogError("Illegal instruction %r" % inst)
+            if nextpc == end:
+                return lines
+            pc = nextpc
+        raise LineLogError("Failed to perform getalllines")
--- a/mercurial/localrepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/localrepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -17,8 +17,10 @@
 
 from .i18n import _
 from .node import (
+    bin,
     hex,
     nullid,
+    nullrev,
     short,
 )
 from . import (
@@ -56,7 +58,7 @@
     revsetlang,
     scmutil,
     sparse,
-    store,
+    store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
@@ -70,6 +72,10 @@
     stringutil,
 )
 
+from .revlogutils import (
+    constants as revlogconst,
+)
+
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
@@ -372,8 +378,456 @@
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
 
-@interfaceutil.implementer(repository.completelocalrepository)
+def makelocalrepository(baseui, path, intents=None):
+    """Create a local repository object.
+
+    Given arguments needed to construct a local repository, this function
+    performs various early repository loading functionality (such as
+    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
+    the repository can be opened, derives a type suitable for representing
+    that repository, and returns an instance of it.
+
+    The returned object conforms to the ``repository.completelocalrepository``
+    interface.
+
+    The repository type is derived by calling a series of factory functions
+    for each aspect/interface of the final repository. These are defined by
+    ``REPO_INTERFACES``.
+
+    Each factory function is called to produce a type implementing a specific
+    interface. The cumulative list of returned types will be combined into a
+    new type and that type will be instantiated to represent the local
+    repository.
+
+    The factory functions each receive various state that may be consulted
+    as part of deriving a type.
+
+    Extensions should wrap these factory functions to customize repository type
+    creation. Note that an extension's wrapped function may be called even if
+    that extension is not loaded for the repo being constructed. Extensions
+    should check if their ``__name__`` appears in the
+    ``extensionmodulenames`` set passed to the factory function and no-op if
+    not.
+    """
+    ui = baseui.copy()
+    # Prevent copying repo configuration.
+    ui.copy = baseui.copy
+
+    # Working directory VFS rooted at repository root.
+    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
+
+    # Main VFS for .hg/ directory.
+    hgpath = wdirvfs.join(b'.hg')
+    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
+
+    # The .hg/ path should exist and should be a directory. All other
+    # cases are errors.
+    if not hgvfs.isdir():
+        try:
+            hgvfs.stat()
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+        raise error.RepoError(_(b'repository %s not found') % path)
+
+    # .hg/requires file contains a newline-delimited list of
+    # features/capabilities the opener (us) must have in order to use
+    # the repository. This file was introduced in Mercurial 0.9.2,
+    # which means very old repositories may not have one. We assume
+    # a missing file translates to no requirements.
+    try:
+        requirements = set(hgvfs.read(b'requires').splitlines())
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        requirements = set()
+
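
(.hg/requires is literally newline-delimited tokens; a repository
created by hg of this era typically contains something like

    dotencode
    fncache
    generaldelta
    revlogv1
    store

and each token must be recognized below before the repo is opened.)
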
+    # The .hg/hgrc file may load extensions or contain config options
+    # that influence repository construction. Attempt to load it and
+    # process any new extensions that it may have pulled in.
+    try:
+        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
+        # Run this before extensions.loadall() so extensions can be
+        # automatically enabled.
+        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
+    except IOError:
+        pass
+    else:
+        extensions.loadall(ui)
+
+    # Set of module names of extensions loaded for this repository.
+    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
+
+    supportedrequirements = gathersupportedrequirements(ui)
+
+    # We first validate the requirements are known.
+    ensurerequirementsrecognized(requirements, supportedrequirements)
+
+    # Then we validate that the known set is reasonable to use together.
+    ensurerequirementscompatible(ui, requirements)
+
+    # TODO there are unhandled edge cases related to opening repositories with
+    # shared storage. If storage is shared, we should also test for requirements
+    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
+    # that repo, as that repo may load extensions needed to open it. This is a
+    # bit complicated because we don't want the other hgrc to overwrite settings
+    # in this hgrc.
+    #
+    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
+    # file when sharing repos. But if a requirement is added after the share is
+    # performed, thereby introducing a new requirement for the opener, we may
+    # not see that and could encounter a run-time error interacting with
+    # that shared store since it has an unknown-to-us requirement.
+
+    # At this point, we know we should be capable of opening the repository.
+    # Now get on with doing that.
+
+    features = set()
+
+    # The "store" part of the repository holds versioned data. How it is
+    # accessed is determined by various requirements. The ``shared`` or
+    # ``relshared`` requirements indicate the store lives in the path contained
+    # in the ``.hg/sharedpath`` file. This is an absolute path for
+    # ``shared`` and relative to ``.hg/`` for ``relshared``.
+    if b'shared' in requirements or b'relshared' in requirements:
+        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
+        if b'relshared' in requirements:
+            sharedpath = hgvfs.join(sharedpath)
+
+        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
+
+        if not sharedvfs.exists():
+            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
+                                    b'directory %s') % sharedvfs.base)
+
+        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
+
+        storebasepath = sharedvfs.base
+        cachepath = sharedvfs.join(b'cache')
+    else:
+        storebasepath = hgvfs.base
+        cachepath = hgvfs.join(b'cache')
+
+    # The store has changed over time and the exact layout is dictated by
+    # requirements. The store interface abstracts differences across all
+    # of them.
+    store = makestore(requirements, storebasepath,
+                      lambda base: vfsmod.vfs(base, cacheaudited=True))
+    hgvfs.createmode = store.createmode
+
+    storevfs = store.vfs
+    storevfs.options = resolvestorevfsoptions(ui, requirements, features)
+
+    # The cache vfs is used to manage cache files.
+    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
+    cachevfs.createmode = store.createmode
+
+    # Now resolve the type for the repository object. We do this by repeatedly
+    # calling a factory function to produce types for specific aspects of the
+    # repo's operation. The aggregate returned types are used as base classes
+    # for a dynamically-derived type, which will represent our new repository.
+
+    bases = []
+    extrastate = {}
+
+    for iface, fn in REPO_INTERFACES:
+        # We pass all potentially useful state to give extensions tons of
+        # flexibility.
+        typ = fn()(ui=ui,
+                   intents=intents,
+                   requirements=requirements,
+                   features=features,
+                   wdirvfs=wdirvfs,
+                   hgvfs=hgvfs,
+                   store=store,
+                   storevfs=storevfs,
+                   storeoptions=storevfs.options,
+                   cachevfs=cachevfs,
+                   extensionmodulenames=extensionmodulenames,
+                   extrastate=extrastate,
+                   baseclasses=bases)
+
+        if not isinstance(typ, type):
+            raise error.ProgrammingError('unable to construct type for %s' %
+                                         iface)
+
+        bases.append(typ)
+
+    # type() allows you to use characters in type names that wouldn't be
+    # recognized as Python symbols in source code. We abuse that to add
+    # rich information about our constructed repo.
+    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
+        wdirvfs.base,
+        b','.join(sorted(requirements))))
+
+    cls = type(name, tuple(bases), {})
+
+    return cls(
+        baseui=baseui,
+        ui=ui,
+        origroot=path,
+        wdirvfs=wdirvfs,
+        hgvfs=hgvfs,
+        requirements=requirements,
+        supportedrequirements=supportedrequirements,
+        sharedpath=storebasepath,
+        store=store,
+        cachevfs=cachevfs,
+        features=features,
+        intents=intents)
+
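
The heart of the factory above is the type() call that fuses one base
class per interface into a single repository class. A toy sketch of
that composition technique (mainstore and filestorage are illustrative
stand-ins for the factory results, not the real classes):

    class mainstore(object):
        def commit(self):
            return 'committed'

    class filestorage(object):
        def file(self, path):
            return 'filelog for %s' % path

    bases = (mainstore, filestorage)
    # type() accepts names that aren't valid Python identifiers, which
    # is abused above to embed the repo path and requirements.
    cls = type('derivedrepo:/some/path<generaldelta,revlogv1>',
               bases, {})

    repo = cls()
    assert repo.commit() == 'committed'
    assert repo.file('a') == 'filelog for a'
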
+def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
+    """Perform additional actions after .hg/hgrc is loaded.
+
+    This function is called during repository loading immediately after
+    the .hg/hgrc file is loaded and before per-repo extensions are loaded.
+
+    The function can be used to validate configs, automatically add
+    options (including extensions) based on requirements, etc.
+    """
+
+    # Map of requirements to list of extensions to load automatically when
+    # requirement is present.
+    autoextensions = {
+        b'largefiles': [b'largefiles'],
+        b'lfs': [b'lfs'],
+    }
+
+    for requirement, names in sorted(autoextensions.items()):
+        if requirement not in requirements:
+            continue
+
+        for name in names:
+            if not ui.hasconfig(b'extensions', name):
+                ui.setconfig(b'extensions', name, b'', source='autoload')
+
+def gathersupportedrequirements(ui):
+    """Determine the complete set of recognized requirements."""
+    # Start with all requirements supported by this file.
+    supported = set(localrepository._basesupported)
+
+    # Execute ``featuresetupfuncs`` entries if they belong to an extension
+    # relevant to this ui instance.
+    modules = {m.__name__ for n, m in extensions.extensions(ui)}
+
+    for fn in featuresetupfuncs:
+        if fn.__module__ in modules:
+            fn(ui, supported)
+
+    # Add derived requirements from registered compression engines.
+    for name in util.compengines:
+        engine = util.compengines[name]
+        if engine.revlogheader():
+            supported.add(b'exp-compression-%s' % name)
+
+    return supported
+
+def ensurerequirementsrecognized(requirements, supported):
+    """Validate that a set of local requirements is recognized.
+
+    Receives a set of requirements. Raises an ``error.RequirementError`` if
+    there exists any requirement in that set that currently loaded code
+    doesn't recognize.
+    """
+    missing = set()
+
+    for requirement in requirements:
+        if requirement in supported:
+            continue
+
+        if not requirement or not requirement[0:1].isalnum():
+            raise error.RequirementError(_(b'.hg/requires file is corrupt'))
+
+        missing.add(requirement)
+
+    if missing:
+        raise error.RequirementError(
+            _(b'repository requires features unknown to this Mercurial: %s') %
+            b' '.join(sorted(missing)),
+            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
+                   b'for more information'))
+
+def ensurerequirementscompatible(ui, requirements):
+    """Validates that a set of recognized requirements is mutually compatible.
+
+    Some requirements may not be compatible with others or require
+    config options that aren't enabled. This function is called during
+    repository opening to ensure that the set of requirements needed
+    to open a repository is sane and compatible with config options.
+
+    Extensions can monkeypatch this function to perform additional
+    checking.
+
+    ``error.RepoError`` should be raised on failure.
+    """
+    if b'exp-sparse' in requirements and not sparse.enabled:
+        raise error.RepoError(_(b'repository is using sparse feature but '
+                                b'sparse is not enabled; enable the '
+                                b'"sparse" extensions to access'))
+
+def makestore(requirements, path, vfstype):
+    """Construct a storage object for a repository."""
+    if b'store' in requirements:
+        if b'fncache' in requirements:
+            return storemod.fncachestore(path, vfstype,
+                                         b'dotencode' in requirements)
+
+        return storemod.encodedstore(path, vfstype)
+
+    return storemod.basicstore(path, vfstype)
+
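
The store dispatch above depends only on requirements; a toy mirror of
the decision (returning names instead of constructing stores):

    def choosestore(requirements):
        if 'store' in requirements:
            if 'fncache' in requirements:
                return 'fncachestore(dotencode=%s)' % (
                    'dotencode' in requirements)
            return 'encodedstore'
        return 'basicstore'

    assert choosestore(set()) == 'basicstore'
    assert choosestore({'store'}) == 'encodedstore'
    assert choosestore({'store', 'fncache', 'dotencode'}) == \
        'fncachestore(dotencode=True)'
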
+def resolvestorevfsoptions(ui, requirements, features):
+    """Resolve the options to pass to the store vfs opener.
+
+    The returned dict is used to influence behavior of the storage layer.
+    """
+    options = {}
+
+    if b'treemanifest' in requirements:
+        options[b'treemanifest'] = True
+
+    # experimental config: format.manifestcachesize
+    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
+    if manifestcachesize is not None:
+        options[b'manifestcachesize'] = manifestcachesize
+
+    # In the absence of another requirement superseding a revlog-related
+    # requirement, we have to assume the repo is using revlog version 0.
+    # This revlog format is super old and we don't bother trying to parse
+    # opener options for it because those options wouldn't do anything
+    # meaningful on such old repos.
+    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
+        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
+
+    return options
+
+def resolverevlogstorevfsoptions(ui, requirements, features):
+    """Resolve opener options specific to revlogs."""
+
+    options = {}
+    options[b'flagprocessors'] = {}
+
+    if b'revlogv1' in requirements:
+        options[b'revlogv1'] = True
+    if REVLOGV2_REQUIREMENT in requirements:
+        options[b'revlogv2'] = True
+
+    if b'generaldelta' in requirements:
+        options[b'generaldelta'] = True
+
+    # experimental config: format.chunkcachesize
+    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
+    if chunkcachesize is not None:
+        options[b'chunkcachesize'] = chunkcachesize
+
+    deltabothparents = ui.configbool(b'storage',
+                                     b'revlog.optimize-delta-parent-choice')
+    options[b'deltabothparents'] = deltabothparents
+
+    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
+
+    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
+    if 0 <= chainspan:
+        options[b'maxdeltachainspan'] = chainspan
+
+    mmapindexthreshold = ui.configbytes(b'experimental',
+                                        b'mmapindexthreshold')
+    if mmapindexthreshold is not None:
+        options[b'mmapindexthreshold'] = mmapindexthreshold
+
+    withsparseread = ui.configbool(b'experimental', b'sparse-read')
+    srdensitythres = float(ui.config(b'experimental',
+                                     b'sparse-read.density-threshold'))
+    srmingapsize = ui.configbytes(b'experimental',
+                                  b'sparse-read.min-gap-size')
+    options[b'with-sparse-read'] = withsparseread
+    options[b'sparse-read-density-threshold'] = srdensitythres
+    options[b'sparse-read-min-gap-size'] = srmingapsize
+
+    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
+    options[b'sparse-revlog'] = sparserevlog
+    if sparserevlog:
+        options[b'generaldelta'] = True
+
+    maxchainlen = None
+    if sparserevlog:
+        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
+    # experimental config: format.maxchainlen
+    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
+    if maxchainlen is not None:
+        options[b'maxchainlen'] = maxchainlen
+
+    for r in requirements:
+        if r.startswith(b'exp-compression-'):
+            options[b'compengine'] = r[len(b'exp-compression-'):]
+
+    if repository.NARROW_REQUIREMENT in requirements:
+        options[b'enableellipsis'] = True
+
+    return options
+
+def makemain(**kwargs):
+    """Produce a type conforming to ``ilocalrepositorymain``."""
+    return localrepository
+
+@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
+class revlogfilestorage(object):
+    """File storage when using revlogs."""
+
+    def file(self, path):
+        if path[0] == b'/':
+            path = path[1:]
+
+        return filelog.filelog(self.svfs, path)
+
+@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
+class revlognarrowfilestorage(object):
+    """File storage when using revlogs and narrow files."""
+
+    def file(self, path):
+        if path[0] == b'/':
+            path = path[1:]
+
+        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
+
+def makefilestorage(requirements, features, **kwargs):
+    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
+    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
+    features.add(repository.REPO_FEATURE_STREAM_CLONE)
+
+    if repository.NARROW_REQUIREMENT in requirements:
+        return revlognarrowfilestorage
+    else:
+        return revlogfilestorage
+
+# List of repository interfaces and factory functions for them. Each
+# will be called in order during ``makelocalrepository()`` to iteratively
+# derive the final type for a local repository instance. We capture the
+# function as a lambda so we don't hold a reference and the module-level
+# functions can be wrapped.
+REPO_INTERFACES = [
+    (repository.ilocalrepositorymain, lambda: makemain),
+    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
+]
+
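
The lambda indirection above is what keeps these hooks wrappable: the
list would otherwise hold a direct reference to each function object,
and later monkeypatching of the module attribute would go unseen. A
small demonstration of the difference, at module top level:

    def makemain(**kwargs):
        return 'original'

    direct = makemain            # freezes the current binding
    indirect = lambda: makemain  # re-resolves the name per call

    def wrappedmakemain(**kwargs):
        return 'wrapped'

    makemain = wrappedmakemain  # what an extension wrapper does

    assert direct() == 'original'     # stale; wrapping not visible
    assert indirect()() == 'wrapped'  # sees the new binding
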
+@interfaceutil.implementer(repository.ilocalrepositorymain)
 class localrepository(object):
+    """Main class for representing local repositories.
+
+    All local repositories are instances of this class.
+
+    Constructed on its own, instances of this class are not usable as
+    repository objects. To obtain a usable repository object, call
+    ``hg.repository()``, ``localrepo.instance()``, or
+    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
+    ``instance()`` adds support for creating new repositories.
+    ``hg.repository()`` adds more extension integration, including calling
+    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
+    used.
+    """
 
     # obsolete experimental requirements:
     #  - manifestv2: An experimental new manifest format that allowed
@@ -394,11 +848,7 @@
         'relshared',
         'dotencode',
         'exp-sparse',
-    }
-    openerreqs = {
-        'revlogv1',
-        'generaldelta',
-        'treemanifest',
+        'internal-phase'
     }
 
     # list of prefix for file which can be written without 'wlock'
@@ -421,32 +871,76 @@
         'bisect.state',
     }
 
-    def __init__(self, baseui, path, create=False, intents=None):
-        self.requirements = set()
+    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
+                 supportedrequirements, sharedpath, store, cachevfs,
+                 features, intents=None):
+        """Create a new local repository instance.
+
+        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
+        or ``localrepo.makelocalrepository()`` for obtaining a new repository
+        object.
+
+        Arguments:
+
+        baseui
+           ``ui.ui`` instance that ``ui`` argument was based off of.
+
+        ui
+           ``ui.ui`` instance for use by the repository.
+
+        origroot
+           ``bytes`` path to working directory root of this repository.
+
+        wdirvfs
+           ``vfs.vfs`` rooted at the working directory.
+
+        hgvfs
+           ``vfs.vfs`` rooted at .hg/
+
+        requirements
+           ``set`` of bytestrings representing repository opening requirements.
+
+        supportedrequirements
+           ``set`` of bytestrings representing repository requirements that we
+           know how to open. May be a superset of ``requirements``.
+
+        sharedpath
+           ``bytes`` defining the path to the storage base directory. Points
+           to a ``.hg/`` directory somewhere.
+
+        store
+           ``store.basicstore`` (or derived) instance providing access to
+           versioned storage.
+
+        cachevfs
+           ``vfs.vfs`` used for cache files.
+
+        features
+           ``set`` of bytestrings defining features/capabilities of this
+           instance.
+
+        intents
+           ``set`` of system strings indicating what this repo will be used
+           for.
+        """
+        self.baseui = baseui
+        self.ui = ui
+        self.origroot = origroot
+        # vfs rooted at working directory.
+        self.wvfs = wdirvfs
+        self.root = wdirvfs.base
+        # vfs rooted at .hg/. Used to access most non-store paths.
+        self.vfs = hgvfs
+        self.path = hgvfs.base
+        self.requirements = requirements
+        self.supported = supportedrequirements
+        self.sharedpath = sharedpath
+        self.store = store
+        self.cachevfs = cachevfs
+        self.features = features
+
         self.filtername = None
-        # wvfs: rooted at the repository root, used to access the working copy
-        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
-        # vfs: rooted at .hg, used to access repo files outside of .hg/store
-        self.vfs = None
-        # svfs: usually rooted at .hg/store, used to access repository history
-        # If this is a shared repository, this vfs may point to another
-        # repository's .hg/store directory.
-        self.svfs = None
-        self.root = self.wvfs.base
-        self.path = self.wvfs.join(".hg")
-        self.origroot = path
-        # This is only used by context.workingctx.match in order to
-        # detect files in subrepos.
-        self.auditor = pathutil.pathauditor(
-            self.root, callback=self._checknested)
-        # This is only used by context.basectx.match in order to detect
-        # files in subrepos.
-        self.nofsauditor = pathutil.pathauditor(
-            self.root, callback=self._checknested, realfs=False, cached=True)
-        self.baseui = baseui
-        self.ui = baseui.copy()
-        self.ui.copy = baseui.copy # prevent copying repo configuration
-        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
+
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
@@ -454,98 +948,18 @@
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
         self._phasedefaults = []
-        try:
-            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
-            self._loadextensions()
-        except IOError:
-            pass
-
-        if featuresetupfuncs:
-            self.supported = set(self._basesupported) # use private copy
-            extmods = set(m.__name__ for n, m
-                          in extensions.extensions(self.ui))
-            for setupfunc in featuresetupfuncs:
-                if setupfunc.__module__ in extmods:
-                    setupfunc(self.ui, self.supported)
-        else:
-            self.supported = self._basesupported
+
         color.setup(self.ui)
 
-        # Add compression engines.
-        for name in util.compengines:
-            engine = util.compengines[name]
-            if engine.revlogheader():
-                self.supported.add('exp-compression-%s' % name)
-
-        if not self.vfs.isdir():
-            if create:
-                self.requirements = newreporequirements(self)
-
-                if not self.wvfs.exists():
-                    self.wvfs.makedirs()
-                self.vfs.makedir(notindexed=True)
-
-                if 'store' in self.requirements:
-                    self.vfs.mkdir("store")
-
-                    # create an invalid changelog
-                    self.vfs.append(
-                        "00changelog.i",
-                        '\0\0\0\2' # represents revlogv2
-                        ' dummy changelog to prevent using the old repo layout'
-                    )
-            else:
-                raise error.RepoError(_("repository %s not found") % path)
-        elif create:
-            raise error.RepoError(_("repository %s already exists") % path)
-        else:
-            try:
-                self.requirements = scmutil.readrequires(
-                        self.vfs, self.supported)
-            except IOError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
-
-        cachepath = self.vfs.join('cache')
-        self.sharedpath = self.path
-        try:
-            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
-            if 'relshared' in self.requirements:
-                sharedpath = self.vfs.join(sharedpath)
-            vfs = vfsmod.vfs(sharedpath, realpath=True)
-            cachepath = vfs.join('cache')
-            s = vfs.base
-            if not vfs.exists():
-                raise error.RepoError(
-                    _('.hg/sharedpath points to nonexistent directory %s') % s)
-            self.sharedpath = s
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-
-        if 'exp-sparse' in self.requirements and not sparse.enabled:
-            raise error.RepoError(_('repository is using sparse feature but '
-                                    'sparse is not enabled; enable the '
-                                    '"sparse" extensions to access'))
-
-        self.store = store.store(
-            self.requirements, self.sharedpath,
-            lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
-        self.vfs.createmode = self.store.createmode
-        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
-        self.cachevfs.createmode = self.store.createmode
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else: # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
-        self._applyopenerreqs()
-        if create:
-            self._writerequirements()
 
         self._dirstatevalidatewarned = False
 
@@ -638,9 +1052,6 @@
     def close(self):
         self._writecaches()
 
-    def _loadextensions(self):
-        extensions.loadall(self.ui)
-
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
@@ -653,56 +1064,25 @@
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps
 
-    def _applyopenerreqs(self):
-        self.svfs.options = dict((r, 1) for r in self.requirements
-                                           if r in self.openerreqs)
-        # experimental config: format.chunkcachesize
-        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
-        if chunkcachesize is not None:
-            self.svfs.options['chunkcachesize'] = chunkcachesize
-        # experimental config: format.maxchainlen
-        maxchainlen = self.ui.configint('format', 'maxchainlen')
-        if maxchainlen is not None:
-            self.svfs.options['maxchainlen'] = maxchainlen
-        # experimental config: format.manifestcachesize
-        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
-        if manifestcachesize is not None:
-            self.svfs.options['manifestcachesize'] = manifestcachesize
-        deltabothparents = self.ui.configbool('storage',
-            'revlog.optimize-delta-parent-choice')
-        self.svfs.options['deltabothparents'] = deltabothparents
-        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
-        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
-        if 0 <= chainspan:
-            self.svfs.options['maxdeltachainspan'] = chainspan
-        mmapindexthreshold = self.ui.configbytes('experimental',
-                                                 'mmapindexthreshold')
-        if mmapindexthreshold is not None:
-            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
-        withsparseread = self.ui.configbool('experimental', 'sparse-read')
-        srdensitythres = float(self.ui.config('experimental',
-                                              'sparse-read.density-threshold'))
-        srmingapsize = self.ui.configbytes('experimental',
-                                           'sparse-read.min-gap-size')
-        self.svfs.options['with-sparse-read'] = withsparseread
-        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
-        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
-        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
-        self.svfs.options['sparse-revlog'] = sparserevlog
-        if sparserevlog:
-            self.svfs.options['generaldelta'] = True
-
-        for r in self.requirements:
-            if r.startswith('exp-compression-'):
-                self.svfs.options['compengine'] = r[len('exp-compression-'):]
-
-        # TODO move "revlogv2" to openerreqs once finalized.
-        if REVLOGV2_REQUIREMENT in self.requirements:
-            self.svfs.options['revlogv2'] = True
-
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
+    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
+    # self -> auditor -> self._checknested -> self
+
+    @property
+    def auditor(self):
+        # This is only used by context.workingctx.match in order to
+        # detect files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested)
+
+    @property
+    def nofsauditor(self):
+        # This is only used by context.basectx.match in order to detect
+        # files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested,
+                                    realfs=False, cached=True)
+
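
The cycle being avoided: a cached auditor holds the bound method
self._checknested, and a bound method holds its instance, so the repo
could only ever be reclaimed by the cycle collector. A sketch with toy
classes:

    import gc
    import weakref

    class auditor(object):
        def __init__(self, callback):
            self.callback = callback  # bound method keeps repo alive

    class repo(object):
        def _checknested(self, path):
            return True

    r = repo()
    r.cached = auditor(r._checknested)  # repo -> auditor -> repo
    ref = weakref.ref(r)
    del r
    assert ref() is not None  # the cycle keeps it alive past the del
    gc.collect()
    assert ref() is None      # only the gc can break the cycle
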
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
@@ -779,15 +1159,10 @@
         return changelog.changelog(self.svfs,
                                    trypending=txnutil.mayhavepending(self.root))
 
-    def _constructmanifest(self):
-        # This is a temporary function while we migrate from manifest to
-        # manifestlog. It allows bundlerepo and unionrepo to intercept the
-        # manifest creation.
-        return manifest.manifestrevlog(self.svfs)
-
     @storecache('00manifest.i')
     def manifestlog(self):
-        return manifest.manifestlog(self.svfs, self)
+        rootstore = manifest.manifestrevlog(self.svfs)
+        return manifest.manifestlog(self.svfs, self, rootstore)
 
     @repofilecache('dirstate')
     def dirstate(self):
@@ -811,35 +1186,42 @@
                                " working parent %s!\n") % short(node))
             return nullid
 
-    @repofilecache(narrowspec.FILENAME)
+    @storecache(narrowspec.FILENAME)
     def narrowpats(self):
         """matcher patterns for this repository's narrowspec
 
         A tuple of (includes, excludes).
         """
-        source = self
-        if self.shared():
-            from . import hg
-            source = hg.sharedreposource(self)
-        return narrowspec.load(source)
-
-    @repofilecache(narrowspec.FILENAME)
+        return narrowspec.load(self)
+
+    @storecache(narrowspec.FILENAME)
     def _narrowmatch(self):
-        if changegroup.NARROW_REQUIREMENT not in self.requirements:
+        if repository.NARROW_REQUIREMENT not in self.requirements:
             return matchmod.always(self.root, '')
         include, exclude = self.narrowpats
         return narrowspec.match(self.root, include=include, exclude=exclude)
 
-    # TODO(martinvonz): make this property-like instead?
-    def narrowmatch(self):
+    def narrowmatch(self, match=None, includeexact=False):
+        """matcher corresponding the the repo's narrowspec
+
+        If `match` is given, then that will be intersected with the narrow
+        matcher.
+
+        If `includeexact` is True, then any exact matches from `match` will
+        be included even if they're outside the narrowspec.
+        """
+        if match:
+            if includeexact and not self._narrowmatch.always():
+                # do not exclude explicitly-specified paths so that they can
+                # be warned later on
+                em = matchmod.exact(match._root, match._cwd, match.files())
+                nm = matchmod.unionmatcher([self._narrowmatch, em])
+                return matchmod.intersectmatchers(match, nm)
+            return matchmod.intersectmatchers(match, self._narrowmatch)
         return self._narrowmatch
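
A conceptual sketch of the two narrowmatch() modes, with plain
predicates standing in for real matcher objects:

    narrow = lambda p: p.startswith('inside/')
    usermatch = lambda p: p.endswith('.py')  # the `match` argument
    exactfiles = {'outside/setup.py'}        # match.files()

    def narrowmatch(p, includeexact=False):
        # intersect the user match with (narrowspec OR exact paths)
        nm = narrow(p) or (includeexact and p in exactfiles)
        return usermatch(p) and nm

    assert narrowmatch('inside/a.py')
    assert not narrowmatch('outside/setup.py')
    # with includeexact, explicitly named paths survive so they can
    # be warned about later:
    assert narrowmatch('outside/setup.py', includeexact=True)
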
 
     def setnarrowpats(self, newincludes, newexcludes):
-        target = self
-        if self.shared():
-            from . import hg
-            target = hg.sharedreposource(self)
-        narrowspec.save(target, newincludes, newexcludes)
+        narrowspec.save(self, newincludes, newexcludes)
         self.invalidate(clearfilecache=True)
 
     def __getitem__(self, changeid):
@@ -849,18 +1231,67 @@
             return changeid
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
-            return [context.changectx(self, i)
-                    for i in xrange(*changeid.indices(len(self)))
+            return [self[i]
+                    for i in pycompat.xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         try:
-            return context.changectx(self, changeid)
+            if isinstance(changeid, int):
+                node = self.changelog.node(changeid)
+                rev = changeid
+            elif changeid == 'null':
+                node = nullid
+                rev = nullrev
+            elif changeid == 'tip':
+                node = self.changelog.tip()
+                rev = self.changelog.rev(node)
+            elif changeid == '.':
+                # this is a hack to delay/avoid loading obsmarkers
+                # when we know that '.' won't be hidden
+                node = self.dirstate.p1()
+                rev = self.unfiltered().changelog.rev(node)
+            elif len(changeid) == 20:
+                try:
+                    node = changeid
+                    rev = self.changelog.rev(changeid)
+                except error.FilteredLookupError:
+                    changeid = hex(changeid) # for the error message
+                    raise
+                except LookupError:
+                    # check if it might have come from damaged dirstate
+                    #
+                    # XXX we could avoid the unfiltered if we had a recognizable
+                    # exception for filtered changeset access
+                    if (self.local()
+                        and changeid in self.unfiltered().dirstate.parents()):
+                        msg = _("working directory has unknown parent '%s'!")
+                        raise error.Abort(msg % short(changeid))
+                    changeid = hex(changeid) # for the error message
+                    raise
+
+            elif len(changeid) == 40:
+                node = bin(changeid)
+                rev = self.changelog.rev(node)
+            else:
+                raise error.ProgrammingError(
+                        "unsupported changeid '%s' of type %s" %
+                        (changeid, type(changeid)))
+
+            return context.changectx(self, rev, node)
+
+        except (error.FilteredIndexError, error.FilteredLookupError):
+            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
+                                                % pycompat.bytestr(changeid))
+        except (IndexError, LookupError):
+            raise error.RepoLookupError(
+                _("unknown revision '%s'") % pycompat.bytestr(changeid))
         except error.WdirUnsupported:
             return context.workingctx(self)
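
Each changeid form above takes its own branch of the dispatch; a
hedged usage sketch (repo stands for any opened local repository):

    def lookupexamples(repo):
        ctx = repo[0]           # int: changelog.node(0)
        ctx = repo['tip']       # symbolic names are special-cased
        ctx = repo['null']
        ctx = repo['.']         # dirstate p1, skipping obsmarkers
        ctx = repo[ctx.node()]  # 20-byte binary node
        ctx = repo[ctx.hex()]   # 40-byte hex node
        return ctx
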
 
     def __contains__(self, changeid):
         """True if the given changeid exists
 
-        error.LookupError is raised if an ambiguous node specified.
+        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
+        specified.
         """
         try:
             self[changeid]
@@ -1122,11 +1553,6 @@
     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)
 
-    def file(self, f):
-        if f[0] == '/':
-            f = f[1:]
-        return filelog.filelog(self.svfs, f)
-
     def setparents(self, p1, p2=nullid):
         with self.dirstate.parentchange():
             copies = self.dirstate.setparents(p1, p2)
@@ -1263,7 +1689,7 @@
             rp = report
         else:
             rp = self.ui.warn
-        vfsmap = {'plain': self.vfs} # root of .hg/
+        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
         # Code to track tag movement
@@ -1372,6 +1798,7 @@
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
+                narrowspec.restorebackup(self, 'journal.narrowspec')
                 repo.dirstate.restorebackup(None, 'journal.dirstate')
 
                 repo.invalidate(clearfilecache=True)
@@ -1385,7 +1812,7 @@
                                      releasefn=releasefn,
                                      checkambigfiles=_cachedfiles,
                                      name=desc)
-        tr.changes['revs'] = xrange(0, 0)
+        tr.changes['origrepolen'] = len(self)
         tr.changes['obsmarkers'] = set()
         tr.changes['phases'] = {}
         tr.changes['bookmarks'] = {}
@@ -1460,6 +1887,7 @@
     @unfilteredmethod
     def _writejournal(self, desc):
         self.dirstate.savebackup(None, 'journal.dirstate')
+        narrowspec.savebackup(self, 'journal.narrowspec')
         self.vfs.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.vfs.write("journal.desc",
@@ -1547,6 +1975,7 @@
             # prevent dirstateguard from overwriting already restored one
             dsguard.close()
 
+            narrowspec.restorebackup(self, 'undo.narrowspec')
             self.dirstate.restorebackup(None, 'undo.dirstate')
             try:
                 branch = self.vfs.read('undo.branch')
@@ -1601,7 +2030,7 @@
             # later call to `destroyed` will refresh them.
             return
 
-        if tr is None or tr.changes['revs']:
+        if tr is None or tr.changes['origrepolen'] < len(self):
             # updating the unfiltered branchmap should refresh all the others,
             self.ui.debug('updating the branch cache\n')
             branchmap.updatecache(self.filtered('served'))
@@ -1612,11 +2041,15 @@
                 rbc.branchinfo(r)
             rbc.write()
 
+            # ensure the working copy parents are in the manifestfulltextcache
+            for ctx in self['.'].parents():
+                ctx.manifest()  # accessing the manifest is enough
+
     def invalidatecaches(self):
 
-        if '_tagscache' in vars(self):
+        if r'_tagscache' in vars(self):
             # can't use delattr on proxy
-            del self.__dict__['_tagscache']
+            del self.__dict__[r'_tagscache']
 
         self.unfiltered()._branchcaches.clear()
         self.invalidatevolatilesets()
@@ -1635,13 +2068,13 @@
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
-        if hasunfilteredcache(self, 'dirstate'):
+        if hasunfilteredcache(self, r'dirstate'):
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
-            delattr(self.unfiltered(), 'dirstate')
+            delattr(self.unfiltered(), r'dirstate')
 
     def invalidate(self, clearfilecache=False):
         '''Invalidates both store and non-store parts other than dirstate
@@ -2026,6 +2459,11 @@
     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.
         Revision information is passed via the context argument.
+
+        ctx.files() should list all files involved in this commit, i.e.
+        modified/added/removed files. On merge, it may be wider than the
+        committed ctx.files(), since file nodes derived directly from p1
+        or p2 are excluded from the committed ctx.files().
         """
 
         tr = None
@@ -2039,6 +2477,7 @@
 
             if ctx.manifestnode():
                 # reuse an existing manifest revision
+                self.ui.debug('reusing known manifest\n')
                 mn = ctx.manifestnode()
                 files = ctx.files()
             elif ctx.files():
@@ -2077,16 +2516,38 @@
                         raise
 
                 # update manifest
-                self.ui.note(_("committing manifest\n"))
                 removed = [f for f in sorted(removed) if f in m1 or f in m2]
                 drop = [f for f in removed if f in m]
                 for f in drop:
                     del m[f]
-                mn = mctx.write(trp, linkrev,
-                                p1.manifestnode(), p2.manifestnode(),
-                                added, drop)
                 files = changed + removed
+                md = None
+                if not files:
+                    # if no "files" actually changed in terms of the changelog,
+                    # try hard to detect an unmodified manifest so that the
+                    # exact same commit can be reproduced later on convert.
+                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
+                if not files and md:
+                    self.ui.debug('not reusing manifest (no file change in '
+                                  'changelog, but manifest differs)\n')
+                if files or md:
+                    self.ui.note(_("committing manifest\n"))
+                    # we're using narrowmatch here since it's already applied at
+                    # other stages (such as dirstate.walk), so we're already
+                    # ignoring things outside of narrowspec in most cases. The
+                    # one case where we might have files outside the narrowspec
+                    # at this point is merges, and we already error out in the
+                    # case where the merge has files outside of the narrowspec,
+                    # so this is safe.
+                    mn = mctx.write(trp, linkrev,
+                                    p1.manifestnode(), p2.manifestnode(),
+                                    added, drop, match=self.narrowmatch())
+                else:
+                    self.ui.debug('reusing manifest from p1 (listed files '
+                                  'actually unchanged)\n')
+                    mn = p1.manifestnode()
             else:
+                self.ui.debug('reusing manifest from p1 (no file change)\n')
                 mn = p1.manifestnode()
                 files = []
 
@@ -2345,20 +2806,55 @@
     assert name.startswith('journal')
     return os.path.join(base, name.replace('journal', 'undo', 1))
 
-def instance(ui, path, create, intents=None):
-    return localrepository(ui, util.urllocalpath(path), create,
-                           intents=intents)
+def instance(ui, path, create, intents=None, createopts=None):
+    localpath = util.urllocalpath(path)
+    if create:
+        createrepository(ui, localpath, createopts=createopts)
+
+    return makelocalrepository(ui, localpath, intents=intents)
 
 def islocal(path):
     return True
 
-def newreporequirements(repo):
+def defaultcreateopts(ui, createopts=None):
+    """Populate the default creation options for a repository.
+
+    A dictionary of explicitly requested creation options can be passed
+    in. Missing keys will be populated.
+    """
+    createopts = dict(createopts or {})
+
+    if 'backend' not in createopts:
+        # experimental config: storage.new-repo-backend
+        createopts['backend'] = ui.config('storage', 'new-repo-backend')
+
+    return createopts
+
+def newreporequirements(ui, createopts):
     """Determine the set of requirements for a new local repository.
 
     Extensions can wrap this function to specify custom requirements for
     new repositories.
     """
-    ui = repo.ui
+    # If the repo is being created from a shared repository, we copy
+    # its requirements.
+    if 'sharedrepo' in createopts:
+        requirements = set(createopts['sharedrepo'].requirements)
+        if createopts.get('sharedrelative'):
+            requirements.add('relshared')
+        else:
+            requirements.add('shared')
+
+        return requirements
+
+    if 'backend' not in createopts:
+        raise error.ProgrammingError('backend key not present in createopts; '
+                                     'was defaultcreateopts() called?')
+
+    if createopts['backend'] != 'revlogv1':
+        raise error.Abort(_('unable to determine repository requirements for '
+                            'storage backend: %s') % createopts['backend'])
+
     requirements = {'revlogv1'}
     if ui.configbool('format', 'usestore'):
         requirements.add('store')
@@ -2393,5 +2889,156 @@
         # generaldelta is implied by revlogv2.
         requirements.discard('generaldelta')
         requirements.add(REVLOGV2_REQUIREMENT)
+    # experimental config: format.internal-phase
+    if ui.configbool('format', 'internal-phase'):
+        requirements.add('internal-phase')
+
+    if createopts.get('narrowfiles'):
+        requirements.add(repository.NARROW_REQUIREMENT)
+
+    if createopts.get('lfs'):
+        requirements.add('lfs')
 
     return requirements
+
+def filterknowncreateopts(ui, createopts):
+    """Filters a dict of repo creation options against options that are known.
+
+    Receives a dict of repo creation options and returns a dict of those
+    options that we don't know how to handle.
+
+    This function is called as part of repository creation. If the
+    returned dict contains any items, repository creation will not
+    be allowed, as it means there was a request to create a repository
+    with options not recognized by loaded code.
+
+    Extensions can wrap this function to filter out creation options
+    they know how to handle.
+    """
+    known = {
+        'backend',
+        'lfs',
+        'narrowfiles',
+        'sharedrepo',
+        'sharedrelative',
+        'shareditems',
+        'shallowfilestore',
+    }
+
+    return {k: v for k, v in createopts.items() if k not in known}
+
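# A sketch of how an extension could use the two hooks above. The wrapping
# pattern via extensions.wrapfunction() is real Mercurial API; the
# 'myfeature' creation option and 'myrequirement' requirement are
# hypothetical. wrapfunction passes the original function as first argument.
from mercurial import extensions, localrepo

def _wrapnewreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    if createopts.get('myfeature'):
        requirements.add('myrequirement')
    return requirements

def _wrapfilterknown(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('myfeature', None)    # declare that we handle this option
    return unknown

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _wrapnewreporequirements)
    extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                            _wrapfilterknown)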
+def createrepository(ui, path, createopts=None):
+    """Create a new repository in a vfs.
+
+    ``path`` path to the new repo's working directory.
+    ``createopts`` options for the new repository.
+
+    The following keys for ``createopts`` are recognized:
+
+    backend
+       The storage backend to use.
+    lfs
+       Repository will be created with ``lfs`` requirement. The lfs extension
+       will automatically be loaded when the repository is accessed.
+    narrowfiles
+       Set up repository to support narrow file storage.
+    sharedrepo
+       Repository object from which storage should be shared.
+    sharedrelative
+       Boolean indicating if the path to the shared repo should be
+       stored as relative. By default, the pointer to the "parent" repo
+       is stored as an absolute path.
+    shareditems
+       Set of items to share to the new repository (in addition to storage).
+    shallowfilestore
+       Indicates that storage for files should be shallow (not all ancestor
+       revisions are known).
+    """
+    createopts = defaultcreateopts(ui, createopts=createopts)
+
+    unknownopts = filterknowncreateopts(ui, createopts)
+
+    if not isinstance(unknownopts, dict):
+        raise error.ProgrammingError('filterknowncreateopts() did not return '
+                                     'a dict')
+
+    if unknownopts:
+        raise error.Abort(_('unable to create repository because of unknown '
+                            'creation option: %s') %
+                          ', '.join(sorted(unknownopts)),
+                          hint=_('is a required extension not loaded?'))
+
+    requirements = newreporequirements(ui, createopts=createopts)
+
+    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
+
+    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
+    if hgvfs.exists():
+        raise error.RepoError(_('repository %s already exists') % path)
+
+    if 'sharedrepo' in createopts:
+        sharedpath = createopts['sharedrepo'].sharedpath
+
+        if createopts.get('sharedrelative'):
+            try:
+                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
+            except (IOError, ValueError) as e:
+                # ValueError is raised on Windows if the drive letters differ
+                # on each path.
+                raise error.Abort(_('cannot calculate relative path'),
+                                  hint=stringutil.forcebytestr(e))
+
+    if not wdirvfs.exists():
+        wdirvfs.makedirs()
+
+    hgvfs.makedir(notindexed=True)
+
+    if b'store' in requirements and 'sharedrepo' not in createopts:
+        hgvfs.mkdir(b'store')
+
+        # We create an invalid changelog outside the store so very old
+        # Mercurial versions (which didn't know about the requirements
+        # file) encounter an error on reading the changelog. This
+        # effectively locks out old clients and prevents them from
+        # mucking with a repo in an unknown format.
+        #
+        # The revlog header has version 2, which won't be recognized by
+        # such old clients.
+        hgvfs.append(b'00changelog.i',
+                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
+                     b'layout')
+
+    scmutil.writerequires(hgvfs, requirements)
+
+    # Write out file telling readers where to find the shared store.
+    if 'sharedrepo' in createopts:
+        hgvfs.write(b'sharedpath', sharedpath)
+
+    if createopts.get('shareditems'):
+        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
+        hgvfs.write(b'shared', shared)
+
+def poisonrepository(repo):
+    """Poison a repository instance so it can no longer be used."""
+    # Perform any cleanup on the instance.
+    repo.close()
+
+    # Our strategy is to replace the type of the object with one that
+    # has all attribute lookups result in error.
+    #
+    # But we have to allow the close() method because some constructors
+    # of repos call close() on repo references.
+    class poisonedrepository(object):
+        def __getattribute__(self, item):
+            if item == r'close':
+                return object.__getattribute__(self, item)
+
+            raise error.ProgrammingError('repo instances should not be used '
+                                         'after unshare')
+
+        def close(self):
+            pass
+
+    # We may have a repoview, which intercepts __setattr__. So be sure
+    # we operate at the lowest level possible.
+    object.__setattr__(repo, r'__class__', poisonedrepository)
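# A standalone sketch of the class-swap trick used by poisonrepository():
# replacing an instance's __class__ turns every later attribute access into
# an error while keeping the object identity (and a close() escape hatch).
class _poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('instance used after invalidation')

    def close(self):
        pass

class _victim(object):
    def __init__(self):
        self.value = 42

v = _victim()
object.__setattr__(v, '__class__', _poisoned)
v.close()                              # still allowed
try:
    v.value
except RuntimeError:
    pass                               # any other access now raises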
--- a/mercurial/logcmdutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/logcmdutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -13,6 +13,8 @@
 from .i18n import _
 from .node import (
     nullid,
+    wdirid,
+    wdirrev,
 )
 
 from . import (
@@ -191,7 +193,6 @@
     def _show(self, ctx, copies, props):
         '''show a single changeset or file revision'''
         changenode = ctx.node()
-        rev = ctx.rev()
 
         if self.ui.quiet:
             self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
@@ -226,9 +227,13 @@
             self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
                           label=label)
 
-        if self.ui.debugflag and rev is not None:
+        if self.ui.debugflag:
             mnode = ctx.manifestnode()
-            mrev = self.repo.manifestlog.rev(mnode)
+            if mnode is None:
+                mnode = wdirid
+                mrev = wdirrev
+            else:
+                mrev = self.repo.manifestlog.rev(mnode)
             self.ui.write(columns['manifest']
                           % scmutil.formatrevnode(self.ui, mrev, mnode),
                           label='ui.debug log.manifest')
@@ -325,15 +330,9 @@
         '''show a single changeset or file revision'''
         fm = self._fm
         fm.startitem()
-
-        # TODO: maybe this should be wdirrev/wdirnode?
-        rev = ctx.rev()
-        if rev is None:
-            hexnode = None
-        else:
-            hexnode = fm.hexfunc(ctx.node())
-        fm.data(rev=rev,
-                node=hexnode)
+        fm.context(ctx=ctx)
+        fm.data(rev=scmutil.intrev(ctx),
+                node=fm.hexfunc(scmutil.binnode(ctx)))
 
         if self.ui.quiet:
             return
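# A standalone model of the sentinel substitution used above: rev() and
# node() return None for the working-directory context, and scmutil.intrev()
# / scmutil.binnode() map that to the wdirrev/wdirid sentinels, which is
# what lets the formatter code above drop its None special-casing. The
# constants below mirror mercurial.node.wdirrev and mercurial.node.wdirid.
WDIRREV = 0x7fffffff
WDIRID = b'\xff' * 20

def intrev(rev):
    return WDIRREV if rev is None else rev

def binnode(node):
    return WDIRID if node is None else node

assert intrev(None) == WDIRREV and intrev(5) == 5
assert binnode(None) == WDIRID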
@@ -349,11 +348,7 @@
                                        for c in ctx.parents()], name='node'))
 
         if self.ui.debugflag:
-            if rev is None:
-                hexnode = None
-            else:
-                hexnode = fm.hexfunc(ctx.manifestnode())
-            fm.data(manifest=hexnode,
+            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid),
                     extra=fm.formatdict(ctx.extra()))
 
             files = ctx.p1().status(ctx)
@@ -465,6 +460,8 @@
                 self.footer = self.t.render(self._parts['footer'], props)
 
 def templatespec(tmpl, mapfile):
+    if pycompat.ispy3:
+        assert not isinstance(tmpl, str), 'tmpl must not be a str'
     if mapfile:
         return formatter.templatespec('changeset', tmpl, mapfile)
     else:
--- a/mercurial/lsprof.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/lsprof.py	Mon Oct 22 14:46:06 2018 -0400
@@ -48,7 +48,7 @@
         d = self.data
         if top is not None:
             d = d[:top]
-        cols = "% 12s %12s %11.4f %11.4f   %s\n"
+        cols = "% 12d %12d %11.4f %11.4f   %s\n"
         hcols = "% 12s %12s %12s %12s %s\n"
         file.write(hcols % ("CallCount", "Recursive", "Total(s)",
                             "Inline(s)", "module:lineno(function)"))
@@ -91,6 +91,8 @@
 
 def label(code):
     if isinstance(code, str):
+        if sys.version_info.major >= 3:
+            code = code.encode('latin-1')
         return code
     try:
         mname = _fn2mod[code.co_filename]
@@ -104,10 +106,14 @@
                 mname = _fn2mod[code.co_filename] = k
                 break
         else:
-            mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename
+            mname = _fn2mod[code.co_filename] = r'<%s>' % code.co_filename
+
+    res = r'%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
 
-    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+    if sys.version_info.major >= 3:
+        res = res.encode('latin-1')
 
+    return res
 
 if __name__ == '__main__':
     import os
--- a/mercurial/lsprofcalltree.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/lsprofcalltree.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,14 +10,19 @@
 of the GNU General Public License, incorporated herein by reference.
 """
 
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import
+
+from . import (
+    pycompat,
+)
 
 def label(code):
     if isinstance(code, str):
-        return '~' + code    # built-in functions ('~' sorts at the end)
+        # built-in functions ('~' sorts at the end)
+        return '~' + pycompat.sysbytes(code)
     else:
-        return '%s %s:%d' % (code.co_name,
-                             code.co_filename,
+        return '%s %s:%d' % (pycompat.sysbytes(code.co_name),
+                             pycompat.sysbytes(code.co_filename),
                              code.co_firstlineno)
 
 class KCacheGrind(object):
@@ -27,7 +32,7 @@
 
     def output(self, out_file):
         self.out_file = out_file
-        print('events: Ticks', file=out_file)
+        out_file.write(b'events: Ticks\n')
         self._print_summary()
         for entry in self.data:
             self._entry(entry)
@@ -37,23 +42,24 @@
         for entry in self.data:
             totaltime = int(entry.totaltime * 1000)
             max_cost = max(max_cost, totaltime)
-        print('summary: %d' % max_cost, file=self.out_file)
+        self.out_file.write(b'summary: %d\n' % max_cost)
 
     def _entry(self, entry):
         out_file = self.out_file
 
         code = entry.code
         if isinstance(code, str):
-            print('fi=~', file=out_file)
+            out_file.write(b'fi=~\n')
         else:
-            print('fi=%s' % code.co_filename, file=out_file)
-        print('fn=%s' % label(code), file=out_file)
+            out_file.write(b'fi=%s\n' % pycompat.sysbytes(code.co_filename))
+
+        out_file.write(b'fn=%s\n' % label(code))
 
         inlinetime = int(entry.inlinetime * 1000)
         if isinstance(code, str):
-            print('0 ', inlinetime, file=out_file)
+            out_file.write(b'0 %d\n' % inlinetime)
         else:
-            print('%d %d' % (code.co_firstlineno, inlinetime), file=out_file)
+            out_file.write(b'%d %d\n' % (code.co_firstlineno, inlinetime))
 
         # recursive calls are counted in entry.calls
         if entry.calls:
@@ -68,19 +74,20 @@
 
         for subentry in calls:
             self._subentry(lineno, subentry)
-        print(file=out_file)
+
+        out_file.write(b'\n')
 
     def _subentry(self, lineno, subentry):
         out_file = self.out_file
         code = subentry.code
-        print('cfn=%s' % label(code), file=out_file)
+        out_file.write(b'cfn=%s\n' % label(code))
         if isinstance(code, str):
-            print('cfi=~', file=out_file)
-            print('calls=%d 0' % subentry.callcount, file=out_file)
+            out_file.write(b'cfi=~\n')
+            out_file.write(b'calls=%d 0\n' % subentry.callcount)
         else:
-            print('cfi=%s' % code.co_filename, file=out_file)
-            print('calls=%d %d' % (
-                subentry.callcount, code.co_firstlineno), file=out_file)
+            out_file.write(b'cfi=%s\n' % pycompat.sysbytes(code.co_filename))
+            out_file.write(b'calls=%d %d\n' % (
+                subentry.callcount, code.co_firstlineno))
 
         totaltime = int(subentry.totaltime * 1000)
-        print('%d %d' % (lineno, totaltime), file=out_file)
+        out_file.write(b'%d %d\n' % (lineno, totaltime))
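# A usage sketch mirroring how mercurial/profiling.py drives this class;
# after this change the output stream must be opened in binary mode, since
# KCacheGrind.output() now writes bytes instead of printing native strings.
from mercurial import lsprof, lsprofcalltree

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

p = lsprof.Profiler()
p.enable(subcalls=True)
fib(20)
p.disable()

with open('callgrind.out.fib', 'wb') as fp:   # note: binary mode
    lsprofcalltree.KCacheGrind(p).output(fp)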
--- a/mercurial/mail.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/mail.py	Mon Oct 22 14:46:06 2018 -0400
@@ -73,15 +73,24 @@
 
     def _get_socket(self, host, port, timeout):
         if self.debuglevel > 0:
-            self._ui.debug('connect: %r\n' % (host, port))
+            self._ui.debug('connect: %r\n' % ((host, port),))
         new_socket = socket.create_connection((host, port), timeout)
         new_socket = sslutil.wrapsocket(new_socket,
                                         self.keyfile, self.certfile,
                                         ui=self._ui,
                                         serverhostname=self._host)
-        self.file = smtplib.SSLFakeFile(new_socket)
+        self.file = new_socket.makefile(r'rb')
         return new_socket
 
+def _pyhastls():
+    """Returns true iff Python has TLS support, false otherwise."""
+    try:
+        import ssl
+        getattr(ssl, 'HAS_TLS', False)
+        return True
+    except ImportError:
+        return False
+
 def _smtp(ui):
     '''build an smtp connection and return a function to send mail'''
     local_hostname = ui.config('smtp', 'local_hostname')
@@ -89,7 +98,7 @@
     # backward compatible: when tls = true, we use starttls.
     starttls = tls == 'starttls' or stringutil.parsebool(tls)
     smtps = tls == 'smtps'
-    if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
+    if (starttls or smtps) and not _pyhastls():
         raise error.Abort(_("can't use TLS: Python SSL support not installed"))
     mailhost = ui.config('smtp', 'host')
     if not mailhost:
@@ -143,8 +152,9 @@
 def _sendmail(ui, sender, recipients, msg):
     '''send mail using sendmail.'''
     program = ui.config('email', 'method')
-    cmdline = '%s -f %s %s' % (program, stringutil.email(sender),
-                               ' '.join(map(stringutil.email, recipients)))
+    stremail = lambda x: stringutil.email(encoding.strtolocal(x))
+    cmdline = '%s -f %s %s' % (program, stremail(sender),
+                               ' '.join(map(stremail, recipients)))
     ui.note(_('sending mail: %s\n') % cmdline)
     fp = procutil.popen(cmdline, 'wb')
     fp.write(util.tonativeeol(msg))
@@ -160,7 +170,8 @@
     # Should be time.asctime(), but Windows prints 2-characters day
     # of month instead of one. Make them print the same thing.
     date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
-    fp.write('From %s %s\n' % (sender, date))
+    fp.write('From %s %s\n' % (encoding.strtolocal(sender),
+                               encoding.strtolocal(date)))
     fp.write(msg)
     fp.write('\n\n')
     fp.close()
@@ -209,7 +220,7 @@
 
     cs = ['us-ascii', 'utf-8', encoding.encoding, encoding.fallbackencoding]
     if display:
-        return mimetextqp(s, subtype, 'us-ascii')
+        cs = ['us-ascii']
     for charset in cs:
         try:
             s.decode(pycompat.sysstr(charset))
@@ -252,10 +263,27 @@
     order. Tries both encoding and fallbackencoding for input. Only as
     last resort send as is in fake ascii.
     Caveat: Do not use for mail parts containing patches!'''
+    sendcharsets = charsets or _charsets(ui)
+    if not isinstance(s, bytes):
+        # We have unicode data, which we need to try and encode to
+        # some reasonable-ish encoding. Try the encodings the user
+        # wants, and fall back to garbage-in-ascii.
+        for ocs in sendcharsets:
+            try:
+                return s.encode(pycompat.sysstr(ocs)), ocs
+            except UnicodeEncodeError:
+                pass
+            except LookupError:
+                ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+        else:
+            # Everything failed, ascii-armor what we've got and send it.
+            return s.encode('ascii', 'backslashreplace'), 'us-ascii'
+    # We have a bytes of unknown encoding. We'll try and guess a valid
+    # encoding, falling back to pretending we had ascii even though we
+    # know that's wrong.
     try:
         s.decode('ascii')
     except UnicodeDecodeError:
-        sendcharsets = charsets or _charsets(ui)
         for ics in (encoding.encoding, encoding.fallbackencoding):
             try:
                 u = s.decode(ics)
@@ -263,7 +291,7 @@
                 continue
             for ocs in sendcharsets:
                 try:
-                    return u.encode(ocs), ocs
+                    return u.encode(pycompat.sysstr(ocs)), ocs
                 except UnicodeEncodeError:
                     pass
                 except LookupError:
@@ -280,40 +308,46 @@
     return s
 
 def _addressencode(ui, name, addr, charsets=None):
+    assert isinstance(addr, bytes)
     name = headencode(ui, name, charsets)
     try:
         acc, dom = addr.split('@')
-        acc = acc.encode('ascii')
-        dom = dom.decode(encoding.encoding).encode('idna')
+        acc.decode('ascii')
+        dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna')
         addr = '%s@%s' % (acc, dom)
     except UnicodeDecodeError:
         raise error.Abort(_('invalid email address: %s') % addr)
     except ValueError:
         try:
             # too strict?
-            addr = addr.encode('ascii')
+            addr.decode('ascii')
         except UnicodeDecodeError:
             raise error.Abort(_('invalid local address: %s') % addr)
-    return email.utils.formataddr((name, addr))
+    return pycompat.bytesurl(
+        email.utils.formataddr((name, encoding.strfromlocal(addr))))
 
 def addressencode(ui, address, charsets=None, display=False):
     '''Turns address into RFC-2047 compliant header.'''
     if display or not address:
         return address or ''
-    name, addr = email.utils.parseaddr(address)
-    return _addressencode(ui, name, addr, charsets)
+    name, addr = email.utils.parseaddr(encoding.strfromlocal(address))
+    return _addressencode(ui, name, encoding.strtolocal(addr), charsets)
 
 def addrlistencode(ui, addrs, charsets=None, display=False):
     '''Turns a list of addresses into a list of RFC-2047 compliant headers.
     A single element of input list may contain multiple addresses, but output
     always has one address per item'''
+    for a in addrs:
+        assert isinstance(a, bytes), (r'%r unexpectedly not a bytestr' % a)
     if display:
         return [a.strip() for a in addrs if a.strip()]
 
     result = []
-    for name, addr in email.utils.getaddresses(addrs):
+    for name, addr in email.utils.getaddresses(
+            [encoding.strfromlocal(a) for a in addrs]):
         if name or addr:
-            result.append(_addressencode(ui, name, addr, charsets))
+            r = _addressencode(ui, name, encoding.strtolocal(addr), charsets)
+            result.append(r)
     return result
 
 def mimeencode(ui, s, charsets=None, display=False):
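# A standalone, simplified sketch of the charset-selection strategy _encode
# now applies to unicode input: try each candidate charset in order and fall
# back to ascii with backslashreplace when nothing fits. (The real function
# also warns about invalid charsets instead of silently skipping them.)
def _pickcharset(s, candidates):
    for cs in candidates:
        try:
            return s.encode(cs), cs
        except (UnicodeEncodeError, LookupError):
            pass
    return s.encode('ascii', 'backslashreplace'), 'us-ascii'

assert _pickcharset(u'caf\xe9', ['us-ascii', 'utf-8']) == \
    (b'caf\xc3\xa9', 'utf-8')
assert _pickcharset(u'\u2603', ['us-ascii'])[0] == b'\\u2603'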
--- a/mercurial/manifest.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/manifest.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,11 +10,14 @@
 import heapq
 import itertools
 import struct
+import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
+    nullid,
+    nullrev,
 )
 from . import (
     error,
@@ -54,12 +57,11 @@
 def _text(it):
     files = []
     lines = []
-    _hex = revlog.hex
     for f, n, fl in it:
         files.append(f)
         # if this is changed to support newlines in filenames,
         # be sure to check the templates/ dir again (especially *-raw.tmpl)
-        lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
+        lines.append("%s\0%s%s\n" % (f, hex(n), fl))
 
     _checkforbidden(files)
     return ''.join(lines)
@@ -565,7 +567,7 @@
                 start, end = _msearch(addbuf, f, start)
                 if not todelete:
                     h, fl = self._lm[f]
-                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
+                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                 else:
                     if start == end:
                         # item we want to delete was not found, error out
@@ -641,7 +643,7 @@
     """Check filenames for illegal characters."""
     for f in l:
         if '\n' in f or '\r' in f:
-            raise error.RevlogError(
+            raise error.StorageError(
                 _("'\\n' and '\\r' disallowed in filenames: %r")
                 % pycompat.bytestr(f))
 
@@ -679,11 +681,12 @@
 class treemanifest(object):
     def __init__(self, dir='', text=''):
         self._dir = dir
-        self._node = revlog.nullid
+        self._node = nullid
         self._loadfunc = _noop
         self._copyfunc = _noop
         self._dirty = False
         self._dirs = {}
+        self._lazydirs = {}
         # Using _lazymanifest here is a little slower than plain old dicts
         self._files = {}
         self._flags = {}
@@ -697,9 +700,63 @@
     def _subpath(self, path):
         return self._dir + path
 
+    def _loadalllazy(self):
+        selfdirs = self._dirs
+        for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
+            if docopy:
+                selfdirs[d] = readsubtree(path, node).copy()
+            else:
+                selfdirs[d] = readsubtree(path, node)
+        self._lazydirs = {}
+
+    def _loadlazy(self, d):
+        v = self._lazydirs.get(d)
+        if v:
+            path, node, readsubtree, docopy = v
+            if docopy:
+                self._dirs[d] = readsubtree(path, node).copy()
+            else:
+                self._dirs[d] = readsubtree(path, node)
+            del self._lazydirs[d]
+
+    def _loadchildrensetlazy(self, visit):
+        if not visit:
+            return None
+        if visit == 'all' or visit == 'this':
+            self._loadalllazy()
+            return None
+
+        loadlazy = self._loadlazy
+        for k in visit:
+            loadlazy(k + '/')
+        return visit
+
+    def _loaddifflazy(self, t1, t2):
+        """load items in t1 and t2 if they're needed for diffing.
+
+        The criteria currently are:
+        - if it's lazy in only one of t1 and t2, load it there (in the other
+          it may already be loaded, or may not exist at all; either is fine)
+        - if it's lazy in both, compare the nodeids; if they differ, load
+          the directory in both
+        """
+        toloadlazy = []
+        for d, v1 in t1._lazydirs.iteritems():
+            v2 = t2._lazydirs.get(d)
+            if not v2 or v2[1] != v1[1]:
+                toloadlazy.append(d)
+        for d, v1 in t2._lazydirs.iteritems():
+            if d not in t1._lazydirs:
+                toloadlazy.append(d)
+
+        for d in toloadlazy:
+            t1._loadlazy(d)
+            t2._loadlazy(d)
+
     def __len__(self):
         self._load()
         size = len(self._files)
+        self._loadalllazy()
         for m in self._dirs.values():
             size += m.__len__()
         return size
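# A simplified, standalone model of the _lazydirs bookkeeping above: each
# unloaded child is kept as (path, node, readsubtree, docopy) and is only
# materialized when first touched, so walking one subtree no longer forces
# reading every sibling directory from storage.
def _loadone(dirs, lazydirs, d):
    v = lazydirs.get(d)
    if v:
        path, node, readsubtree, docopy = v
        tree = readsubtree(path, node)
        dirs[d] = tree.copy() if docopy else tree
        del lazydirs[d]

loads = []
def _readsubtree(path, node):
    loads.append(path)                 # count storage reads
    return {'node': node}              # stand-in for a treemanifest

dirs = {}
lazydirs = {'a/': ('a/', b'\x11' * 20, _readsubtree, False),
            'b/': ('b/', b'\x22' * 20, _readsubtree, False)}
_loadone(dirs, lazydirs, 'a/')
assert loads == ['a/'] and 'b/' in lazydirs   # 'b/' was never read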
@@ -712,12 +769,17 @@
 
     def _isempty(self):
         self._load() # for consistency; already loaded by all callers
-        return (not self._files and (not self._dirs or
-                all(m._isempty() for m in self._dirs.values())))
+        # See if we can skip loading everything.
+        if self._files or (self._dirs and
+                           any(not m._isempty() for m in self._dirs.values())):
+            return False
+        self._loadalllazy()
+        return (not self._dirs or
+                all(m._isempty() for m in self._dirs.values()))
 
     def __repr__(self):
         return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
-                (self._dir, revlog.hex(self._node),
+                (self._dir, hex(self._node),
                  bool(self._loadfunc is _noop),
                  self._dirty, id(self)))
 
@@ -739,6 +801,7 @@
 
     def iterentries(self):
         self._load()
+        self._loadalllazy()
         for p, n in sorted(itertools.chain(self._dirs.items(),
                                            self._files.items())):
             if p in self._files:
@@ -749,6 +812,7 @@
 
     def items(self):
         self._load()
+        self._loadalllazy()
         for p, n in sorted(itertools.chain(self._dirs.items(),
                                            self._files.items())):
             if p in self._files:
@@ -761,6 +825,7 @@
 
     def iterkeys(self):
         self._load()
+        self._loadalllazy()
         for p in sorted(itertools.chain(self._dirs, self._files)):
             if p in self._files:
                 yield self._subpath(p)
@@ -780,8 +845,11 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             if dir not in self._dirs:
                 return False
+
             return self._dirs[dir].__contains__(subpath)
         else:
             return f in self._files
@@ -790,6 +858,8 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             if dir not in self._dirs:
                 return default
             return self._dirs[dir].get(subpath, default)
@@ -800,6 +870,8 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             return self._dirs[dir].__getitem__(subpath)
         else:
             return self._files[f]
@@ -808,11 +880,13 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             if dir not in self._dirs:
                 return ''
             return self._dirs[dir].flags(subpath)
         else:
-            if f in self._dirs:
+            if f in self._lazydirs or f in self._dirs:
                 return ''
             return self._flags.get(f, '')
 
@@ -820,6 +894,8 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             return self._dirs[dir].find(subpath)
         else:
             return self._files[f], self._flags.get(f, '')
@@ -828,6 +904,8 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
+
             self._dirs[dir].__delitem__(subpath)
             # If the directory is now empty, remove it
             if self._dirs[dir]._isempty():
@@ -843,6 +921,7 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
             if dir not in self._dirs:
                 self._dirs[dir] = treemanifest(self._subpath(dir))
             self._dirs[dir].__setitem__(subpath, n)
@@ -863,6 +942,7 @@
         self._load()
         dir, subpath = _splittopdir(f)
         if dir:
+            self._loadlazy(dir)
             if dir not in self._dirs:
                 self._dirs[dir] = treemanifest(self._subpath(dir))
             self._dirs[dir].setflag(subpath, flags)
@@ -877,8 +957,11 @@
         if self._copyfunc is _noop:
             def _copyfunc(s):
                 self._load()
-                for d in self._dirs:
-                    s._dirs[d] = self._dirs[d].copy()
+                s._lazydirs = {d: (p, n, r, True) for
+                               d, (p, n, r, c) in self._lazydirs.iteritems()}
+                sdirs = s._dirs
+                for d, v in self._dirs.iteritems():
+                    sdirs[d] = v.copy()
                 s._files = dict.copy(self._files)
                 s._flags = dict.copy(self._flags)
             if self._loadfunc is _noop:
@@ -891,7 +974,7 @@
 
     def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
-        if match:
+        if match and not match.always():
             m1 = self.matches(match)
             m2 = m2.matches(match)
             return m1.filesnotin(m2)
@@ -902,6 +985,7 @@
                 return
             t1._load()
             t2._load()
+            self._loaddifflazy(t1, t2)
             for d, m1 in t1._dirs.iteritems():
                 if d in t2._dirs:
                     m2 = t2._dirs[d]
@@ -927,10 +1011,12 @@
         self._load()
         topdir, subdir = _splittopdir(dir)
         if topdir:
+            self._loadlazy(topdir)
             if topdir in self._dirs:
                 return self._dirs[topdir].hasdir(subdir)
             return False
-        return (dir + '/') in self._dirs
+        dirslash = dir + '/'
+        return dirslash in self._dirs or dirslash in self._lazydirs
 
     def walk(self, match):
         '''Generates matching file names.
@@ -963,19 +1049,22 @@
 
     def _walk(self, match):
         '''Recursively generates matching file names for walk().'''
-        if not match.visitdir(self._dir[:-1] or '.'):
+        visit = match.visitchildrenset(self._dir[:-1] or '.')
+        if not visit:
             return
 
         # yield this dir's files and walk its submanifests
         self._load()
+        visit = self._loadchildrensetlazy(visit)
         for p in sorted(list(self._dirs) + list(self._files)):
             if p in self._files:
                 fullp = self._subpath(p)
                 if match(fullp):
                     yield fullp
             else:
-                for f in self._dirs[p]._walk(match):
-                    yield f
+                if not visit or p[:-1] in visit:
+                    for f in self._dirs[p]._walk(match):
+                        yield f
 
     def matches(self, match):
         '''generate a new manifest filtered by the match argument'''
@@ -988,7 +1077,7 @@
         '''recursively generate a new manifest filtered by the match argument.
         '''
 
-        visit = match.visitdir(self._dir[:-1] or '.')
+        visit = match.visitchildrenset(self._dir[:-1] or '.')
         if visit == 'all':
             return self.copy()
         ret = treemanifest(self._dir)
@@ -997,14 +1086,26 @@
 
         self._load()
         for fn in self._files:
+            # While visitchildrenset *usually* lists only subdirs, that is
+            # really up to the matcher: the returned set() may also contain
+            # files. If visit == 'this', we should obviously look at the
+            # files in this directory; if visit is a set and fn is in it, we
+            # should inspect fn (no need to inspect things not in the set).
+            if visit != 'this' and fn not in visit:
+                continue
             fullp = self._subpath(fn)
+            # visitchildrenset isn't perfect, we still need to call the regular
+            # matcher code to further filter results.
             if not match(fullp):
                 continue
             ret._files[fn] = self._files[fn]
             if fn in self._flags:
                 ret._flags[fn] = self._flags[fn]
 
+        visit = self._loadchildrensetlazy(visit)
         for dir, subm in self._dirs.iteritems():
+            if visit and dir[:-1] not in visit:
+                continue
             m = subm._matches(match)
             if not m._isempty():
                 ret._dirs[dir] = m
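# A standalone sketch of the three-way visitchildrenset() contract the code
# above relies on: a falsy value means nothing below here matches, 'all'
# means everything below matches (no further filtering needed), 'this' means
# this directory itself must be inspected, and a set names the only children
# worth descending into. (Simplified; the real matcher API has more nuance.)
def childrentovisit(children, visit):
    if not visit:
        return []
    if visit == 'all' or visit == 'this':
        return list(children)          # no useful restriction: check all
    return [c for c in children if c in visit]

assert childrentovisit(['a', 'b', 'c'], 'all') == ['a', 'b', 'c']
assert childrentovisit(['a', 'b', 'c'], {'b'}) == ['b']
assert childrentovisit(['a', 'b', 'c'], set()) == []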
@@ -1028,7 +1129,7 @@
         the nodeid will be None and the flags will be the empty
         string.
         '''
-        if match:
+        if match and not match.always():
             m1 = self.matches(match)
             m2 = m2.matches(match)
             return m1.diff(m2, clean=clean)
@@ -1039,6 +1140,8 @@
                 return
             t1._load()
             t2._load()
+            self._loaddifflazy(t1, t2)
+
             for d, m1 in t1._dirs.iteritems():
                 m2 = t2._dirs.get(d, emptytree)
                 _diff(m1, m2)
@@ -1068,10 +1171,14 @@
         return not self._dirty and not m2._dirty and self._node == m2._node
 
     def parse(self, text, readsubtree):
+        selflazy = self._lazydirs
+        subpath = self._subpath
         for f, n, fl in _parse(text):
             if fl == 't':
                 f = f + '/'
-                self._dirs[f] = readsubtree(self._subpath(f), n)
+                # False below means "doesn't need to be copied" and can use the
+                # cached value from readsubtree directly.
+                selflazy[f] = (subpath(f), n, readsubtree, False)
             elif '/' in f:
                 # This is a flat manifest, so use __setitem__ and setflag rather
                 # than assigning directly to _files and _flags, so we can
@@ -1098,9 +1205,10 @@
         """
         self._load()
         flags = self.flags
+        lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
         dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
         files = [(f, self._files[f], flags(f)) for f in self._files]
-        return _text(sorted(dirs + files))
+        return _text(sorted(dirs + files + lazydirs))
 
     def read(self, gettext, readsubtree):
         def _load_for_read(s):
@@ -1108,17 +1216,30 @@
             s._dirty = False
         self._loadfunc = _load_for_read
 
-    def writesubtrees(self, m1, m2, writesubtree):
+    def writesubtrees(self, m1, m2, writesubtree, match):
         self._load() # for consistency; should never have any effect here
         m1._load()
         m2._load()
         emptytree = treemanifest()
+        def getnode(m, d):
+            ld = m._lazydirs.get(d)
+            if ld:
+                return ld[1]
+            return m._dirs.get(d, emptytree)._node
+
+        # let's skip investigating things that `match` says we do not need.
+        visit = match.visitchildrenset(self._dir[:-1] or '.')
+        visit = self._loadchildrensetlazy(visit)
+        if visit == 'this' or visit == 'all':
+            visit = None
         for d, subm in self._dirs.iteritems():
-            subp1 = m1._dirs.get(d, emptytree)._node
-            subp2 = m2._dirs.get(d, emptytree)._node
-            if subp1 == revlog.nullid:
+            if visit and d[:-1] not in visit:
+                continue
+            subp1 = getnode(m1, d)
+            subp2 = getnode(m2, d)
+            if subp1 == nullid:
                 subp1, subp2 = subp2, subp1
-            writesubtree(subm, subp1, subp2)
+            writesubtree(subm, subp1, subp2, match)
 
     def walksubtrees(self, matcher=None):
         """Returns an iterator of the subtrees of this manifest, including this
@@ -1132,15 +1253,127 @@
             yield self
 
         self._load()
+        # OPT: use visitchildrenset to avoid loading everything.
+        self._loadalllazy()
         for d, subm in self._dirs.iteritems():
             for subtree in subm.walksubtrees(matcher=matcher):
                 yield subtree
 
-class manifestrevlog(revlog.revlog):
+class manifestfulltextcache(util.lrucachedict):
+    """File-backed LRU cache for the manifest cache
+
+    File consists of entries, up to EOF:
+
+    - 20 bytes node, 4 bytes length, <length> manifest data
+
+    These are written in reverse cache order (oldest to newest).
+
+    """
+    def __init__(self, max):
+        super(manifestfulltextcache, self).__init__(max)
+        self._dirty = False
+        self._read = False
+        self._opener = None
+
+    def read(self):
+        if self._read or self._opener is None:
+            return
+
+        try:
+            with self._opener('manifestfulltextcache') as fp:
+                set = super(manifestfulltextcache, self).__setitem__
+                # ignore trailing data, this is a cache, corruption is skipped
+                while True:
+                    node = fp.read(20)
+                    if len(node) < 20:
+                        break
+                    try:
+                        size = struct.unpack('>L', fp.read(4))[0]
+                    except struct.error:
+                        break
+                    value = bytearray(fp.read(size))
+                    if len(value) != size:
+                        break
+                    set(node, value)
+        except IOError:
+            # the file is allowed to be missing
+            pass
+
+        self._read = True
+        self._dirty = False
+
+    def write(self):
+        if not self._dirty or self._opener is None:
+            return
+        # rotate backwards to the first used node
+        with self._opener(
+                'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
+            ) as fp:
+            node = self._head.prev
+            while True:
+                if node.key in self._cache:
+                    fp.write(node.key)
+                    fp.write(struct.pack('>L', len(node.value)))
+                    fp.write(node.value)
+                if node is self._head:
+                    break
+                node = node.prev
+
+    def __len__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__len__()
+
+    def __contains__(self, k):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__contains__(k)
+
+    def __iter__(self):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).__iter__()
+
+    def __getitem__(self, k):
+        if not self._read:
+            self.read()
+        # the cache lru order can change on read
+        setdirty = self._cache.get(k) is not self._head
+        value = super(manifestfulltextcache, self).__getitem__(k)
+        if setdirty:
+            self._dirty = True
+        return value
+
+    def __setitem__(self, k, v):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__setitem__(k, v)
+        self._dirty = True
+
+    def __delitem__(self, k):
+        if not self._read:
+            self.read()
+        super(manifestfulltextcache, self).__delitem__(k)
+        self._dirty = True
+
+    def get(self, k, default=None):
+        if not self._read:
+            self.read()
+        return super(manifestfulltextcache, self).get(k, default=default)
+
+    def clear(self, clear_persisted_data=False):
+        super(manifestfulltextcache, self).clear()
+        if clear_persisted_data:
+            self._dirty = True
+            self.write()
+        self._read = False
+
+@interfaceutil.implementer(repository.imanifeststorage)
+class manifestrevlog(object):
     '''A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
     '''
-    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
+    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                  treemanifest=False):
         """Constructs a new manifest revlog
 
@@ -1164,36 +1397,63 @@
 
         self._treeondisk = optiontreemanifest or treemanifest
 
-        self._fulltextcache = util.lrucachedict(cachesize)
+        self._fulltextcache = manifestfulltextcache(cachesize)
 
-        if dir:
+        if tree:
             assert self._treeondisk, 'opts is %r' % opts
 
         if indexfile is None:
             indexfile = '00manifest.i'
-            if dir:
-                indexfile = "meta/" + dir + indexfile
+            if tree:
+                indexfile = "meta/" + tree + indexfile
 
-        self._dir = dir
+        self.tree = tree
+
         # The dirlogcache is kept on the root manifest log
-        if dir:
+        if tree:
             self._dirlogcache = dirlogcache
         else:
             self._dirlogcache = {'': self}
 
-        super(manifestrevlog, self).__init__(opener, indexfile,
-                                             # only root indexfile is cached
-                                             checkambig=not bool(dir),
-                                             mmaplargeindex=True)
+        self._revlog = revlog.revlog(opener, indexfile,
+                                     # only root indexfile is cached
+                                     checkambig=not bool(tree),
+                                     mmaplargeindex=True)
+
+        self.index = self._revlog.index
+        self.version = self._revlog.version
+        self._generaldelta = self._revlog._generaldelta
+
+    def _setupmanifestcachehooks(self, repo):
+        """Persist the manifestfulltextcache on lock release"""
+        if not util.safehasattr(repo, '_lockref'):
+            return
+
+        self._fulltextcache._opener = repo.cachevfs
+        reporef = weakref.ref(repo)
+        manifestrevlogref = weakref.ref(self)
+
+        def persistmanifestcache():
+            repo = reporef()
+            self = manifestrevlogref()
+            if repo is None or self is None:
+                return
+            if repo.manifestlog.getstorage(b'') is not self:
+                # there's a different manifest in play now, abort
+                return
+            self._fulltextcache.write()
+
+        if repo._currentlock(repo._lockref) is not None:
+            repo._afterlock(persistmanifestcache)
 
     @property
     def fulltextcache(self):
         return self._fulltextcache
 
-    def clearcaches(self):
-        super(manifestrevlog, self).clearcaches()
-        self._fulltextcache.clear()
-        self._dirlogcache = {'': self}
+    def clearcaches(self, clear_persisted_data=False):
+        self._revlog.clearcaches()
+        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
+        self._dirlogcache = {self.tree: self}
 
     def dirlog(self, d):
         if d:
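# A standalone sketch of the weakref guard used by persistmanifestcache()
# above: the after-lock callback must not keep the repo or the revlog alive,
# so both are captured weakly and the callback degrades to a no-op once
# either has been collected. Names below are local stand-ins.
import weakref

class _owner(object):
    def __init__(self):
        self.flushed = False
    def flush(self):
        self.flushed = True

def makecallback(owner):
    ownerref = weakref.ref(owner)
    def callback():
        obj = ownerref()
        if obj is None:
            return                     # owner already collected: no-op
        obj.flush()
    return callback

o = _owner()
cb = makecallback(o)
cb()
assert o.flushed
del o
cb()                                   # safe: silently does nothing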
@@ -1205,7 +1465,8 @@
             self._dirlogcache[d] = mfrevlog
         return self._dirlogcache[d]
 
-    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
+    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
+            match=None):
         if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
             # If our first parent is in the manifest cache, we can
             # compute a delta here using properties we know about the
@@ -1218,9 +1479,10 @@
                                [(x, True) for x in removed])
 
             arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
-            cachedelta = self.rev(p1), deltatext
+            cachedelta = self._revlog.rev(p1), deltatext
             text = util.buffer(arraytext)
-            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
+            n = self._revlog.addrevision(text, transaction, link, p1, p2,
+                                         cachedelta)
         else:
             # The first parent manifest isn't already loaded, so we'll
             # just encode a fulltext of the manifest and pass that
@@ -1228,13 +1490,15 @@
             # process.
             if self._treeondisk:
                 assert readtree, "readtree must be set for treemanifest writes"
-                m1 = readtree(self._dir, p1)
-                m2 = readtree(self._dir, p2)
-                n = self._addtree(m, transaction, link, m1, m2, readtree)
+                assert match, "match must be specified for treemanifest writes"
+                m1 = readtree(self.tree, p1)
+                m2 = readtree(self.tree, p2)
+                n = self._addtree(m, transaction, link, m1, m2, readtree,
+                                  match=match)
                 arraytext = None
             else:
                 text = m.text()
-                n = self.addrevision(text, transaction, link, p1, p2)
+                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                 arraytext = bytearray(text)
 
         if arraytext is not None:
@@ -1242,19 +1506,20 @@
 
         return n
 
-    def _addtree(self, m, transaction, link, m1, m2, readtree):
+    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
-        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
+        if self.tree != '' and (m.unmodifiedsince(m1)
+                                or m.unmodifiedsince(m2)):
             return m.node()
-        def writesubtree(subm, subp1, subp2):
+        def writesubtree(subm, subp1, subp2, match):
             sublog = self.dirlog(subm.dir())
             sublog.add(subm, transaction, link, subp1, subp2, None, None,
-                       readtree=readtree)
-        m.writesubtrees(m1, m2, writesubtree)
+                       readtree=readtree, match=match)
+        m.writesubtrees(m1, m2, writesubtree, match)
         text = m.dirtext()
         n = None
-        if self._dir != '':
+        if self.tree != '':
             # Double-check whether contents are unchanged to one parent
             if text == m1.dirtext():
                 n = m1.node()
@@ -1262,12 +1527,106 @@
                 n = m2.node()
 
         if not n:
-            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
+            n = self._revlog.addrevision(text, transaction, link, m1.node(),
+                                         m2.node())
 
         # Save nodeid so parent manifest can calculate its nodeid
         m.setnode(n)
         return n
 
+    def __len__(self):
+        return len(self._revlog)
+
+    def __iter__(self):
+        return self._revlog.__iter__()
+
+    def rev(self, node):
+        return self._revlog.rev(node)
+
+    def node(self, rev):
+        return self._revlog.node(rev)
+
+    def lookup(self, value):
+        return self._revlog.lookup(value)
+
+    def parentrevs(self, rev):
+        return self._revlog.parentrevs(rev)
+
+    def parents(self, node):
+        return self._revlog.parents(node)
+
+    def linkrev(self, rev):
+        return self._revlog.linkrev(rev)
+
+    def checksize(self):
+        return self._revlog.checksize()
+
+    def revision(self, node, _df=None, raw=False):
+        return self._revlog.revision(node, _df=_df, raw=raw)
+
+    def revdiff(self, rev1, rev2):
+        return self._revlog.revdiff(rev1, rev2)
+
+    def cmp(self, node, text):
+        return self._revlog.cmp(node, text)
+
+    def deltaparent(self, rev):
+        return self._revlog.deltaparent(rev)
+
+    def emitrevisions(self, nodes, nodesorder=None,
+                      revisiondata=False, assumehaveparentrevisions=False,
+                      deltaprevious=False):
+        return self._revlog.emitrevisions(
+            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltaprevious=deltaprevious)
+
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+        return self._revlog.addgroup(deltas, linkmapper, transaction,
+                                     addrevisioncb=addrevisioncb)
+
+    def rawsize(self, rev):
+        return self._revlog.rawsize(rev)
+
+    def getstrippoint(self, minlink):
+        return self._revlog.getstrippoint(minlink)
+
+    def strip(self, minlink, transaction):
+        return self._revlog.strip(minlink, transaction)
+
+    def files(self):
+        return self._revlog.files()
+
+    def clone(self, tr, destrevlog, **kwargs):
+        if not isinstance(destrevlog, manifestrevlog):
+            raise error.ProgrammingError('expected manifestrevlog to clone()')
+
+        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
+
+    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        return self._revlog.storageinfo(
+            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
+            revisionscount=revisionscount, trackedsize=trackedsize,
+            storedsize=storedsize)
+
+    @property
+    def indexfile(self):
+        return self._revlog.indexfile
+
+    @indexfile.setter
+    def indexfile(self, value):
+        self._revlog.indexfile = value
+
+    @property
+    def opener(self):
+        return self._revlog.opener
+
+    @opener.setter
+    def opener(self, value):
+        self._revlog.opener = value
+
 @interfaceutil.implementer(repository.imanifestlog)
 class manifestlog(object):
     """A collection class representing the collection of manifest snapshots
@@ -1277,7 +1636,7 @@
     of the list of files in the given commit. Consumers of the output of this
     class do not care about the implementation details of the actual manifests
     they receive (i.e. tree or flat or lazily loaded, etc)."""
-    def __init__(self, opener, repo):
+    def __init__(self, opener, repo, rootstore):
         usetreemanifest = False
         cachesize = 4
 
@@ -1285,9 +1644,11 @@
         if opts is not None:
             usetreemanifest = opts.get('treemanifest', usetreemanifest)
             cachesize = opts.get('manifestcachesize', cachesize)
-        self._treeinmem = usetreemanifest
+
+        self._treemanifests = usetreemanifest
 
-        self._revlog = repo._constructmanifest()
+        self._rootstore = rootstore
+        self._rootstore._setupmanifestcachehooks(repo)
         self._narrowmatch = repo.narrowmatch()
 
         # A cache of the manifestctx or treemanifestctx for each directory
@@ -1302,58 +1663,58 @@
         """
         return self.get('', node)
 
-    def get(self, dir, node, verify=True):
+    def get(self, tree, node, verify=True):
         """Retrieves the manifest instance for the given node. Throws a
         LookupError if not found.
 
         `verify` - if True an exception will be thrown if the node is not in
                    the revlog
         """
-        if node in self._dirmancache.get(dir, ()):
-            return self._dirmancache[dir][node]
+        if node in self._dirmancache.get(tree, ()):
+            return self._dirmancache[tree][node]
 
         if not self._narrowmatch.always():
-            if not self._narrowmatch.visitdir(dir[:-1] or '.'):
-                return excludeddirmanifestctx(dir, node)
-        if dir:
-            if self._revlog._treeondisk:
+            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
+                return excludeddirmanifestctx(tree, node)
+        if tree:
+            if self._rootstore._treeondisk:
                 if verify:
-                    dirlog = self._revlog.dirlog(dir)
-                    if node not in dirlog.nodemap:
-                        raise LookupError(node, dirlog.indexfile,
-                                          _('no node'))
-                m = treemanifestctx(self, dir, node)
+                    # Side-effect: a LookupError is raised if the node
+                    # doesn't exist.
+                    self.getstorage(tree).rev(node)
+
+                m = treemanifestctx(self, tree, node)
             else:
                 raise error.Abort(
                         _("cannot ask for manifest directory '%s' in a flat "
-                          "manifest") % dir)
+                          "manifest") % tree)
         else:
             if verify:
-                if node not in self._revlog.nodemap:
-                    raise LookupError(node, self._revlog.indexfile,
-                                      _('no node'))
-            if self._treeinmem:
+                # Side-effect: a LookupError is raised if the node doesn't
+                # exist.
+                self._rootstore.rev(node)
+
+            if self._treemanifests:
                 m = treemanifestctx(self, '', node)
             else:
                 m = manifestctx(self, node)
 
-        if node != revlog.nullid:
-            mancache = self._dirmancache.get(dir)
+        if node != nullid:
+            mancache = self._dirmancache.get(tree)
             if not mancache:
                 mancache = util.lrucachedict(self._cachesize)
-                self._dirmancache[dir] = mancache
+                self._dirmancache[tree] = mancache
             mancache[node] = m
         return m
 
-    def clearcaches(self):
+    def getstorage(self, tree):
+        return self._rootstore.dirlog(tree)
+
+    def clearcaches(self, clear_persisted_data=False):
         self._dirmancache.clear()
-        self._revlog.clearcaches()
+        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
 
     def rev(self, node):
-        return self._revlog.rev(node)
-
-    def addgroup(self, deltas, linkmapper, transaction):
-        return self._revlog.addgroup(deltas, linkmapper, transaction)
+        return self._rootstore.rev(node)
 
 @interfaceutil.implementer(repository.imanifestrevisionwritable)
 class memmanifestctx(object):
@@ -1361,8 +1722,8 @@
         self._manifestlog = manifestlog
         self._manifestdict = manifestdict()
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def new(self):
         return memmanifestctx(self._manifestlog)
@@ -1375,9 +1736,9 @@
     def read(self):
         return self._manifestdict
 
-    def write(self, transaction, link, p1, p2, added, removed):
-        return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
-                                  added, removed)
+    def write(self, transaction, link, p1, p2, added, removed, match=None):
+        return self._storage().add(self._manifestdict, transaction, link,
+                                   p1, p2, added, removed, match=match)
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class manifestctx(object):
@@ -1393,12 +1754,12 @@
         # TODO: We eventually want p1, p2, and linkrev exposed on this class,
         # but let's add it later when something needs it and we can load it
         # lazily.
-        #self.p1, self.p2 = revlog.parents(node)
-        #rev = revlog.rev(node)
-        #self.linkrev = revlog.linkrev(rev)
+        #self.p1, self.p2 = store.parents(node)
+        #rev = store.rev(node)
+        #self.linkrev = store.linkrev(rev)
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def node(self):
         return self._node
@@ -1413,17 +1774,20 @@
 
     @propertycache
     def parents(self):
-        return self._revlog().parents(self._node)
+        return self._storage().parents(self._node)
 
     def read(self):
         if self._data is None:
-            if self._node == revlog.nullid:
+            if self._node == nullid:
                 self._data = manifestdict()
             else:
-                rl = self._revlog()
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl._fulltextcache[self._node] = arraytext
+                store = self._storage()
+                if self._node in store.fulltextcache:
+                    text = pycompat.bytestr(store.fulltextcache[self._node])
+                else:
+                    text = store.revision(self._node)
+                    arraytext = bytearray(text)
+                    store.fulltextcache[self._node] = arraytext
                 self._data = manifestdict(text)
         return self._data
 
@@ -1434,10 +1798,10 @@
 
         If `shallow` is True, nothing changes since this is a flat manifest.
         '''
-        rl = self._revlog()
-        r = rl.rev(self._node)
-        deltaparent = rl.deltaparent(r)
-        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
+        store = self._storage()
+        r = store.rev(self._node)
+        deltaparent = store.deltaparent(r)
+        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
             return self.readdelta()
         return self.read()
 
@@ -1448,9 +1812,9 @@
 
         Changing the value of `shallow` has no effect on flat manifests.
         '''
-        revlog = self._revlog()
-        r = revlog.rev(self._node)
-        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
+        store = self._storage()
+        r = store.rev(self._node)
+        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
         return manifestdict(d)
 
     def find(self, key):
@@ -1463,8 +1827,8 @@
         self._dir = dir
         self._treemanifest = treemanifest()
 
-    def _revlog(self):
-        return self._manifestlog._revlog
+    def _storage(self):
+        return self._manifestlog.getstorage(b'')
 
     def new(self, dir=''):
         return memtreemanifestctx(self._manifestlog, dir=dir)
@@ -1477,11 +1841,12 @@
     def read(self):
         return self._treemanifest
 
-    def write(self, transaction, link, p1, p2, added, removed):
+    def write(self, transaction, link, p1, p2, added, removed, match=None):
         def readtree(dir, node):
             return self._manifestlog.get(dir, node).read()
-        return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
-                                  added, removed, readtree=readtree)
+        return self._storage().add(self._treemanifest, transaction, link,
+                                   p1, p2, added, removed, readtree=readtree,
+                                   match=match)
 
 @interfaceutil.implementer(repository.imanifestrevisionstored)
 class treemanifestctx(object):
@@ -1495,26 +1860,27 @@
         # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
         # we can instantiate treemanifestctx objects for directories we don't
         # have on disk.
-        #self.p1, self.p2 = revlog.parents(node)
-        #rev = revlog.rev(node)
-        #self.linkrev = revlog.linkrev(rev)
+        #self.p1, self.p2 = store.parents(node)
+        #rev = store.rev(node)
+        #self.linkrev = store.linkrev(rev)
 
-    def _revlog(self):
+    def _storage(self):
         narrowmatch = self._manifestlog._narrowmatch
         if not narrowmatch.always():
             if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                 return excludedmanifestrevlog(self._dir)
-        return self._manifestlog._revlog.dirlog(self._dir)
+        return self._manifestlog.getstorage(self._dir)
 
     def read(self):
         if self._data is None:
-            rl = self._revlog()
-            if self._node == revlog.nullid:
+            store = self._storage()
+            if self._node == nullid:
                 self._data = treemanifest()
-            elif rl._treeondisk:
+            # TODO accessing non-public API
+            elif store._treeondisk:
                 m = treemanifest(dir=self._dir)
                 def gettext():
-                    return rl.revision(self._node)
+                    return store.revision(self._node)
                 def readsubtree(dir, subm):
                     # Set verify to False since we need to be able to create
                     # subtrees for trees that don't exist on disk.
@@ -1523,9 +1889,12 @@
                 m.setnode(self._node)
                 self._data = m
             else:
-                text = rl.revision(self._node)
-                arraytext = bytearray(text)
-                rl.fulltextcache[self._node] = arraytext
+                if self._node in store.fulltextcache:
+                    text = pycompat.bytestr(store.fulltextcache[self._node])
+                else:
+                    text = store.revision(self._node)
+                    arraytext = bytearray(text)
+                    store.fulltextcache[self._node] = arraytext
                 self._data = treemanifest(dir=self._dir, text=text)
 
         return self._data
@@ -1543,7 +1912,7 @@
 
     @propertycache
     def parents(self):
-        return self._revlog().parents(self._node)
+        return self._storage().parents(self._node)
 
     def readdelta(self, shallow=False):
         '''Returns a manifest containing just the entries that are present
@@ -1556,15 +1925,15 @@
         the subdirectory will be reported among files and distinguished only by
         its 't' flag.
         '''
-        revlog = self._revlog()
+        store = self._storage()
         if shallow:
-            r = revlog.rev(self._node)
-            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
+            r = store.rev(self._node)
+            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
             return manifestdict(d)
         else:
             # Need to perform a slow delta
-            r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
+            r0 = store.deltaparent(store.rev(self._node))
+            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
             m1 = self.read()
             md = treemanifest(dir=self._dir)
             for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
@@ -1582,15 +1951,15 @@
         If `shallow` is True, it only returns the entries from this manifest,
         and not any submanifests.
         '''
-        rl = self._revlog()
-        r = rl.rev(self._node)
-        deltaparent = rl.deltaparent(r)
-        if (deltaparent != revlog.nullrev and
-            deltaparent in rl.parentrevs(r)):
+        store = self._storage()
+        r = store.rev(self._node)
+        deltaparent = store.deltaparent(r)
+        if (deltaparent != nullrev and
+            deltaparent in store.parentrevs(r)):
             return self.readdelta(shallow=shallow)
 
         if shallow:
-            return manifestdict(rl.revision(self._node))
+            return manifestdict(store.revision(self._node))
         else:
             return self.read()
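
The net effect of the manifest refactor above is that callers go through the
manifestlog's getstorage() indirection instead of the private _revlog
attribute. A minimal usage sketch, assuming an existing `repo` and a binary
manifest `node` (both hypothetical here, not part of this patch):

    mfl = repo.manifestlog
    store = mfl.getstorage(b'')   # root manifest storage
    store.rev(node)               # raises LookupError if node is absent
    mctx = mfl.get(b'', node)     # manifestctx, cached in _dirmancache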
 
--- a/mercurial/match.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/match.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,6 +8,7 @@
 from __future__ import absolute_import, print_function
 
 import copy
+import itertools
 import os
 import re
 
@@ -331,6 +332,49 @@
         '''
         return True
 
+    def visitchildrenset(self, dir):
+        '''Decides whether a directory should be visited based on whether it
+        has potential matches in it or one of its subdirectories, and
+        potentially lists which subdirectories of that directory should be
+        visited. This is based on the match's primary, included, and excluded
+        patterns.
+
+        This function is very similar to 'visitdir', and the following mapping
+        can be applied:
+
+             visitdir | visitchildrenset
+            ----------+-------------------
+             False    | set()
+             'all'    | 'all'
+             True     | 'this' OR non-empty set of subdirs -or files- to visit
+
+        Example:
+          Given matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
+          the following values (assuming the implementation of visitchildrenset
+          is capable of recognizing this; some implementations are not).
+
+          '.' -> {'foo', 'qux'}
+          'baz' -> set()
+          'foo' -> {'bar'}
+          # Ideally this would be 'all', but since prefix-ness is a property
+          # of the whole matcher, the non-prefix 'rootfilesin'-kind pattern
+          # mixed in forces us to downgrade this to 'this'.
+          'foo/bar' -> 'this'
+          'qux' -> 'this'
+
+        Important:
+          Most matchers do not know if they're representing files or
+          directories. They see ['path:dir/f'] and don't know whether 'f' is a
+          file or a directory, so visitchildrenset('dir') for most matchers will
+          return {'f'}, but if the matcher knows it's a file (like exactmatcher
+          does), it may return 'this'. Do not rely on a set return value to
+          mean there are no files in this dir to investigate (nor,
+          equivalently, on 'this' always being returned when there are files
+          in 'dir' to investigate).
+        '''
+        return 'this'
+
     def always(self):
         '''Matcher will match everything and .files() will be empty --
         optimization might be possible.'''
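
A hedged sketch of how a consumer might honor this contract during a tree
walk; `walk` and `listdir` are hypothetical helpers, not part of this patch:

    def walk(matcher, tree, listdir):
        visit = matcher.visitchildrenset(tree)
        if not visit:               # False or empty set: prune this subtree
            return
        for name in listdir(tree):
            # 'all'/'this' mean everything here is interesting; a set names
            # the only children worth descending into.
            if visit in ('all', 'this') or name in visit:
                child = name if tree == '.' else tree + '/' + name
                yield child
                for sub in walk(matcher, child, listdir):
                    yield sub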
@@ -367,6 +411,9 @@
     def visitdir(self, dir):
         return 'all'
 
+    def visitchildrenset(self, dir):
+        return 'all'
+
     def __repr__(self):
         return r'<alwaysmatcher>'
 
@@ -390,6 +437,9 @@
     def visitdir(self, dir):
         return False
 
+    def visitchildrenset(self, dir):
+        return set()
+
     def __repr__(self):
         return r'<nevermatcher>'
 
@@ -430,6 +480,15 @@
                 any(parentdir in self._fileset
                     for parentdir in util.finddirs(dir)))
 
+    def visitchildrenset(self, dir):
+        ret = self.visitdir(dir)
+        if ret is True:
+            return 'this'
+        elif not ret:
+            return set()
+        assert ret == 'all'
+        return 'all'
+
     def prefix(self):
         return self._prefix
 
@@ -437,6 +496,46 @@
     def __repr__(self):
         return ('<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats))
 
+# This is basically a reimplementation of util.dirs that stores the children
+# instead of just a count of them, plus a small optional optimization to avoid
+# some directories we don't need.
+class _dirchildren(object):
+    def __init__(self, paths, onlyinclude=None):
+        self._dirs = {}
+        self._onlyinclude = onlyinclude or []
+        addpath = self.addpath
+        for f in paths:
+            addpath(f)
+
+    def addpath(self, path):
+        if path == '.':
+            return
+        dirs = self._dirs
+        findsplitdirs = _dirchildren._findsplitdirs
+        for d, b in findsplitdirs(path):
+            if d not in self._onlyinclude:
+                continue
+            dirs.setdefault(d, set()).add(b)
+
+    @staticmethod
+    def _findsplitdirs(path):
+        # yields (dirname, basename) tuples, walking back to the root.  This is
+        # very similar to util.finddirs, except:
+        #  - produces a (dirname, basename) tuple, not just 'dirname'
+        #  - includes root dir
+        # Unlike manifest._splittopdir, this does not suffix `dirname` with a
+        # slash, and produces '.' for the root instead of ''.
+        oldpos = len(path)
+        pos = path.rfind('/')
+        while pos != -1:
+            yield path[:pos], path[pos + 1:oldpos]
+            oldpos = pos
+            pos = path.rfind('/', 0, pos)
+        yield '.', path[:oldpos]
+
+    def get(self, path):
+        return self._dirs.get(path, set())
+
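
For illustration, _findsplitdirs walks from the deepest directory back to
the root, so a three-level path splits as follows:

    # list(_dirchildren._findsplitdirs('a/b/c')) yields, in order:
    #   ('a/b', 'c')
    #   ('a',   'b')
    #   ('.',   'a')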
 class includematcher(basematcher):
 
     def __init__(self, root, cwd, kindpats, listsubrepos=False, badfn=None):
@@ -445,11 +544,14 @@
         self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)',
                                                listsubrepos, root)
         self._prefix = _prefix(kindpats)
-        roots, dirs = _rootsanddirs(kindpats)
+        roots, dirs, parents = _rootsdirsandparents(kindpats)
         # roots are directories which are recursively included.
         self._roots = set(roots)
         # dirs are directories which are non-recursively included.
         self._dirs = set(dirs)
+        # parents are directories which are non-recursively included because
+        # they are needed to get to items in _dirs or _roots.
+        self._parents = set(parents)
 
     def visitdir(self, dir):
         if self._prefix and dir in self._roots:
@@ -457,9 +559,38 @@
         return ('.' in self._roots or
                 dir in self._roots or
                 dir in self._dirs or
+                dir in self._parents or
                 any(parentdir in self._roots
                     for parentdir in util.finddirs(dir)))
 
+    @propertycache
+    def _allparentschildren(self):
+        # It may seem odd that we add dirs, roots, and parents, and then
+        # restrict to only parents. This is to catch the case of:
+        #   dirs = ['foo/bar']
+        #   parents = ['foo']
+        # if we asked for the children of 'foo' but had only added
+        # self._parents, we wouldn't be able to respond with ['bar'].
+        return _dirchildren(
+                itertools.chain(self._dirs, self._roots, self._parents),
+                onlyinclude=self._parents)
+
+    def visitchildrenset(self, dir):
+        if self._prefix and dir in self._roots:
+            return 'all'
+        # Note: this does *not* include the 'dir in self._parents' case from
+        # visitdir, that's handled below.
+        if ('.' in self._roots or
+            dir in self._roots or
+            dir in self._dirs or
+            any(parentdir in self._roots
+                for parentdir in util.finddirs(dir))):
+            return 'this'
+
+        if dir in self._parents:
+            return self._allparentschildren.get(dir) or set()
+        return set()
+
     @encoding.strmethod
     def __repr__(self):
         return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
@@ -486,6 +617,26 @@
     def visitdir(self, dir):
         return dir in self._dirs
 
+    def visitchildrenset(self, dir):
+        if not self._fileset or dir not in self._dirs:
+            return set()
+
+        candidates = self._fileset | self._dirs - {'.'}
+        if dir != '.':
+            d = dir + '/'
+            candidates = set(c[len(d):] for c in candidates if
+                             c.startswith(d))
+        # self._dirs includes all of the directories, recursively, so if
+        # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo',
+        # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+        # '/' in it, indicating it's for a subdir-of-a-subdir; the
+        # immediate subdir will be in there without a slash.
+        ret = {c for c in candidates if '/' not in c}
+        # We really do not expect ret to be empty, since that would imply that
+        # there's something in _dirs that didn't have a file in _fileset.
+        assert ret
+        return ret
+
     def isexact(self):
         return True
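
A worked example of the candidate logic above, assuming an exactmatcher
whose _fileset is {'a/b/c.txt', 'd.txt'} (so _dirs is {'.', 'a', 'a/b'}):

    # visitchildrenset('.')   -> {'a', 'd.txt'}
    # visitchildrenset('a')   -> {'b'}
    # visitchildrenset('a/b') -> {'c.txt'}
    # visitchildrenset('z')   -> set()   ('z' not in _dirs, prune)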
 
@@ -527,6 +678,31 @@
             return False
         return bool(self._m1.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m2_set = self._m2.visitchildrenset(dir)
+        if m2_set == 'all':
+            return set()
+        m1_set = self._m1.visitchildrenset(dir)
+        # Possible values for m1: 'all', 'this', set(...), set()
+        # Possible values for m2:        'this', set(...), set()
+        # If m2 has nothing under here that we care about, return m1, even if
+        # it's 'all'. This is a change in behavior from visitdir, which would
+        # return True, not 'all', for some reason.
+        if not m2_set:
+            return m1_set
+        if m1_set in ['all', 'this']:
+            # Never return 'all' here if m2_set is any kind of non-empty (either
+            # 'this' or set(foo)), since m2 might return set() for a
+            # subdirectory.
+            return 'this'
+        # Possible values for m1:         set(...), set()
+        # Possible values for m2: 'this', set(...)
+        # We ignore m2's set results. They're possibly incorrect:
+        #  m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'):
+        #    m1 returns {'dir'}, m2 returns {'dir'}; if we subtracted, we'd
+        #    return set(), which is *not* correct: we still need to visit 'dir'!
+        return m1_set
+
     def isexact(self):
         return self._m1.isexact()
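
Worked combinations for the difference (m1 - m2) rules above:

    # m2 -> 'all'                  => set()   (nothing survives subtraction)
    # m2 -> set(), m1 -> 'all'     => 'all'   (m2 excludes nothing here)
    # m1 -> 'this', m2 -> {'foo'}  => 'this'  (never 'all' when m2 non-empty)
    # m1 -> {'dir'}, m2 -> {'dir'} => {'dir'} (m2's set is ignored, see above)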
 
@@ -591,6 +767,25 @@
         # bool() because visit1=True + visit2='all' should not be 'all'
         return bool(visit1 and self._m2.visitdir(dir))
 
+    def visitchildrenset(self, dir):
+        m1_set = self._m1.visitchildrenset(dir)
+        if not m1_set:
+            return set()
+        m2_set = self._m2.visitchildrenset(dir)
+        if not m2_set:
+            return set()
+
+        if m1_set == 'all':
+            return m2_set
+        elif m2_set == 'all':
+            return m1_set
+
+        if m1_set == 'this' or m2_set == 'this':
+            return 'this'
+
+        assert isinstance(m1_set, set) and isinstance(m2_set, set)
+        return m1_set.intersection(m2_set)
+
     def always(self):
         return self._m1.always() and self._m2.always()
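
Worked combinations for the intersection rules above:

    # m1 -> set(), m2 -> anything        => set()
    # m1 -> 'all', m2 -> {'foo'}         => {'foo'}
    # m1 -> 'this', m2 -> {'foo', 'b'}   => 'this'
    # m1 -> {'a', 'b'}, m2 -> {'b', 'c'} => {'b'}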
 
@@ -672,6 +867,13 @@
             dir = self._path + "/" + dir
         return self._matcher.visitdir(dir)
 
+    def visitchildrenset(self, dir):
+        if dir == '.':
+            dir = self._path
+        else:
+            dir = self._path + "/" + dir
+        return self._matcher.visitchildrenset(dir)
+
     def always(self):
         return self._always
 
@@ -744,6 +946,15 @@
             return self._matcher.visitdir(dir[len(self._pathprefix):])
         return dir in self._pathdirs
 
+    def visitchildrenset(self, dir):
+        if dir == self._path:
+            return self._matcher.visitchildrenset('.')
+        if dir.startswith(self._pathprefix):
+            return self._matcher.visitchildrenset(dir[len(self._pathprefix):])
+        if dir in self._pathdirs:
+            return 'this'
+        return set()
+
     def isexact(self):
         return self._matcher.isexact()
 
@@ -784,6 +995,25 @@
             r |= v
         return r
 
+    def visitchildrenset(self, dir):
+        r = set()
+        this = False
+        for m in self._matchers:
+            v = m.visitchildrenset(dir)
+            if not v:
+                continue
+            if v == 'all':
+                return v
+            if this or v == 'this':
+                this = True
+                # don't break, we might have an 'all' in here.
+                continue
+            assert isinstance(v, set)
+            r = r.union(v)
+        if this:
+            return 'this'
+        return r
+
     @encoding.strmethod
     def __repr__(self):
         return ('<unionmatcher matchers=%r>' % self._matchers)
@@ -934,8 +1164,20 @@
 
     regex = ''
     if kindpats:
-        regex, mf = _buildregexmatch(kindpats, globsuffix)
-        matchfuncs.append(mf)
+        if all(k == 'rootfilesin' for k, p, s in kindpats):
+            dirs = {p for k, p, s in kindpats}
+            def mf(f):
+                i = f.rfind('/')
+                if i >= 0:
+                    dir = f[:i]
+                else:
+                    dir = '.'
+                return dir in dirs
+            regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
+            matchfuncs.append(mf)
+        else:
+            regex, mf = _buildregexmatch(kindpats, globsuffix)
+            matchfuncs.append(mf)
 
     if len(matchfuncs) == 1:
         return regex, matchfuncs[0]
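
For example, with kindpats of [('rootfilesin', 'qux', ''), ('rootfilesin',
'a/b', '')] the fast path builds dirs = {'qux', 'a/b'} and mf() behaves as:

    # mf('qux/file.txt') -> True   (dirname 'qux' is in dirs)
    # mf('a/b/c')        -> True   (dirname 'a/b')
    # mf('a/b/c/d')      -> False  (dirname 'a/b/c')
    # mf('toplevel')     -> False  (dirname '.')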
@@ -1004,40 +1246,46 @@
     roots, dirs = _patternrootsanddirs(kindpats)
     return roots
 
-def _rootsanddirs(kindpats):
+def _rootsdirsandparents(kindpats):
     '''Returns roots and exact directories from patterns.
 
-    roots are directories to match recursively, whereas exact directories should
-    be matched non-recursively. The returned (roots, dirs) tuple will also
-    include directories that need to be implicitly considered as either, such as
-    parent directories.
+    `roots` are directories to match recursively, `dirs` should
+    be matched non-recursively, and `parents` are the implicitly required
+    directories to walk to items in either roots or dirs.
 
-    >>> _rootsanddirs(
+    Returns a tuple of (roots, dirs, parents).
+
+    >>> _rootsdirsandparents(
     ...     [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
     ...      (b'glob', b'g*', b'')])
-    (['g/h', 'g/h', '.'], ['g', '.'])
-    >>> _rootsanddirs(
+    (['g/h', 'g/h', '.'], [], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
-    ([], ['g/h', '.', 'g', '.'])
-    >>> _rootsanddirs(
+    ([], ['g/h', '.'], ['g', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
     ...      (b'path', b'', b'')])
-    (['r', 'p/p', '.'], ['p', '.'])
-    >>> _rootsanddirs(
+    (['r', 'p/p', '.'], [], ['p', '.'])
+    >>> _rootsdirsandparents(
     ...     [(b'relglob', b'rg*', b''), (b're', b're/', b''),
     ...      (b'relre', b'rr', b'')])
-    (['.', '.', '.'], ['.'])
+    (['.', '.', '.'], [], ['.'])
     '''
     r, d = _patternrootsanddirs(kindpats)
 
+    p = []
     # Append the parents as non-recursive/exact directories, since they must be
     # scanned to get to either the roots or the other exact directories.
-    d.extend(util.dirs(d))
-    d.extend(util.dirs(r))
+    p.extend(util.dirs(d))
+    p.extend(util.dirs(r))
     # util.dirs() does not include the root directory, so add it manually
-    d.append('.')
+    p.append('.')
 
-    return r, d
+    # FIXME: all uses of this function convert these to sets, do so before
+    # returning.
+    # FIXME: all uses of this function do not need anything in 'roots' and
+    # 'dirs' to also be in 'parents', consider removing them before returning.
+    return r, d, p
 
 def _explicitfiles(kindpats):
     '''Returns the potential explicit filenames from the patterns.
--- a/mercurial/mdiff.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/mdiff.py	Mon Oct 22 14:46:06 2018 -0400
@@ -357,7 +357,7 @@
             # walk backwards from the start of the context up to the start of
             # the previous hunk context until we find a line starting with an
             # alphanumeric char.
-            for i in xrange(astart - 1, lastpos - 1, -1):
+            for i in pycompat.xrange(astart - 1, lastpos - 1, -1):
                 if l1[i][0:1].isalnum():
                     func = b' ' + l1[i].rstrip()
                     # split long function name if ASCII. otherwise we have no
@@ -381,7 +381,7 @@
         hunklines = (
             ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
             + delta
-            + [' ' + l1[x] for x in xrange(a2, aend)]
+            + [' ' + l1[x] for x in pycompat.xrange(a2, aend)]
         )
         # If either file ends without a newline and the last line of
         # that file is part of a hunk, a marker is printed. If the
@@ -390,7 +390,7 @@
         # which the hunk can end in a shared line without a newline.
         skip = False
         if not t1.endswith('\n') and astart + alen == len(l1) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith(('-', ' ')):
                     if hunklines[i].startswith(' '):
                         skip = True
@@ -398,7 +398,7 @@
                     hunklines.insert(i + 1, _missing_newline_marker)
                     break
         if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
-            for i in xrange(len(hunklines) - 1, -1, -1):
+            for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
                 if hunklines[i].startswith('+'):
                     hunklines[i] += '\n'
                     hunklines.insert(i + 1, _missing_newline_marker)
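
These hunks are Python 3 porting fixes: the xrange builtin does not exist on
Python 3, so call sites route through pycompat.xrange. A minimal sketch of
the shim these call sites assume (the real definition lives in
mercurial/pycompat.py, which is not part of this hunk):

    import sys

    if sys.version_info[0] >= 3:
        xrange = range   # Python 3: range is already lazy
    # On Python 2 the lazy xrange builtin is used unchanged.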
--- a/mercurial/merge.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/merge.py	Mon Oct 22 14:46:06 2018 -0400
@@ -27,6 +27,7 @@
 )
 from . import (
     copies,
+    encoding,
     error,
     filemerge,
     match as matchmod,
@@ -1436,7 +1437,7 @@
 
 def _getcwd():
     try:
-        return pycompat.getcwd()
+        return encoding.getcwd()
     except OSError as err:
         if err.errno == errno.ENOENT:
             return None
@@ -2240,3 +2241,71 @@
         # fix up dirstate for copies and renames
         copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
     return stats
+
+def purge(repo, matcher, ignored=False, removeemptydirs=True,
+          removefiles=True, abortonerror=False, noop=False):
+    """Purge the working directory of untracked files.
+
+    ``matcher`` is a matcher configured to scan the working directory -
+    potentially a subset.
+
+    ``ignored`` controls whether ignored files should also be purged.
+
+    ``removeemptydirs`` controls whether empty directories should be removed.
+
+    ``removefiles`` controls whether files are removed.
+
+    ``abortonerror`` causes an exception to be raised if an error occurs
+    deleting a file or directory.
+
+    ``noop`` controls whether files are actually removed. If true, nothing is
+    deleted, but the paths that would be removed are still returned.
+
+    Returns an iterable of relative paths in the working directory that were
+    or would be removed.
+    """
+
+    def remove(removefn, path):
+        try:
+            removefn(path)
+        except OSError:
+            m = _('%s cannot be removed') % path
+            if abortonerror:
+                raise error.Abort(m)
+            else:
+                repo.ui.warn(_('warning: %s\n') % m)
+
+    # There's no API to copy a matcher. So mutate the passed matcher and
+    # restore it when we're done.
+    oldexplicitdir = matcher.explicitdir
+    oldtraversedir = matcher.traversedir
+
+    res = []
+
+    try:
+        if removeemptydirs:
+            directories = []
+            matcher.explicitdir = matcher.traversedir = directories.append
+
+        status = repo.status(match=matcher, ignored=ignored, unknown=True)
+
+        if removefiles:
+            for f in sorted(status.unknown + status.ignored):
+                if not noop:
+                    repo.ui.note(_('removing file %s\n') % f)
+                    remove(repo.wvfs.unlink, f)
+                res.append(f)
+
+        if removeemptydirs:
+            for f in sorted(directories, reverse=True):
+                if matcher(f) and not repo.wvfs.listdir(f):
+                    if not noop:
+                        repo.ui.note(_('removing directory %s\n') % f)
+                        remove(repo.wvfs.rmdir, f)
+                    res.append(f)
+
+        return res
+
+    finally:
+        matcher.explicitdir = oldexplicitdir
+        matcher.traversedir = oldtraversedir
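
A hedged sketch of driving the new merge.purge() API as a dry run; `repo` is
assumed to exist, and matchmod.always() stands in for "the entire working
directory":

    from mercurial import match as matchmod, merge

    m = matchmod.always(repo.root, '')
    # noop=True reports what would be removed without touching the disk.
    for path in merge.purge(repo, m, ignored=False, noop=True):
        repo.ui.write(b'would remove %s\n' % path)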
--- a/mercurial/minifileset.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/minifileset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,20 +11,23 @@
 from . import (
     error,
     fileset,
+    filesetlang,
     pycompat,
 )
 
 def _sizep(x):
     # i18n: "size" is a keyword
-    expr = fileset.getstring(x, _("size requires an expression"))
+    expr = filesetlang.getstring(x, _("size requires an expression"))
     return fileset.sizematcher(expr)
 
 def _compile(tree):
     if not tree:
         raise error.ParseError(_("missing argument"))
     op = tree[0]
-    if op in {'symbol', 'string', 'kindpat'}:
-        name = fileset.getpattern(tree, {'path'}, _('invalid file pattern'))
+    if op == 'withstatus':
+        return _compile(tree[1])
+    elif op in {'symbol', 'string', 'kindpat'}:
+        name = filesetlang.getpattern(tree, {'path'}, _('invalid file pattern'))
         if name.startswith('**'): # file extension test, ex. "**.tar.gz"
             ext = name[2:]
             for c in pycompat.bytestr(ext):
@@ -39,18 +42,15 @@
             return f
         raise error.ParseError(_("unsupported file pattern: %s") % name,
                                hint=_('paths must be prefixed with "path:"'))
-    elif op == 'or':
-        func1 = _compile(tree[1])
-        func2 = _compile(tree[2])
-        return lambda n, s: func1(n, s) or func2(n, s)
+    elif op in {'or', 'patterns'}:
+        funcs = [_compile(x) for x in tree[1:]]
+        return lambda n, s: any(f(n, s) for f in funcs)
     elif op == 'and':
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and func2(n, s)
     elif op == 'not':
         return lambda n, s: not _compile(tree[1])(n, s)
-    elif op == 'group':
-        return _compile(tree[1])
     elif op == 'func':
         symbols = {
             'all': lambda n, s: True,
@@ -58,7 +58,7 @@
             'size': lambda n, s: _sizep(tree[2])(s),
         }
 
-        name = fileset.getsymbol(tree[1])
+        name = filesetlang.getsymbol(tree[1])
         if name in symbols:
             return symbols[name]
 
@@ -67,11 +67,9 @@
         func1 = _compile(tree[1])
         func2 = _compile(tree[2])
         return lambda n, s: func1(n, s) and not func2(n, s)
-    elif op == 'negate':
-        raise error.ParseError(_("can't use negate operator in this context"))
     elif op == 'list':
         raise error.ParseError(_("can't use a list in this context"),
-                               hint=_('see hg help "filesets.x or y"'))
+                               hint=_('see \'hg help "filesets.x or y"\''))
     raise error.ProgrammingError('illegal tree: %r' % (tree,))
 
 def compile(text):
@@ -88,5 +86,7 @@
     files whose name ends with ".zip", and all files under "bin" in the repo
     root except for "bin/README".
     """
-    tree = fileset.parse(text)
+    tree = filesetlang.parse(text)
+    tree = filesetlang.analyze(tree)
+    tree = filesetlang.optimize(tree)
     return _compile(tree)
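
A usage example for the rewritten pipeline (parse, then analyze and optimize
before compiling); the pattern string below is illustrative:

    from mercurial import minifileset

    matchfn = minifileset.compile(b'size(">100KB") | **.zip')
    matchfn(b'dist/app.zip', 10)        # True: name ends with '.zip'
    matchfn(b'notes.txt', 500 * 1024)   # True: larger than 100KB
    matchfn(b'notes.txt', 10)           # False: matches neither predicate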
--- a/mercurial/minirst.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/minirst.py	Mon Oct 22 14:46:06 2018 -0400
@@ -316,7 +316,7 @@
 
             # column markers are ASCII so we can calculate column
             # position in bytes
-            columns = [x for x in xrange(len(div))
+            columns = [x for x in pycompat.xrange(len(div))
                        if div[x:x + 1] == '=' and (x == 0 or
                                                    div[x - 1:x] == ' ')]
             rows = []
@@ -663,69 +663,79 @@
     text = ''.join(formatblock(b, width) for b in blocks)
     return text
 
+def formatplain(blocks, width):
+    """Format parsed blocks as plain text"""
+    return ''.join(formatblock(b, width) for b in blocks)
+
 def format(text, width=80, indent=0, keep=None, style='plain', section=None):
     """Parse and format the text according to width."""
     blocks, pruned = parse(text, indent, keep or [])
-    parents = []
     if section:
-        sections = getsections(blocks)
-        blocks = []
-        i = 0
-        lastparents = []
-        synthetic = []
-        collapse = True
-        while i < len(sections):
-            name, nest, b = sections[i]
-            del parents[nest:]
-            parents.append(i)
-            if name == section:
-                if lastparents != parents:
-                    llen = len(lastparents)
-                    plen = len(parents)
-                    if llen and llen != plen:
-                        collapse = False
-                    s = []
-                    for j in xrange(3, plen - 1):
-                        parent = parents[j]
-                        if (j >= llen or
-                            lastparents[j] != parent):
-                            s.append(len(blocks))
-                            sec = sections[parent][2]
-                            blocks.append(sec[0])
-                            blocks.append(sec[-1])
-                    if s:
-                        synthetic.append(s)
+        blocks = filtersections(blocks, section)
+    if style == 'html':
+        return formathtml(blocks)
+    else:
+        return formatplain(blocks, width=width)
+
+def filtersections(blocks, section):
+    """Select parsed blocks under the specified section
 
-                lastparents = parents[:]
-                blocks.extend(b)
+    Section names form dotted paths; a section is selected when the given
+    name equals its full path or matches a dot-separated suffix of it.
+    """
+    parents = []
+    sections = _getsections(blocks)
+    blocks = []
+    i = 0
+    lastparents = []
+    synthetic = []
+    collapse = True
+    while i < len(sections):
+        path, nest, b = sections[i]
+        del parents[nest:]
+        parents.append(i)
+        if path == section or path.endswith('.' + section):
+            if lastparents != parents:
+                llen = len(lastparents)
+                plen = len(parents)
+                if llen and llen != plen:
+                    collapse = False
+                s = []
+                for j in pycompat.xrange(3, plen - 1):
+                    parent = parents[j]
+                    if (j >= llen or
+                        lastparents[j] != parent):
+                        s.append(len(blocks))
+                        sec = sections[parent][2]
+                        blocks.append(sec[0])
+                        blocks.append(sec[-1])
+                if s:
+                    synthetic.append(s)
 
-                ## Also show all subnested sections
-                while i + 1 < len(sections) and sections[i + 1][1] > nest:
-                    i += 1
-                    blocks.extend(sections[i][2])
-            i += 1
-        if collapse:
-            synthetic.reverse()
-            for s in synthetic:
-                path = [blocks[syn]['lines'][0] for syn in s]
-                real = s[-1] + 2
-                realline = blocks[real]['lines']
-                realline[0] = ('"%s"' %
-                               '.'.join(path + [realline[0]]).replace('"', ''))
-                del blocks[s[0]:real]
+            lastparents = parents[:]
+            blocks.extend(b)
 
-    if style == 'html':
-        text = formathtml(blocks)
-    else:
-        text = ''.join(formatblock(b, width) for b in blocks)
-    if keep is None:
-        return text
-    else:
-        return text, pruned
+            ## Also show all subnested sections
+            while i + 1 < len(sections) and sections[i + 1][1] > nest:
+                i += 1
+                blocks.extend(sections[i][2])
+        i += 1
+    if collapse:
+        synthetic.reverse()
+        for s in synthetic:
+            path = [blocks[syn]['lines'][0] for syn in s]
+            real = s[-1] + 2
+            realline = blocks[real]['lines']
+            realline[0] = ('"%s"' %
+                           '.'.join(path + [realline[0]]).replace('"', ''))
+            del blocks[s[0]:real]
 
-def getsections(blocks):
-    '''return a list of (section name, nesting level, blocks) tuples'''
+    return blocks
+
+def _getsections(blocks):
+    '''return a list of (section path, nesting level, blocks) tuples'''
     nest = ""
+    names = ()
     level = 0
     secs = []
 
@@ -746,7 +756,8 @@
                 nest += i
             level = nest.index(i) + 1
             nest = nest[:level]
-            secs.append((getname(b), level, [b]))
+            names = names[:level] + (getname(b),)
+            secs.append(('.'.join(names), level, [b]))
         elif b['type'] in ('definition', 'field'):
             i = ' '
             if i not in nest:
@@ -767,7 +778,8 @@
                     elif siblingindent == indent:
                         level = sec[1]
                         break
-            secs.append((getname(b), level, [b]))
+            names = names[:level] + (getname(b),)
+            secs.append(('.'.join(names), level, [b]))
         else:
             if not secs:
                 # add an initial empty section
@@ -793,15 +805,6 @@
             secs[-1][2].append(b)
     return secs
 
-def decorateblocks(blocks, width):
-    '''generate a list of (section name, line text) pairs for search'''
-    lines = []
-    for s in getsections(blocks):
-        section = s[0]
-        text = formatblocks(s[2], width)
-        lines.append([(section, l) for l in text.splitlines(True)])
-    return lines
-
 def maketable(data, indent=0, header=False):
     '''Generate an RST table for the given table data as a list of lines'''
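
With this change a nested topic can be selected by any dotted suffix of its
full section path. A hedged example, assuming `text` is RST containing a
'commands' section with an 'add' subsection:

    # Both calls now select the same nested section:
    minirst.format(text, width=80, section='commands.add')
    minirst.format(text, width=80, section='add')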
 
--- a/mercurial/narrowspec.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/narrowspec.py	Mon Oct 22 14:46:06 2018 -0400
@@ -13,54 +13,23 @@
 from . import (
     error,
     match as matchmod,
+    repository,
+    sparse,
     util,
 )
 
 FILENAME = 'narrowspec'
 
-def _parsestoredpatterns(text):
-    """Parses the narrowspec format that's stored on disk."""
-    patlist = None
-    includepats = []
-    excludepats = []
-    for l in text.splitlines():
-        if l == '[includes]':
-            if patlist is None:
-                patlist = includepats
-            else:
-                raise error.Abort(_('narrowspec includes section must appear '
-                                    'at most once, before excludes'))
-        elif l == '[excludes]':
-            if patlist is not excludepats:
-                patlist = excludepats
-            else:
-                raise error.Abort(_('narrowspec excludes section must appear '
-                                    'at most once'))
-        else:
-            patlist.append(l)
-
-    return set(includepats), set(excludepats)
-
-def parseserverpatterns(text):
-    """Parses the narrowspec format that's returned by the server."""
-    includepats = set()
-    excludepats = set()
-
-    # We get one entry per line, in the format "<key> <value>".
-    # It's OK for value to contain other spaces.
-    for kp in (l.split(' ', 1) for l in text.splitlines()):
-        if len(kp) != 2:
-            raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
-        key = kp[0]
-        pat = kp[1]
-        if key == 'include':
-            includepats.add(pat)
-        elif key == 'exclude':
-            excludepats.add(pat)
-        else:
-            raise error.Abort(_('Invalid key "%s" in server response') % key)
-
-    return includepats, excludepats
+# Pattern prefixes that are allowed in narrow patterns. This list MUST
+# only contain patterns that are fast and safe to evaluate. Keep in mind
+# that patterns are supplied by clients and executed on remote servers
+# as part of wire protocol commands. That means that changes to this
+# data structure influence the wire protocol and should not be taken
+# lightly - especially removals.
+VALID_PREFIXES = (
+    b'path:',
+    b'rootfilesin:',
+)
 
 def normalizesplitpattern(kind, pat):
     """Returns the normalized version of a pattern and kind.
@@ -103,14 +72,48 @@
     return '%s:%s' % normalizesplitpattern(kind, pat)
 
 def parsepatterns(pats):
-    """Parses a list of patterns into a typed pattern set."""
-    return set(normalizepattern(p) for p in pats)
+    """Parses an iterable of patterns into a typed pattern set.
+
+    Patterns are assumed to be ``path:`` if no prefix is present.
+    For safety and performance reasons, only some prefixes are allowed.
+    See ``validatepatterns()``.
+
+    This function should be used on patterns that come from the user to
+    normalize and validate them to the internal data structure used for
+    representing patterns.
+    """
+    res = {normalizepattern(orig) for orig in pats}
+    validatepatterns(res)
+    return res
+
+def validatepatterns(pats):
+    """Validate that patterns are in the expected data structure and format.
+
+    That is, a set of normalized patterns beginning with ``path:`` or
+    ``rootfilesin:``.
+
+    This function should be used to validate internal data structures
+    and patterns that are loaded from sources that use the internal,
+    prefixed pattern representation (but can't necessarily be fully trusted).
+    """
+    if not isinstance(pats, set):
+        raise error.ProgrammingError('narrow patterns should be a set; '
+                                     'got %r' % pats)
+
+    for pat in pats:
+        if not pat.startswith(VALID_PREFIXES):
+            # Use a Mercurial exception because this can happen due to user
+            # bugs (e.g. manually updating spec file).
+            raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
+                              hint=_('narrow patterns must begin with one of '
+                                     'the following: %s') %
+                                   ', '.join(VALID_PREFIXES))
 
 def format(includes, excludes):
-    output = '[includes]\n'
+    output = '[include]\n'
     for i in sorted(includes - excludes):
         output += i + '\n'
-    output += '[excludes]\n'
+    output += '[exclude]\n'
     for e in sorted(excludes):
         output += e + '\n'
     return output
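
A hedged sketch of the normalize-then-validate flow these helpers implement:

    from mercurial import narrowspec

    pats = narrowspec.parsepatterns([b'foo/bar', b'rootfilesin:baz'])
    # pats == {b'path:foo/bar', b'rootfilesin:baz'}; bare patterns get 'path:'
    narrowspec.validatepatterns(pats)           # passes: allowed prefixes
    narrowspec.validatepatterns({b'glob:*.c'})  # raises error.Abort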
@@ -124,26 +127,49 @@
     return matchmod.match(root, '', [], include=include or [],
                           exclude=exclude or [])
 
-def needsexpansion(includes):
-    return [i for i in includes if i.startswith('include:')]
-
 def load(repo):
     try:
-        spec = repo.vfs.read(FILENAME)
+        spec = repo.svfs.read(FILENAME)
     except IOError as e:
         # Treat "narrowspec does not exist" the same as "narrowspec file exists
         # and is empty".
         if e.errno == errno.ENOENT:
-            # Without this the next call to load will use the cached
-            # non-existence of the file, which can cause some odd issues.
-            repo.invalidate(clearfilecache=True)
             return set(), set()
         raise
-    return _parsestoredpatterns(spec)
+    # maybe we should care about the profiles returned too
+    includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
+                                                            'narrow')
+    if profiles:
+        raise error.Abort(_("including other spec files using '%include' is not"
+                            " supported in narrowspec"))
+
+    validatepatterns(includepats)
+    validatepatterns(excludepats)
+
+    return includepats, excludepats
 
 def save(repo, includepats, excludepats):
+    validatepatterns(includepats)
+    validatepatterns(excludepats)
     spec = format(includepats, excludepats)
-    repo.vfs.write(FILENAME, spec)
+    repo.svfs.write(FILENAME, spec)
+
+def savebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    vfs = repo.vfs
+    vfs.tryunlink(backupname)
+    util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
+
+def restorebackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
+
+def clearbackup(repo, backupname):
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
+        return
+    repo.vfs.unlink(backupname)
 
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
--- a/mercurial/node.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/node.py	Mon Oct 22 14:46:06 2018 -0400
@@ -21,20 +21,25 @@
         raise TypeError(e)
 
 nullrev = -1
+# In hex, this is '0000000000000000000000000000000000000000'
 nullid = b"\0" * 20
 nullhex = hex(nullid)
 
 # Phony node value to stand-in for new files in some uses of
 # manifests.
-newnodeid = '!' * 20
-addednodeid = ('0' * 15) + 'added'
-modifiednodeid = ('0' * 12) + 'modified'
+# In hex, this is '2121212121212121212121212121212121212121'
+newnodeid = '!!!!!!!!!!!!!!!!!!!!'
+# In hex, this is '3030303030303030303030303030306164646564'
+addednodeid = '000000000000000added'
+# In hex, this is '3030303030303030303030306d6f646966696564'
+modifiednodeid = '000000000000modified'
 
 wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid}
 
 # pseudo identifiers for working directory
 # (they are experimental, so don't add too many dependencies on them)
 wdirrev = 0x7fffffff
+# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff'
 wdirid = b"\xff" * 20
 wdirhex = hex(wdirid)
 
--- a/mercurial/obsolete.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/obsolete.py	Mon Oct 22 14:46:06 2018 -0400
@@ -70,6 +70,7 @@
 from __future__ import absolute_import
 
 import errno
+import hashlib
 import struct
 
 from .i18n import _
@@ -277,7 +278,7 @@
     d = {}
     for l in data.split('\0'):
         if l:
-            key, value = l.split(':')
+            key, value = l.split(':', 1)
             d[key] = value
     return d
 
@@ -394,7 +395,7 @@
         off = o3 + metasize * nummeta
         metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
         metadata = []
-        for idx in xrange(0, len(metapairsize), 2):
+        for idx in pycompat.xrange(0, len(metapairsize), 2):
             o1 = off + metapairsize[idx]
             o2 = o1 + metapairsize[idx + 1]
             metadata.append((data[off:o1], data[o1:o2]))
@@ -598,7 +599,8 @@
             if len(succ) != 20:
                 raise ValueError(succ)
         if prec in succs:
-            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
+            raise ValueError(
+                r'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec)))
 
         metadata = tuple(sorted(metadata.iteritems()))
         for k, v in metadata:
@@ -954,12 +956,21 @@
             toprocess.update(obsstore.predecessors.get(prec, ()))
     return divergent
 
+def makefoldid(relation, user):
+    folddigest = hashlib.sha1(user)
+    for p in relation[0] + relation[1]:
+        folddigest.update('%d' % p.rev())
+        folddigest.update(p.node())
+    # Since fold only has to compete against fold for the same successors, it
+    # seems fine to use a small ID. Smaller IDs save space.
+    return node.hex(folddigest.digest())[:8]
 
 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
                   operation=None):
     """Add obsolete markers between changesets in a repo
 
-    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
+    <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
     tuple. `old` and `news` are changectx. metadata is an optional dictionary
     containing metadata for this marker only. It is merged with the global
     metadata specified through the `metadata` argument of this function.
@@ -993,37 +1004,52 @@
     with repo.transaction('add-obsolescence-marker') as tr:
         markerargs = []
         for rel in relations:
-            prec = rel[0]
-            sucs = rel[1]
-            localmetadata = metadata.copy()
-            if 2 < len(rel):
-                localmetadata.update(rel[2])
+            predecessors = rel[0]
+            if not isinstance(predecessors, tuple):
+                # preserve compat with old API until all caller are migrated
+                predecessors = (predecessors,)
+            if len(predecessors) > 1 and len(rel[1]) != 1:
+                msg = 'Fold markers can only have 1 successor, not %d'
+                raise error.ProgrammingError(msg % len(rel[1]))
+            foldid = None
+            foldsize = len(predecessors)
+            if 1 < foldsize:
+                foldid = makefoldid(rel, metadata['user'])
+            for foldidx, prec in enumerate(predecessors, 1):
+                sucs = rel[1]
+                localmetadata = metadata.copy()
+                if len(rel) > 2:
+                    localmetadata.update(rel[2])
+                if foldid is not None:
+                    localmetadata['fold-id'] = foldid
+                    localmetadata['fold-idx'] = '%d' % foldidx
+                    localmetadata['fold-size'] = '%d' % foldsize
 
-            if not prec.mutable():
-                raise error.Abort(_("cannot obsolete public changeset: %s")
-                                 % prec,
-                                 hint="see 'hg help phases' for details")
-            nprec = prec.node()
-            nsucs = tuple(s.node() for s in sucs)
-            npare = None
-            if not nsucs:
-                npare = tuple(p.node() for p in prec.parents())
-            if nprec in nsucs:
-                raise error.Abort(_("changeset %s cannot obsolete itself")
-                                  % prec)
+                if not prec.mutable():
+                    raise error.Abort(_("cannot obsolete public changeset: %s")
+                                     % prec,
+                                     hint="see 'hg help phases' for details")
+                nprec = prec.node()
+                nsucs = tuple(s.node() for s in sucs)
+                npare = None
+                if not nsucs:
+                    npare = tuple(p.node() for p in prec.parents())
+                if nprec in nsucs:
+                    raise error.Abort(_("changeset %s cannot obsolete itself")
+                                      % prec)
 
-            # Effect flag can be different by relation
-            if saveeffectflag:
-                # The effect flag is saved in a versioned field name for future
-                # evolution
-                effectflag = obsutil.geteffectflag(rel)
-                localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
+                # Effect flag can be different by relation
+                if saveeffectflag:
+                    # The effect flag is saved in a versioned field name for
+                    # future evolution
+                    effectflag = obsutil.geteffectflag(prec, sucs)
+                    localmetadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
 
-            # Creating the marker causes the hidden cache to become invalid,
-            # which causes recomputation when we ask for prec.parents() above.
-            # Resulting in n^2 behavior.  So let's prepare all of the args
-            # first, then create the markers.
-            markerargs.append((nprec, nsucs, npare, localmetadata))
+                # Creating the marker causes the hidden cache to become
+                # invalid, which causes recomputation when we ask for
+                # prec.parents() above.  Resulting in n^2 behavior.  So let's
+                # prepare all of the args first, then create the markers.
+                markerargs.append((nprec, nsucs, npare, localmetadata))
 
         for args in markerargs:
             nprec, nsucs, npare, localmetadata = args
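
A hedged sketch of the extended relations format; old1/old2/new are assumed
changectx objects, not part of this patch. The single-predecessor form
remains accepted via the compatibility shim above:

    # Fold: obsolete two changesets in favor of a single successor.
    obsolete.createmarkers(repo, [((old1, old2), (new,))], operation='fold')
    # Legacy form, implicitly wrapped into a 1-element predecessor tuple.
    obsolete.createmarkers(repo, [(old, (new,))])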
--- a/mercurial/obsutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/obsutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -413,15 +413,13 @@
             return False
     return True
 
-def geteffectflag(relation):
+def geteffectflag(source, successors):
     """ From an obs-marker relation, compute what changed between the
     predecessor and the successor.
     """
     effects = 0
 
-    source = relation[0]
-
-    for changectx in relation[1]:
+    for changectx in successors:
         # Check if description has changed
         if changectx.description() != source.description():
             effects |= DESCCHANGED
@@ -464,14 +462,14 @@
     phase = repo._phasecache.phase
     succsmarkers = repo.obsstore.successors.get
     public = phases.public
-    addedmarkers = tr.changes.get('obsmarkers')
-    addedrevs = tr.changes.get('revs')
+    addedmarkers = tr.changes['obsmarkers']
+    origrepolen = tr.changes['origrepolen']
     seenrevs = set()
     obsoleted = set()
     for mark in addedmarkers:
         node = mark[0]
         rev = torev(node)
-        if rev is None or rev in seenrevs or rev in addedrevs:
+        if rev is None or rev in seenrevs or rev >= origrepolen:
             continue
         seenrevs.add(rev)
         if phase(repo, rev) == public:
--- a/mercurial/parser.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/parser.py	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,6 @@
 
 from .i18n import _
 from . import (
-    encoding,
     error,
     pycompat,
     util,
@@ -198,16 +197,11 @@
         # mangle Python's exception into our format
         raise error.ParseError(pycompat.bytestr(e).lower())
 
-def _brepr(obj):
-    if isinstance(obj, bytes):
-        return b"'%s'" % stringutil.escapestr(obj)
-    return encoding.strtolocal(repr(obj))
-
 def _prettyformat(tree, leafnodes, level, lines):
     if not isinstance(tree, tuple):
-        lines.append((level, _brepr(tree)))
+        lines.append((level, stringutil.pprint(tree)))
     elif tree[0] in leafnodes:
-        rs = map(_brepr, tree[1:])
+        rs = map(stringutil.pprint, tree[1:])
         lines.append((level, '(%s %s)' % (tree[0], ' '.join(rs))))
     else:
         lines.append((level, '(%s' % tree[0]))
--- a/mercurial/patch.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/patch.py	Mon Oct 22 14:46:06 2018 -0400
@@ -815,7 +815,7 @@
         for x, s in enumerate(self.lines):
             self.hash.setdefault(s, []).append(x)
 
-        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
+        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
             for toponly in [True, False]:
                 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                 oldstart = oldstart + self.offset + self.skew
@@ -1286,7 +1286,7 @@
         self.lena = int(aend) - self.starta
         if self.starta:
             self.lena += 1
-        for x in xrange(self.lena):
+        for x in pycompat.xrange(self.lena):
             l = lr.readline()
             if l.startswith('---'):
                 # lines addition, old block is empty
@@ -1320,7 +1320,7 @@
         if self.startb:
             self.lenb += 1
         hunki = 1
-        for x in xrange(self.lenb):
+        for x in pycompat.xrange(self.lenb):
             l = lr.readline()
             if l.startswith('\ '):
                 # XXX: the only way to hit this is with an invalid line range.
@@ -1396,14 +1396,14 @@
             top = 0
             bot = 0
             hlen = len(self.hunk)
-            for x in xrange(hlen - 1):
+            for x in pycompat.xrange(hlen - 1):
                 # the hunk starts with the @@ line, so use x+1
                 if self.hunk[x + 1].startswith(' '):
                     top += 1
                 else:
                     break
             if not toponly:
-                for x in xrange(hlen - 1):
+                for x in pycompat.xrange(hlen - 1):
                     if self.hunk[hlen - bot - 1].startswith(' '):
                         bot += 1
                     else:
@@ -2326,7 +2326,7 @@
         relfiltered = True
 
     if not changes:
-        changes = repo.status(ctx1, ctx2, match=match)
+        changes = ctx1.status(ctx2, match=match)
     modified, added, removed = changes[:3]
 
     if not modified and not added and not removed:
@@ -2431,9 +2431,9 @@
     a = ''
     b = ''
     for line in hunklines:
-        if line[0] == '-':
+        if line[0:1] == '-':
             a += line[1:]
-        elif line[0] == '+':
+        elif line[0:1] == '+':
             b += line[1:]
         else:
             raise error.ProgrammingError('unexpected hunk line: %s' % line)
@@ -2480,7 +2480,7 @@
                 endspaces = chomp[len(token):]
             # scan tabs
             for maybetab in tabsplitter.findall(token):
-                if '\t' == maybetab[0]:
+                if b'\t' == maybetab[0:1]:
                     currentlabel = 'diff.tab'
                 else:
                     if changed:
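
The line[0] to line[0:1] rewrites above guard against Python 3 bytes semantics: indexing a bytes object yields an integer, while slicing yields bytes on both major versions. A quick illustration:

    line = b'-old hunk line'
    line[0]             # Python 3: 45 (an int); Python 2: '-'
    line[0:1]           # b'-' on both Python 2 and Python 3
    line[0:1] == b'-'   # True everywhere, so comparisons stay portable
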
--- a/mercurial/phases.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/phases.py	Mon Oct 22 14:46:06 2018 -0400
@@ -123,11 +123,26 @@
 
 _fphasesentry = struct.Struct('>i20s')
 
-allphases = public, draft, secret = range(3)
+INTERNAL_FLAG = 64 # Phases for mercurial internal usage only
+HIDEABLE_FLAG = 32 # Phases that are hideable
+
+# record phase index
+public, draft, secret = range(3)
+internal = INTERNAL_FLAG | HIDEABLE_FLAG
+allphases = range(internal + 1)
 trackedphases = allphases[1:]
-phasenames = ['public', 'draft', 'secret']
+# record phase names
+phasenames = [None] * len(allphases)
+phasenames[:3] = ['public', 'draft', 'secret']
+phasenames[internal] = 'internal'
+# record phase property
 mutablephases = tuple(allphases[1:])
 remotehiddenphases = tuple(allphases[2:])
+localhiddenphases = tuple(p for p in allphases if p & HIDEABLE_FLAG)
+
+def supportinternal(repo):
+    """True if the internal phase can be used on a repository"""
+    return 'internal-phase' in repo.requirements
 
 def _readroots(repo, phasedefaults=None):
     """Read phase roots from disk
@@ -272,19 +287,16 @@
         repo = repo.unfiltered()
         cl = repo.changelog
         self._phasesets = [set() for phase in allphases]
-        roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
-        if roots:
-            ps = set(cl.descendants(roots))
-            for root in roots:
-                ps.add(root)
-            self._phasesets[secret] = ps
-        roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
-        if roots:
-            ps = set(cl.descendants(roots))
-            for root in roots:
-                ps.add(root)
-            ps.difference_update(self._phasesets[secret])
-            self._phasesets[draft] = ps
+        lowerroots = set()
+        for phase in reversed(trackedphases):
+            roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
+            if roots:
+                ps = set(cl.descendants(roots))
+                for root in roots:
+                    ps.add(root)
+                ps.difference_update(lowerroots)
+                lowerroots.update(ps)
+                self._phasesets[phase] = ps
         self._loadedrevslen = len(cl)
 
     def loadphaserevs(self, repo):
@@ -374,7 +386,7 @@
 
         changes = set() # set of revisions to be changed
         delroots = [] # set of root deleted by this path
-        for phase in xrange(targetphase + 1, len(allphases)):
+        for phase in pycompat.xrange(targetphase + 1, len(allphases)):
             # filter nodes that are not in a compatible phase already
             nodes = [n for n in nodes
                      if self.phase(repo, repo[n].rev()) >= phase]
@@ -420,7 +432,7 @@
             affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
 
             # find the phase of the affected revision
-            for phase in xrange(targetphase, -1, -1):
+            for phase in pycompat.xrange(targetphase, -1, -1):
                 if phase:
                     roots = oldroots[phase]
                     revs = set(repo.revs('%ln::%ld', roots, affected))
@@ -434,6 +446,9 @@
     def _retractboundary(self, repo, tr, targetphase, nodes):
         # Be careful to preserve shallow-copied values: do not update
         # phaseroots values, replace them.
+        if targetphase == internal and not supportinternal(repo):
+            msg = 'this repository does not support the internal phase'
+            raise error.ProgrammingError(msg)
 
         repo = repo.unfiltered()
         currentroots = self.phaseroots[targetphase]
@@ -589,7 +604,7 @@
     headsbyphase = [[] for i in allphases]
     # No need to keep track of secret phase; any heads in the subset that
     # are not mentioned are implicitly secret.
-    for phase in allphases[:-1]:
+    for phase in allphases[:secret]:
         revset = "heads(%%ln & %s())" % phasenames[phase]
         headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
     return headsbyphase
@@ -602,8 +617,8 @@
     # to update. This avoid creating empty transaction during no-op operation.
 
     for phase in allphases[:-1]:
-        revset = '%%ln - %s()' % phasenames[phase]
-        heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
+        revset = '%ln - _phase(%s)'
+        heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
         if heads:
             advanceboundary(repo, trgetter(), phase, heads)
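
The new internal phase is encoded as a combination of bit flags rather than the next small integer, which leaves gaps in allphases and phasenames. A short demonstration of the resulting values, taken from the definitions above:

    INTERNAL_FLAG = 64          # phases for Mercurial internal usage only
    HIDEABLE_FLAG = 32          # phases that are hideable
    internal = INTERNAL_FLAG | HIDEABLE_FLAG    # == 96
    allphases = range(internal + 1)             # 0..96 inclusive
    phasenames = [None] * len(allphases)
    phasenames[:3] = ['public', 'draft', 'secret']
    phasenames[internal] = 'internal'
    # Slots 3..95 stay None; membership tests use the flag bits, e.g.
    # (p & HIDEABLE_FLAG) selects the locally hideable phases.
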
 
--- a/mercurial/policy.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/policy.py	Mon Oct 22 14:46:06 2018 -0400
@@ -69,7 +69,7 @@
     (r'cext', r'bdiff'): 3,
     (r'cext', r'mpatch'): 1,
     (r'cext', r'osutil'): 4,
-    (r'cext', r'parsers'): 10,
+    (r'cext', r'parsers'): 11,
 }
 
 # map import request to other package or module
--- a/mercurial/posix.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/posix.py	Mon Oct 22 14:46:06 2018 -0400
@@ -43,6 +43,7 @@
     def oslink(src, dst):
         raise OSError(errno.EINVAL,
                       'hardlinks not supported: %s to %s' % (src, dst))
+readlink = os.readlink
 unlink = os.unlink
 rename = os.rename
 removedirs = os.removedirs
--- a/mercurial/profiling.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/profiling.py	Mon Oct 22 14:46:06 2018 -0400
@@ -61,7 +61,7 @@
         else:
             # format == 'text'
             stats = lsprof.Stats(p.getstats())
-            stats.sort(field)
+            stats.sort(pycompat.sysstr(field))
             stats.pprint(limit=limit, file=fp, climit=climit)
 
 @contextlib.contextmanager
--- a/mercurial/pure/osutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/pure/osutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -14,6 +14,7 @@
 import stat as statmod
 
 from .. import (
+    encoding,
     pycompat,
 )
 
@@ -150,7 +151,7 @@
         rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int))
         rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) /
                      ctypes.sizeof(ctypes.c_int))
-        return [rfds[i] for i in xrange(rfdscount)]
+        return [rfds[i] for i in pycompat.xrange(rfdscount)]
 
 else:
     import msvcrt
@@ -193,7 +194,8 @@
 
     def _raiseioerror(name):
         err = ctypes.WinError()
-        raise IOError(err.errno, '%s: %s' % (name, err.strerror))
+        raise IOError(err.errno, r'%s: %s' % (encoding.strfromlocal(name),
+                                              err.strerror))
 
     class posixfile(object):
         '''a file object aiming for POSIX-like semantics
@@ -207,14 +209,14 @@
         remains but cannot be opened again or be recreated under the same name,
         until all reading processes have closed the file.'''
 
-        def __init__(self, name, mode='r', bufsize=-1):
-            if 'b' in mode:
+        def __init__(self, name, mode=b'r', bufsize=-1):
+            if b'b' in mode:
                 flags = _O_BINARY
             else:
                 flags = _O_TEXT
 
-            m0 = mode[0]
-            if m0 == 'r' and '+' not in mode:
+            m0 = mode[0:1]
+            if m0 == b'r' and b'+' not in mode:
                 flags |= _O_RDONLY
                 access = _GENERIC_READ
             else:
@@ -223,15 +225,15 @@
                 flags |= _O_RDWR
                 access = _GENERIC_READ | _GENERIC_WRITE
 
-            if m0 == 'r':
+            if m0 == b'r':
                 creation = _OPEN_EXISTING
-            elif m0 == 'w':
+            elif m0 == b'w':
                 creation = _CREATE_ALWAYS
-            elif m0 == 'a':
+            elif m0 == b'a':
                 creation = _OPEN_ALWAYS
                 flags |= _O_APPEND
             else:
-                raise ValueError("invalid mode: %s" % mode)
+                raise ValueError(r"invalid mode: %s" % pycompat.sysstr(mode))
 
             fh = _kernel32.CreateFileA(name, access,
                     _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
--- a/mercurial/pure/parsers.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/pure/parsers.py	Mon Oct 22 14:46:06 2018 -0400
@@ -39,25 +39,21 @@
 
 class BaseIndexObject(object):
     def __len__(self):
-        return self._lgt + len(self._extra) + 1
+        return self._lgt + len(self._extra)
 
-    def insert(self, i, tup):
-        assert i == -1
+    def append(self, tup):
         self._extra.append(tup)
 
-    def _fix_index(self, i):
+    def _check_index(self, i):
         if not isinstance(i, int):
             raise TypeError("expecting int indexes")
-        if i < 0:
-            i = len(self) + i
         if i < 0 or i >= len(self):
             raise IndexError
-        return i
 
     def __getitem__(self, i):
-        i = self._fix_index(i)
-        if i == len(self) - 1:
+        if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
+        self._check_index(i)
         if i >= self._lgt:
             return self._extra[i - self._lgt]
         index = self._calculate_index(i)
@@ -82,7 +78,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._data = self._data[:i * indexsize]
             self._lgt = i
@@ -116,7 +113,8 @@
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError("deleting slices only supports a:-1 with step 1")
-        i = self._fix_index(i.start)
+        i = i.start
+        self._check_index(i)
         if i < self._lgt:
             self._offsets = self._offsets[:i]
             self._lgt = i
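
The pure-Python index changes above drop the phantom trailing null entry from __len__, replace insert(-1, ...) with append(), and reserve -1 as the only special key. A hedged stand-in class showing the new contract (nullid as in mercurial.node):

    nullid = b'\x00' * 20

    class demoindex(object):
        def __init__(self):
            self._extra = []

        def __len__(self):
            return len(self._extra)     # no implicit +1 for the null rev

        def append(self, tup):          # replaces insert(-1, tup)
            self._extra.append(tup)

        def __getitem__(self, i):
            if i == -1:                 # only -1 maps to the null entry
                return (0, 0, 0, -1, -1, -1, -1, nullid)
            if not isinstance(i, int):
                raise TypeError("expecting int indexes")
            if i < 0 or i >= len(self):
                raise IndexError        # other negative indexes now fail
            return self._extra[i]
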
--- a/mercurial/pvec.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/pvec.py	Mon Oct 22 14:46:06 2018 -0400
@@ -52,6 +52,7 @@
 
 from .node import nullrev
 from . import (
+    pycompat,
     util,
 )
 
@@ -72,7 +73,7 @@
 
 def _str(v, l):
     bs = ""
-    for p in xrange(l):
+    for p in pycompat.xrange(l):
         bs = chr(v & 255) + bs
         v >>= 8
     return bs
@@ -91,7 +92,7 @@
             c += 1
         x >>= 1
     return c
-_htab = [_hweight(x) for x in xrange(256)]
+_htab = [_hweight(x) for x in pycompat.xrange(256)]
 
 def _hamming(a, b):
     '''find the hamming distance between two longs'''
@@ -152,7 +153,7 @@
     pvc = r._pveccache
     if ctx.rev() not in pvc:
         cl = r.changelog
-        for n in xrange(ctx.rev() + 1):
+        for n in pycompat.xrange(ctx.rev() + 1):
             if n not in pvc:
                 node = cl.node(n)
                 p1, p2 = cl.parentrevs(n)
--- a/mercurial/pycompat.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/pycompat.py	Mon Oct 22 14:46:06 2018 -0400
@@ -97,9 +97,7 @@
     osaltsep = os.altsep
     if osaltsep:
         osaltsep = osaltsep.encode('ascii')
-    # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
-    # returns bytes.
-    getcwd = os.getcwdb
+
     sysplatform = sys.platform.encode('ascii')
     sysexecutable = sys.executable
     if sysexecutable:
@@ -120,6 +118,8 @@
     rawinput = input
     getargspec = inspect.getfullargspec
 
+    long = int
+
     # TODO: .buffer might not exist if std streams were replaced; we'll need
     # a silly wrapper to make a bytes stream backed by a unicode one.
     stdin = sys.stdin.buffer
@@ -136,7 +136,7 @@
     if getattr(sys, 'argv', None) is not None:
         sysargv = list(map(os.fsencode, sys.argv))
 
-    bytechr = struct.Struct('>B').pack
+    bytechr = struct.Struct(r'>B').pack
     byterepr = b'%r'.__mod__
 
     class bytestr(bytes):
@@ -280,7 +280,7 @@
     xrange = builtins.range
     unicode = str
 
-    def open(name, mode='r', buffering=-1, encoding=None):
+    def open(name, mode=b'r', buffering=-1, encoding=None):
         return builtins.open(name, sysstr(mode), buffering, encoding)
 
     safehasattr = _wrapattrfunc(builtins.hasattr)
@@ -331,6 +331,7 @@
 else:
     import cStringIO
 
+    xrange = xrange
     unicode = unicode
     bytechr = chr
     byterepr = repr
@@ -356,7 +357,7 @@
             return filename
         else:
             raise TypeError(
-                "expect str, not %s" % type(filename).__name__)
+                r"expect str, not %s" % type(filename).__name__)
 
     # In Python 2, fsdecode() is very likely to receive bytes. So it's
     # better not to touch the Python 2 part as it's already working fine.
@@ -383,13 +384,13 @@
     ospardir = os.pardir
     ossep = os.sep
     osaltsep = os.altsep
+    long = long
     stdin = sys.stdin
     stdout = sys.stdout
     stderr = sys.stderr
     if getattr(sys, 'argv', None) is not None:
         sysargv = sys.argv
     sysplatform = sys.platform
-    getcwd = os.getcwd
     sysexecutable = sys.executable
     shlexsplit = shlex.split
     bytesio = cStringIO.StringIO
@@ -400,11 +401,11 @@
     rawinput = raw_input
     getargspec = inspect.getargspec
 
-isjython = sysplatform.startswith('java')
+isjython = sysplatform.startswith(b'java')
 
-isdarwin = sysplatform == 'darwin'
-isposix = osname == 'posix'
-iswindows = osname == 'nt'
+isdarwin = sysplatform == b'darwin'
+isposix = osname == b'posix'
+iswindows = osname == b'nt'
 
 def getoptb(args, shortlist, namelist):
     return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
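
The pycompat churn above adds long and (re-)exports xrange so call sites can stay version-agnostic. A hedged usage sketch (assumes mercurial is importable):

    from mercurial import pycompat

    # builtins.range on Python 3, the native xrange on Python 2:
    revs = pycompat.xrange(5)
    # int on Python 3, long on Python 2:
    big = pycompat.long(1) << 40
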
--- a/mercurial/registrar.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/registrar.py	Mon Oct 22 14:46:06 2018 -0400
@@ -146,6 +146,10 @@
     to prevent the command from running if the requested intent could not be
     fulfilled.
 
+    If `helpcategory` is set (usually to one of the constants in the help
+    module), the command will be displayed under that category in the help's
+    list of commands.
+
     The following intents are defined:
 
     readonly
@@ -164,14 +168,30 @@
     descriptions and examples.
     """
 
+    # Command categories for grouping them in help output.
+    CATEGORY_REPO_CREATION = 'repo'
+    CATEGORY_REMOTE_REPO_MANAGEMENT = 'remote'
+    CATEGORY_COMMITTING = 'commit'
+    CATEGORY_CHANGE_MANAGEMENT = 'management'
+    CATEGORY_CHANGE_ORGANIZATION = 'organization'
+    CATEGORY_FILE_CONTENTS = 'files'
+    CATEGORY_CHANGE_NAVIGATION = 'navigation'
+    CATEGORY_WORKING_DIRECTORY = 'wdir'
+    CATEGORY_IMPORT_EXPORT = 'import'
+    CATEGORY_MAINTENANCE = 'maintenance'
+    CATEGORY_HELP = 'help'
+    CATEGORY_MISC = 'misc'
+    CATEGORY_NONE = 'none'
+
     def _doregister(self, func, name, options=(), synopsis=None,
                     norepo=False, optionalrepo=False, inferrepo=False,
-                    intents=None):
-
+                    intents=None, helpcategory=None, helpbasic=False):
         func.norepo = norepo
         func.optionalrepo = optionalrepo
         func.inferrepo = inferrepo
         func.intents = intents or set()
+        func.helpcategory = helpcategory
+        func.helpbasic = helpbasic
         if synopsis:
             self._table[name] = func, list(options), synopsis
         else:
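
With the new helpcategory and helpbasic arguments above, an extension can file its commands under a help category. A hedged sketch of a minimal extension command (the name and body are illustrative):

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'hello', [], b'hg hello',
             helpcategory=command.CATEGORY_MISC, helpbasic=False)
    def hello(ui, repo):
        """print a friendly greeting"""
        ui.write(b'hello\n')
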
@@ -247,6 +267,19 @@
      implies 'matchctx.status()' at runtime or not (False, by
      default).
 
+    Optional argument 'weight' indicates the estimated run-time cost, useful
+    for static optimization, default is 1. Higher weight means more expensive.
+    There are predefined weights in the 'filesetlang' module.
+
+    ====== =============================================================
+    Weight Description and examples
+    ====== =============================================================
+    0.5    basic match patterns (e.g. a symbol)
+    10     computing status (e.g. added()) or accessing a few files
+    30     reading file content for each (e.g. grep())
+    50     scanning working directory (ignored())
+    ====== =============================================================
+
     'filesetpredicate' instance in example above can be used to
     decorate multiple functions.
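
A hedged sketch of a fileset predicate declaring its estimated cost via the new weight argument described above (the predicate body is elided; matching logic follows the regular fileset API):

    from mercurial import registrar

    filesetpredicate = registrar.filesetpredicate()

    @filesetpredicate('mypredicate()', callstatus=True, weight=10)
    def mypredicate(mctx, x):
        # weight=10 hints to the optimizer that this predicate is about
        # as costly as a status computation.
        ...
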
 
@@ -259,8 +292,9 @@
     _getname = _funcregistrarbase._parsefuncdecl
     _docformat = "``%s``\n    %s"
 
-    def _extrasetup(self, name, func, callstatus=False):
+    def _extrasetup(self, name, func, callstatus=False, weight=1):
         func._callstatus = callstatus
+        func._weight = weight
 
 class _templateregistrarbase(_funcregistrarbase):
     """Base of decorator to register functions as template specific one
@@ -281,7 +315,7 @@
             '''
             pass
 
-        # old API
+        # old API (DEPRECATED)
         @templatekeyword('mykeyword')
         def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
             '''Explanation of this template keyword ....
@@ -385,7 +419,8 @@
         internalmerge = registrar.internalmerge()
 
         @internalmerge('mymerge', internalmerge.mergeonly,
-                       onfailure=None, precheck=None):
+                       onfailure=None, precheck=None,
+                       binary=False, symlink=False):
         def mymergefunc(repo, mynode, orig, fcd, fco, fca,
                         toolconf, files, labels=None):
             '''Explanation of this internal merge tool ....
@@ -416,6 +451,12 @@
     'files' and 'labels'. If it returns false value, merging is aborted
     immediately (and file is marked as "unresolved").
 
+    Optional argument 'binary' indicates whether the internal merge tool
+    can handle binary files. The 'nomerge' merge type implies binary=True.
+
+    Optional argument 'symlink' indicates whether the internal merge
+    function can handle symlinks. The 'nomerge' merge type implies
+    symlink=True.
+
     'internalmerge' instance in example above can be used to
     decorate multiple functions.
 
@@ -433,7 +474,14 @@
     fullmerge = 'fullmerge'  # both premerge and merge
 
     def _extrasetup(self, name, func, mergetype,
-                    onfailure=None, precheck=None):
+                    onfailure=None, precheck=None,
+                    binary=False, symlink=False):
         func.mergetype = mergetype
         func.onfailure = onfailure
         func.precheck = precheck
+
+        binarycap = binary or mergetype == self.nomerge
+        symlinkcap = symlink or mergetype == self.nomerge
+
+        # actual capabilities, which this internal merge tool has
+        func.capabilities = {"binary": binarycap, "symlink": symlinkcap}
--- a/mercurial/repair.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/repair.py	Mon Oct 22 14:46:06 2018 -0400
@@ -24,6 +24,8 @@
     exchange,
     obsolete,
     obsutil,
+    phases,
+    pycompat,
     util,
 )
 from .utils import (
@@ -70,7 +72,7 @@
     """find out the filelogs affected by the strip"""
     files = set()
 
-    for x in xrange(striprev, len(repo)):
+    for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
@@ -80,7 +82,7 @@
     return [revlog.linkrev(r) for r in brokenset]
 
 def _collectmanifest(repo, striprev):
-    return _collectrevlog(repo.manifestlog._revlog, striprev)
+    return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
@@ -189,7 +191,11 @@
     with ui.uninterruptable():
         try:
             with repo.transaction("strip") as tr:
-                offset = len(tr.entries)
+                # TODO this code violates the interface abstraction of the
+                # transaction and makes assumptions that file storage is
+                # using append-only files. We'll need some kind of storage
+                # API to handle stripping for us.
+                offset = len(tr._entries)
 
                 tr.startgroup()
                 cl.strip(striprev, tr)
@@ -199,8 +205,8 @@
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()
 
-                for i in xrange(offset, len(tr.entries)):
-                    file, troffset, ignore = tr.entries[i]
+                for i in pycompat.xrange(offset, len(tr._entries)):
+                    file, troffset, ignore = tr._entries[i]
                     with repo.svfs(file, 'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
@@ -271,7 +277,8 @@
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
-    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
+    revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
+    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
         nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
@@ -297,31 +304,31 @@
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
-def delayedstrip(ui, repo, nodelist, topic=None):
+def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs
 
     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.
 
-    Always do a backup. The last non-None "topic" will be used as the backup
-    topic name. The default backup topic name is "backup".
+    Will do a backup if `backup` is True. The last non-None "topic" will be
+    used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
-        return strip(ui, repo, nodes, True, topic)
+        return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose('\xffstrip')
     if callback is None:
-        callback = stripcallback(ui, repo, True, topic)
+        callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose('\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
 def stripmanifest(repo, striprev, tr, files):
-    revlog = repo.manifestlog._revlog
+    revlog = repo.manifestlog.getstorage(b'')
     revlog.strip(striprev, tr)
     striptrees(repo, tr, striprev, files)
 
@@ -332,7 +339,7 @@
             if (unencoded.startswith('meta/') and
                 unencoded.endswith('00manifest.i')):
                 dir = unencoded[5:-12]
-                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
+                repo.manifestlog.getstorage(dir).strip(striprev, tr)
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
--- a/mercurial/repository.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/repository.py	Mon Oct 22 14:46:06 2018 -0400
@@ -15,6 +15,30 @@
     interfaceutil,
 )
 
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+NARROW_REQUIREMENT = 'narrowhg-experimental'
+
+# Local repository feature string.
+
+# Revlogs are being used for file storage.
+REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
+# The storage part of the repository is shared from an external source.
+REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
+# LFS supported for backing file storage.
+REPO_FEATURE_LFS = b'lfs'
+# Repository supports being stream cloned.
+REPO_FEATURE_STREAM_CLONE = b'streamclone'
+# Files storage may lack data for all ancestors.
+REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
+
+REVISION_FLAG_CENSORED = 1 << 15
+REVISION_FLAG_ELLIPSIS = 1 << 14
+REVISION_FLAG_EXTSTORED = 1 << 13
+
+REVISION_FLAGS_KNOWN = (
+    REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
+
 class ipeerconnection(interfaceutil.Interface):
     """Represents a "connection" to a repository.
 
@@ -290,6 +314,12 @@
     All peer instances must conform to this interface.
     """
 
+class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
+    """Unified peer interface for wire protocol version 2 peers."""
+
+    apidescriptor = interfaceutil.Attribute(
+        """Data structure holding description of server API.""")
+
 @interfaceutil.implementer(ipeerbase)
 class peer(object):
     """Base class for peer repositories."""
@@ -314,6 +344,79 @@
             _('cannot %s; remote repository does not support the %r '
               'capability') % (purpose, name))
 
+class iverifyproblem(interfaceutil.Interface):
+    """Represents a problem with the integrity of the repository.
+
+    Instances of this interface are emitted to describe an integrity issue
+    with a repository (e.g. corrupt storage, missing data, etc).
+
+    Instances are essentially messages associated with severity.
+    """
+    warning = interfaceutil.Attribute(
+        """Message indicating a non-fatal problem.""")
+
+    error = interfaceutil.Attribute(
+        """Message indicating a fatal problem.""")
+
+    node = interfaceutil.Attribute(
+        """Revision encountering the problem.
+
+        ``None`` means the problem doesn't apply to a single revision.
+        """)
+
+class irevisiondelta(interfaceutil.Interface):
+    """Represents a delta between one revision and another.
+
+    Instances convey enough information to allow a revision to be exchanged
+    with another repository.
+
+    Instances represent the fulltext revision data or a delta against
+    another revision. Therefore the ``revision`` and ``delta`` attributes
+    are mutually exclusive.
+
+    Typically used for changegroup generation.
+    """
+
+    node = interfaceutil.Attribute(
+        """20 byte node of this revision.""")
+
+    p1node = interfaceutil.Attribute(
+        """20 byte node of 1st parent of this revision.""")
+
+    p2node = interfaceutil.Attribute(
+        """20 byte node of 2nd parent of this revision.""")
+
+    linknode = interfaceutil.Attribute(
+        """20 byte node of the changelog revision this node is linked to.""")
+
+    flags = interfaceutil.Attribute(
+        """2 bytes of integer flags that apply to this revision.
+
+        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
+        """)
+
+    basenode = interfaceutil.Attribute(
+        """20 byte node of the revision this data is a delta against.
+
+        ``nullid`` indicates that the revision is a full revision and not
+        a delta.
+        """)
+
+    baserevisionsize = interfaceutil.Attribute(
+        """Size of base revision this delta is against.
+
+        May be ``None`` if ``basenode`` is ``nullid``.
+        """)
+
+    revision = interfaceutil.Attribute(
+        """Raw fulltext of revision data for this node.""")
+
+    delta = interfaceutil.Attribute(
+        """Delta between ``basenode`` and ``node``.
+
+        Stored in the bdiff delta format.
+        """)
+
 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.
 
@@ -377,15 +480,22 @@
     * DAG data (storing and querying the relationship between nodes).
     * Metadata to facilitate storage.
     """
-    index = interfaceutil.Attribute(
-        """An ``ifilerevisionssequence`` instance.""")
-
     def __len__():
         """Obtain the number of revisions stored for this file."""
 
     def __iter__():
         """Iterate over revision numbers for this file."""
 
+    def hasnode(node):
+        """Returns a bool indicating if a node is known to this store.
+
+        Implementations must only return True for full, binary node values:
+        hex nodes, revision numbers, and partial node matches must be
+        rejected.
+
+        The null node is never present.
+        """
+
     def revs(start=0, stop=None):
         """Iterate over revision numbers for this file, with control."""
 
@@ -422,9 +532,6 @@
     def linkrev(rev):
         """Obtain the changeset revision number a revision is linked to."""
 
-    def flags(rev):
-        """Obtain flags used to affect storage of a revision."""
-
     def iscensored(rev):
         """Return whether a revision's content has been censored."""
 
@@ -440,14 +547,6 @@
         If ``nullrev`` is in the set, this is equivalent to ``revs()``.
         """
 
-    def headrevs():
-        """Obtain a list of revision numbers that are DAG heads.
-
-        The list is sorted oldest to newest.
-
-        TODO determine if sorting is required.
-        """
-
     def heads(start=None, stop=None):
         """Obtain a list of nodes that are DAG heads, with control.
 
@@ -464,32 +563,16 @@
         Returns a list of nodes.
         """
 
-    def deltaparent(rev):
-        """"Return the revision that is a suitable parent to delta against."""
-
-    def candelta(baserev, rev):
-        """"Whether a delta can be generated between two revisions."""
-
 class ifiledata(interfaceutil.Interface):
     """Storage interface for data storage of a specific file.
 
     This complements ``ifileindex`` and provides an interface for accessing
     data for a tracked file.
     """
-    def rawsize(rev):
-        """The size of the fulltext data for a revision as stored."""
-
     def size(rev):
         """Obtain the fulltext size of file data.
 
-        Any metadata is excluded from size measurements. Use ``rawsize()`` if
-        metadata size is important.
-        """
-
-    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
-        """Validate the stored hash of a given fulltext and node.
-
-        Raises ``error.RevlogError`` is hash validation fails.
+        Any metadata is excluded from size measurements.
         """
 
     def revision(node, raw=False):
@@ -527,13 +610,57 @@
         TODO better document the copy metadata and censoring logic.
         """
 
-    def revdiff(rev1, rev2):
-        """Obtain a delta between two revision numbers.
-
-        Operates on raw data in the store (``revision(node, raw=True)``).
-
-        The returned data is the result of ``bdiff.bdiff`` on the raw
-        revision data.
+    def emitrevisions(nodes,
+                      nodesorder=None,
+                      revisiondata=False,
+                      assumehaveparentrevisions=False,
+                      deltaprevious=False):
+        """Produce ``irevisiondelta`` for revisions.
+
+        Given an iterable of nodes, emits objects conforming to the
+        ``irevisiondelta`` interface that describe revisions in storage.
+
+        This method is a generator.
+
+        The input nodes may be unordered. Implementations must ensure that a
+        node's parents are emitted before the node itself. Transitively, this
+        means that a node may only be emitted once all its ancestors in
+        ``nodes`` have also been emitted.
+
+        By default, emits "index" data (the ``node``, ``p1node``, and
+        ``p2node`` attributes). If ``revisiondata`` is set, revision data
+        will also be present on the emitted objects.
+
+        With default argument values, implementations can choose to emit
+        either fulltext revision data or a delta. When emitting deltas,
+        implementations must consider whether the delta's base revision
+        fulltext is available to the receiver.
+
+        The base revision fulltext is guaranteed to be available if any of
+        the following are met:
+
+        * Its fulltext revision was emitted by this method call.
+        * A delta for that revision was emitted by this method call.
+        * ``assumehaveparentrevisions`` is True and the base revision is a
+          parent of the node.
+
+        ``nodesorder`` can be used to control the order that revisions are
+        emitted. By default, revisions can be reordered as long as they are
+        in DAG topological order (see above). If the value is ``nodes``,
+        the iteration order from ``nodes`` should be used. If the value is
+        ``storage``, then the native order from the backing storage layer
+        is used. (Not all storage layers will have strong ordering, and the
+        behavior of this mode is storage-dependent.) ``nodes`` ordering can force
+        revisions to be emitted before their ancestors, so consumers should
+        use it with care.
+
+        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
+        be set and it is the caller's responsibility to resolve it, if needed.
+
+        If ``deltaprevious`` is True and revision data is requested, all
+        revision data should be emitted as deltas against the revision
+        emitted just prior. The initial revision should be a delta against
+        its 1st parent.
         """
 
 class ifilemutation(interfaceutil.Interface):
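
A hedged sketch of a consumer driving the emitrevisions() API documented above (store is any ifiledata provider and nodes an iterable of binary nodes, both assumed in scope):

    for rev in store.emitrevisions(nodes,
                                   nodesorder=None,
                                   revisiondata=True,
                                   assumehaveparentrevisions=False):
        # rev conforms to irevisiondelta: when revision data is requested,
        # exactly one of revision/delta is populated.
        if rev.revision is not None:
            fulltext = rev.revision
        else:
            # A bdiff delta against rev.basenode, whose fulltext this
            # call guarantees is resolvable by the receiver.
            apply_delta(rev.basenode, rev.delta)   # hypothetical helper
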
@@ -559,7 +686,8 @@
         The data passed in already contains a metadata header, if any.
 
         ``node`` and ``flags`` can be used to define the expected node and
-        the flags to use with storage.
+        the flags to use with storage. ``flags`` is a bitwise value composed
+        of the various ``REVISION_FLAG_*`` constants.
 
         ``add()`` is usually called when adding files from e.g. the working
         directory. ``addrevision()`` is often called by ``add()`` and for
@@ -567,7 +695,8 @@
         applying raw data from a peer repo.
         """
 
-    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
         """Process a series of deltas for storage.
 
         ``deltas`` is an iterable of 7-tuples of
@@ -581,10 +710,32 @@
 
         ``addrevisioncb`` should be called for each node as it is committed.
 
+        ``maybemissingparents`` is a bool indicating whether the incoming
+        data may reference parents/ancestor revisions that aren't present.
+        This flag is set when receiving data into a "shallow" store that
+        doesn't hold all history.
+
         Returns a list of nodes that were processed. A node will be in the list
         even if it existed in the store previously.
         """
 
+    def censorrevision(tr, node, tombstone=b''):
+        """Remove the content of a single revision.
+
+        The specified ``node`` will have its content purged from storage.
+        Future attempts to access the revision data for this node will
+        result in failure.
+
+        A ``tombstone`` message can optionally be stored. This message may be
+        displayed to users when they attempt to access the missing revision
+        data.
+
+        Storage backends may have stored deltas against the previous content
+        in this revision. As part of censoring a revision, these storage
+        backends are expected to rewrite any internally stored deltas such
+        that they no longer reference the deleted content.
+        """
+
     def getstrippoint(minlink):
         """Find the minimum revision that must be stripped to strip a linkrev.
 
@@ -608,27 +759,6 @@
 class ifilestorage(ifileindex, ifiledata, ifilemutation):
     """Complete storage interface for a single tracked file."""
 
-    version = interfaceutil.Attribute(
-        """Version number of storage.
-
-        TODO this feels revlog centric and could likely be removed.
-        """)
-
-    storedeltachains = interfaceutil.Attribute(
-        """Whether the store stores deltas.
-
-        TODO deltachains are revlog centric. This can probably removed
-        once there are better abstractions for obtaining/writing
-        data.
-        """)
-
-    _generaldelta = interfaceutil.Attribute(
-        """Whether deltas can be against any parent revision.
-
-        TODO this is used by changegroup code and it could probably be
-        folded into another API.
-        """)
-
     def files():
         """Obtain paths that are backing storage for this file.
 
@@ -636,10 +766,54 @@
         be a better API for that.
         """
 
-    def checksize():
-        """Obtain the expected sizes of backing files.
-
-        TODO this is used by verify and it should not be part of the interface.
+    def storageinfo(exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        """Obtain information about storage for this file's data.
+
+        Returns a dict describing storage for this tracked path. The keys
+        in the dict map to arguments of the same. The arguments are bools
+        indicating whether to calculate and obtain that data.
+
+        exclusivefiles
+           Iterable of (vfs, path) describing files that are exclusively
+           used to back storage for this tracked path.
+
+        sharedfiles
+           Iterable of (vfs, path) describing files that are used to back
+           storage for this tracked path. Those files may also provide storage
+           for other stored entities.
+
+        revisionscount
+           Number of revisions available for retrieval.
+
+        trackedsize
+           Total size in bytes of all tracked revisions. This is a sum of the
+           length of the fulltext of all revisions.
+
+        storedsize
+           Total size in bytes used to store data for all tracked revisions.
+           This is commonly less than ``trackedsize`` due to internal usage
+           of deltas rather than fulltext revisions.
+
+        Not all storage backends may support all queries or have a reasonable
+        value to use. In that case, the value should be set to ``None`` and
+        callers are expected to handle this special value.
+        """
+
+    def verifyintegrity(state):
+        """Verifies the integrity of file storage.
+
+        ``state`` is a dict holding state of the verifier process. It can be
+        used to communicate data between invocations of multiple storage
+        primitives.
+
+        If individual revisions cannot have their revision content resolved,
+        the method is expected to set the ``skipread`` key to a set of nodes
+        that encountered problems.
+
+        The method yields objects conforming to the ``iverifyproblem``
+        interface.
         """
 
 class idirs(interfaceutil.Interface):
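
A hedged sketch of querying the new storageinfo() API (fl is any ifilestorage provider, assumed in scope):

    info = fl.storageinfo(revisionscount=True, trackedsize=True,
                          storedsize=True)
    # Backends return None for queries they cannot answer.
    if info['trackedsize'] and info['storedsize'] is not None:
        ratio = float(info['storedsize']) / info['trackedsize']
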
@@ -880,18 +1054,222 @@
 class imanifestrevisionwritable(imanifestrevisionbase):
     """Interface representing a manifest revision that can be committed."""
 
-    def write(transaction, linkrev, p1node, p2node, added, removed):
+    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
         """Add this revision to storage.
 
         Takes a transaction object, the changeset revision number it will
         be associated with, its parent nodes, and lists of added and
         removed paths.
 
+        If match is provided, storage can choose not to inspect or write out
+        items that do not match. Storage is still required to be able to provide
+        the full manifest in the future for any directories written (these
+        manifests should not be "narrowed on disk").
+
         Returns the binary node of the created revision.
         """
 
+class imanifeststorage(interfaceutil.Interface):
+    """Storage interface for manifest data."""
+
+    tree = interfaceutil.Attribute(
+        """The path to the directory this manifest tracks.
+
+        The empty bytestring represents the root manifest.
+        """)
+
+    index = interfaceutil.Attribute(
+        """An ``ifilerevisionssequence`` instance.""")
+
+    indexfile = interfaceutil.Attribute(
+        """Path of revlog index file.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    opener = interfaceutil.Attribute(
+        """VFS opener to use to access underlying files used for storage.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    version = interfaceutil.Attribute(
+        """Revlog version number.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    _generaldelta = interfaceutil.Attribute(
+        """Whether generaldelta storage is being used.
+
+        TODO this is revlog specific and should not be exposed.
+        """)
+
+    fulltextcache = interfaceutil.Attribute(
+        """Dict with cache of fulltexts.
+
+        TODO this doesn't feel appropriate for the storage interface.
+        """)
+
+    def __len__():
+        """Obtain the number of revisions stored for this manifest."""
+
+    def __iter__():
+        """Iterate over revision numbers for this manifest."""
+
+    def rev(node):
+        """Obtain the revision number given a binary node.
+
+        Raises ``error.LookupError`` if the node is not known.
+        """
+
+    def node(rev):
+        """Obtain the node value given a revision number.
+
+        Raises ``error.LookupError`` if the revision is not known.
+        """
+
+    def lookup(value):
+        """Attempt to resolve a value to a node.
+
+        Value can be a binary node, hex node, revision number, or a bytes
+        that can be converted to an integer.
+
+        Raises ``error.LookupError`` if a node could not be resolved.
+        """
+
+    def parents(node):
+        """Returns a 2-tuple of parent nodes for a node.
+
+        Values will be ``nullid`` if the parent is empty.
+        """
+
+    def parentrevs(rev):
+        """Like parents() but operates on revision numbers."""
+
+    def linkrev(rev):
+        """Obtain the changeset revision number a revision is linked to."""
+
+    def revision(node, _df=None, raw=False):
+        """Obtain fulltext data for a node."""
+
+    def revdiff(rev1, rev2):
+        """Obtain a delta between two revision numbers.
+
+        The returned data is the result of ``bdiff.bdiff()`` on the raw
+        revision data.
+        """
+
+    def cmp(node, fulltext):
+        """Compare fulltext to another revision.
+
+        Returns True if the fulltext is different from what is stored.
+        """
+
+    def emitrevisions(nodes,
+                      nodesorder=None,
+                      revisiondata=False,
+                      assumehaveparentrevisions=False):
+        """Produce ``irevisiondelta`` describing revisions.
+
+        See the documentation for ``ifiledata`` for more.
+        """
+
+    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
+        """Process a series of deltas for storage.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def rawsize(rev):
+        """Obtain the size of tracked data.
+
+        Is equivalent to ``len(m.revision(node, raw=True))``.
+
+        TODO this method is only used by upgrade code and may be removed.
+        """
+
+    def getstrippoint(minlink):
+        """Find minimum revision that must be stripped to strip a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def strip(minlink, transaction):
+        """Remove storage of items starting at a linkrev.
+
+        See the documentation in ``ifilemutation`` for more.
+        """
+
+    def checksize():
+        """Obtain the expected sizes of backing files.
+
+        TODO this is used by verify and it should not be part of the interface.
+        """
+
+    def files():
+        """Obtain paths that are backing storage for this manifest.
+
+        TODO this is used by verify and there should probably be a better API
+        for this functionality.
+        """
+
+    def deltaparent(rev):
+        """Obtain the revision that a revision is delta'd against.
+
+        TODO delta encoding is an implementation detail of storage and should
+        not be exposed to the storage interface.
+        """
+
+    def clone(tr, dest, **kwargs):
+        """Clone this instance to another."""
+
+    def clearcaches(clear_persisted_data=False):
+        """Clear any caches associated with this instance."""
+
+    def dirlog(d):
+        """Obtain a manifest storage instance for a tree."""
+
+    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
+            match=None):
+        """Add a revision to storage.
+
+        ``m`` is an object conforming to ``imanifestdict``.
+
+        ``link`` is the linkrev revision number.
+
+        ``p1`` and ``p2`` are the parent revision numbers.
+
+        ``added`` and ``removed`` are iterables of added and removed paths,
+        respectively.
+
+        ``readtree`` is a function that can be used to read the child tree(s)
+        when recursively writing the full tree structure when using
+            treemanifests.
+
+        ``match`` is a matcher that can be used to hint to storage that not all
+        paths must be inspected; this is an optimization and can be safely
+        ignored. Note that the storage must still be able to reproduce a full
+        manifest including files that did not match.
+        """
+
+    def storageinfo(exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        """Obtain information about storage for this manifest's data.
+
+        See ``ifilestorage.storageinfo()`` for a description of this method.
+        This one behaves the same way, except for manifest data.
+        """
+
 class imanifestlog(interfaceutil.Interface):
-    """Interface representing a collection of manifest snapshots."""
+    """Interface representing a collection of manifest snapshots.
+
+    Represents the root manifest in a repository.
+
+    Also serves as a means to access nested tree manifests and to cache
+    tree manifests.
+    """
 
     def __getitem__(node):
         """Obtain a manifest instance for a given binary node.
@@ -902,15 +1280,15 @@
         interface.
         """
 
-    def get(dir, node, verify=True):
+    def get(tree, node, verify=True):
         """Retrieve the manifest instance for a given directory and binary node.
 
         ``node`` always refers to the node of the root manifest (which will be
         the only manifest if flat manifests are being used).
 
-        If ``dir`` is the empty string, the root manifest is returned. Otherwise
-        the manifest for the specified directory will be returned (requires
-        tree manifests).
+        If ``tree`` is the empty string, the root manifest is returned.
+        Otherwise the manifest for the specified directory will be returned
+        (requires tree manifests).
 
         If ``verify`` is True, ``LookupError`` is raised if the node is not
         known.
@@ -919,6 +1297,15 @@
         interface.
         """
 
+    def getstorage(tree):
+        """Retrieve an interface to storage for a particular tree.
+
+        If ``tree`` is the empty bytestring, storage for the root manifest will
+        be returned. Otherwise storage for a tree manifest is returned.
+
+        TODO formalize interface for returned object.
+        """
+
     def clearcaches():
         """Clear caches associated with this collection."""
 
@@ -928,24 +1315,21 @@
         Raises ``error.LookupError`` if the node is not known.
         """
 
-    def addgroup(deltas, linkmapper, transaction):
-        """Process a series of deltas for storage.
-
-        ``deltas`` is an iterable of 7-tuples of
-        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
-        to add.
-
-        The ``delta`` field contains ``mpatch`` data to apply to a base
-        revision, identified by ``deltabase``. The base node can be
-        ``nullid``, in which case the header from the delta can be ignored
-        and the delta used as the fulltext.
-
-        Returns a list of nodes that were processed. A node will be in the list
-        even if it existed in the store previously.
+class ilocalrepositoryfilestorage(interfaceutil.Interface):
+    """Local repository sub-interface providing access to tracked file storage.
+
+    This interface defines how a repository accesses storage for a single
+    tracked file path.
+    """
+
+    def file(f):
+        """Obtain a filelog for a tracked path.
+
+        The returned type conforms to the ``ifilestorage`` interface.
         """
 
-class completelocalrepository(interfaceutil.Interface):
-    """Monolithic interface for local repositories.
+class ilocalrepositorymain(interfaceutil.Interface):
+    """Main interface for local repositories.
 
     This currently captures the reality of things - not how things should be.
     """
@@ -956,18 +1340,30 @@
         This is actually a class attribute and is shared among all instances.
         """)
 
-    openerreqs = interfaceutil.Attribute(
-        """Set of requirements that are passed to the opener.
-
-        This is actually a class attribute and is shared among all instances.
-        """)
-
     supported = interfaceutil.Attribute(
         """Set of requirements that this repo is capable of opening.""")
 
     requirements = interfaceutil.Attribute(
         """Set of requirements this repo uses.""")
 
+    features = interfaceutil.Attribute(
+        """Set of "features" this repository supports.
+
+        A "feature" is a loosely-defined term. It can refer to a feature
+        in the classical sense or can describe an implementation detail
+        of the repository. For example, a ``readonly`` feature may denote
+        the repository as read-only. Or a ``revlogfilestore`` feature may
+        denote that the repository is using revlogs for file storage.
+
+        The intent of features is to provide a machine-queryable mechanism
+        for repo consumers to test for various repository characteristics.
+
+        Features are similar to ``requirements``. The main difference is that
+        requirements are stored on-disk and represent requirements to open the
+        repository. Features describe run-time capabilities of the repository
+        and tend to be more granular (and may be derived from requirements).
+        """)
+
     filtername = interfaceutil.Attribute(
         """Name of the repoview that is active on this repo.""")
 
@@ -1167,12 +1563,6 @@
     def wjoin(f, *insidef):
         """Calls self.vfs.reljoin(self.root, f, *insidef)"""
 
-    def file(f):
-        """Obtain a filelog for a tracked path.
-
-        The returned type conforms to the ``ifilestorage`` interface.
-        """
-
     def setparents(p1, p2):
         """Set the parent nodes of the working directory."""
 
@@ -1300,3 +1690,164 @@
 
     def savecommitmessage(text):
         pass
+
+class completelocalrepository(ilocalrepositorymain,
+                              ilocalrepositoryfilestorage):
+    """Complete interface for a local repository."""
+
+class iwireprotocolcommandcacher(interfaceutil.Interface):
+    """Represents a caching backend for wire protocol commands.
+
+    Wire protocol version 2 supports transparent caching of many commands.
+    To leverage this caching, servers can activate objects that cache
+    command responses. Objects handle both cache writing and reading.
+    This interface defines how that response caching mechanism works.
+
+    Wire protocol version 2 commands emit a series of objects that are
+    serialized and sent to the client. The caching layer exists between
+    the invocation of the command function and the sending of its output
+    objects to an output layer.
+
+    Instances of this interface represent a binding to a cache that
+    can serve a response (in place of calling a command function) and/or
+    write responses to a cache for subsequent use.
+
+    When a command request arrives, the following happens with regards
+    to this interface:
+
+    1. The server determines whether the command request is cacheable.
+    2. If it is, an instance of this interface is spawned.
+    3. The cacher is activated in a context manager (``__enter__`` is called).
+    4. A cache *key* for that request is derived. This will call the
+       instance's ``adjustcachekeystate()`` method so the derivation
+       can be influenced.
+    5. The cacher is informed of the derived cache key via a call to
+       ``setcachekey()``.
+    6. The cacher's ``lookup()`` method is called to test for presence of
+       the derived key in the cache.
+    7. If ``lookup()`` returns a hit, that cached result is used in place
+       of invoking the command function. ``__exit__`` is called and the instance
+       is discarded.
+    8. On a cache miss, the command function is invoked.
+    9. ``onobject()`` is called for each object emitted by the command
+       function.
+    10. After the final object is seen, ``onfinished()`` is called.
+    11. ``__exit__`` is called to signal the end of use of the instance.
+
+    Cache *key* derivation can be influenced by the instance.
+
+    Cache keys are initially derived by a deterministic representation of
+    the command request. This includes the command name, arguments, protocol
+    version, etc. This initial key derivation is performed by CBOR-encoding a
+    data structure and feeding that output into a hasher.
+
+    Instances of this interface can influence this initial key derivation
+    via ``adjustcachekeystate()``.
+
+    The instance is informed of the derived cache key via a call to
+    ``setcachekey()``. The instance must store the key locally so it can
+    be consulted on subsequent operations that may require it.
+
+    When constructed, the instance has access to a callable that can be used
+    for encoding response objects. This callable receives as its single
+    argument an object emitted by a command function. It returns an iterable
+    of bytes chunks representing the encoded object. Unless the cacher is
+    caching native Python objects in memory or has a way of reconstructing
+    the original Python objects, implementations typically call this function
+    to produce bytes from the output objects and then store those bytes in
+    the cache. When it comes time to re-emit those bytes, they are wrapped
+    in a ``wireprototypes.encodedresponse`` instance to tell the output
+    layer that they are pre-encoded.
+
+    When receiving the objects emitted by the command function, instances
+    can choose what to do with those objects. The simplest thing to do is
+    re-emit the original objects. They will be forwarded to the output
+    layer and will be processed as if the cacher did not exist.
+
+    Implementations could also choose not to emit objects, instead locally
+    buffering objects or their encoded representation. They could then emit
+    a single "coalesced" object when ``onfinished()`` is called. In
+    this way, the implementation would function as a filtering layer of
+    sorts.
+
+    When caching objects, typically the encoded form of the object will
+    be stored. Keep in mind that if the original object is forwarded to
+    the output layer, it will need to be encoded there as well. For large
+    output, this redundant encoding could add overhead. Implementations
+    could wrap the encoded object data in ``wireprototypes.encodedresponse``
+    instances to avoid this overhead.
+    """
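
A minimal sketch of the initial key derivation described above; the CBOR
encoder is passed in as a callable since the encoder the server actually
uses is not shown here::

    import hashlib

    def derivecachekey(state, encodecbor):
        # hash the deterministic CBOR encoding of the request state
        # (command name, arguments, protocol version, ...)
        h = hashlib.sha1()
        for chunk in encodecbor(state):
            h.update(chunk)
        return h.hexdigest()
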
+    def __enter__():
+        """Marks the instance as active.
+
+        Should return self.
+        """
+
+    def __exit__(exctype, excvalue, exctb):
+        """Called when cacher is no longer used.
+
+        This can be used by implementations to perform cleanup actions (e.g.
+        disconnecting network sockets, aborting a partially cached response).
+        """
+
+    def adjustcachekeystate(state):
+        """Influences cache key derivation by adjusting state to derive key.
+
+        A dict defining the state used to derive the cache key is passed.
+
+        Implementations can modify this dict to record additional state that
+        should influence key derivation.
+
+        Implementations are *highly* encouraged not to modify or delete
+        existing keys.
+        """
+
+    def setcachekey(key):
+        """Record the derived cache key for this request.
+
+        Instances may mutate the key for internal usage, as desired. For
+        example, instances may wish to prepend the repo name, introduce path
+        components for filesystem or URL addressing, etc. Behavior is up to
+        the cache.
+
+        Returns a bool indicating if the request is cacheable by this
+        instance.
+        """
+
+    def lookup():
+        """Attempt to resolve an entry in the cache.
+
+        The instance is instructed to look for the cache key that it was
+        informed about via the call to ``setcachekey()``.
+
+        If there's no cache hit or the cacher doesn't wish to use the cached
+        entry, ``None`` should be returned.
+
+        Otherwise, a dict defining the cached result should be returned. The
+        dict may have the following keys:
+
+        objs
+           An iterable of objects that should be sent to the client. That
+           iterable of objects is expected to be what the command function
+           would return if invoked or an equivalent representation thereof.
+        """
+
+    def onobject(obj):
+        """Called when a new object is emitted from the command function.
+
+        Receives as its argument the object that was emitted from the
+        command function.
+
+        This method returns an iterator of objects to forward to the output
+        layer. The simplest implementation is a generator that just
+        yields ``obj``.
+        """
+
+    def onfinished():
+        """Called after all objects have been emitted from the command function.
+
+        Implementations should return an iterator of objects to forward to
+        the output layer.
+
+        This method can be a generator.
+        """
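
A minimal in-memory implementation sketch of this interface, following the
lifecycle described in the class docstring. The module-level ``_cache`` dict
and the ``encodefn`` constructor argument are illustrative assumptions, not
part of the interface::

    from mercurial import wireprototypes

    _cache = {}  # cache key -> pre-encoded response bytes

    class memorycommandcacher(object):
        def __init__(self, encodefn):
            self._encodefn = encodefn  # encodes one output object to chunks
            self._key = None
            self._buffered = []

        def __enter__(self):
            return self

        def __exit__(self, exctype, excvalue, exctb):
            if exctype is not None:
                self._buffered = []  # abort a partially cached response

        def adjustcachekeystate(self, state):
            # record extra state without modifying existing keys
            state[b'memcacherversion'] = 1

        def setcachekey(self, key):
            self._key = key
            return True  # any derived key is cacheable here

        def lookup(self):
            if self._key not in _cache:
                return None
            # re-emit pre-encoded bytes, wrapped so the output layer
            # does not encode them a second time
            cached = wireprototypes.encodedresponse(_cache[self._key])
            return {'objs': [cached]}

        def onobject(self, obj):
            # buffer the encoded form, forward the original object
            self._buffered.extend(self._encodefn(obj))
            yield obj

        def onfinished(self):
            _cache[self._key] = b''.join(self._buffered)
            return []
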
--- a/mercurial/repoview.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/repoview.py	Mon Oct 22 14:46:06 2018 -0400
@@ -28,7 +28,10 @@
     branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
     changesets as "hideable". Doing so would break multiple code assertions and
     lead to crashes."""
-    return obsolete.getrevs(repo, 'obsolete')
+    obsoletes = obsolete.getrevs(repo, 'obsolete')
+    internals = repo._phasecache.getrevset(repo, phases.localhiddenphases)
+    internals = frozenset(internals)
+    return obsoletes | internals
 
 def pinnedrevs(repo):
     """revisions blocking hidden changesets from being filtered
@@ -128,7 +131,7 @@
             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
     # protect from nullrev root
     firstmutable = max(0, firstmutable)
-    return frozenset(xrange(firstmutable, len(cl)))
+    return frozenset(pycompat.xrange(firstmutable, len(cl)))
 
 # function to compute filtered set
 #
@@ -210,7 +213,7 @@
         unfichangelog = unfi.changelog
         # bypass call to changelog.method
         unfiindex = unfichangelog.index
-        unfilen = len(unfiindex) - 1
+        unfilen = len(unfiindex)
         unfinode = unfiindex[unfilen - 1][7]
 
         revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
--- a/mercurial/revlog.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/revlog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,10 +16,7 @@
 import collections
 import contextlib
 import errno
-import hashlib
-import heapq
 import os
-import re
 import struct
 import zlib
 
@@ -27,100 +24,107 @@
 from .node import (
     bin,
     hex,
+    nullhex,
     nullid,
     nullrev,
+    short,
     wdirfilenodeids,
     wdirhex,
     wdirid,
     wdirrev,
 )
 from .i18n import _
+from .revlogutils.constants import (
+    FLAG_GENERALDELTA,
+    FLAG_INLINE_DATA,
+    REVIDX_DEFAULT_FLAGS,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+    REVIDX_FLAGS_ORDER,
+    REVIDX_ISCENSORED,
+    REVIDX_KNOWN_FLAGS,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+    REVLOGV0,
+    REVLOGV1,
+    REVLOGV1_FLAGS,
+    REVLOGV2,
+    REVLOGV2_FLAGS,
+    REVLOG_DEFAULT_FLAGS,
+    REVLOG_DEFAULT_FORMAT,
+    REVLOG_DEFAULT_VERSION,
+)
 from .thirdparty import (
     attr,
 )
 from . import (
     ancestor,
+    dagop,
     error,
     mdiff,
     policy,
     pycompat,
+    repository,
     templatefilters,
     util,
 )
+from .revlogutils import (
+    deltas as deltautil,
+)
 from .utils import (
+    interfaceutil,
+    storageutil,
     stringutil,
 )
 
+# Blanked usage of all the names to prevent pyflakes complaints.
+# We need these names available in the module for extensions.
+REVLOGV0
+REVLOGV1
+REVLOGV2
+FLAG_INLINE_DATA
+FLAG_GENERALDELTA
+REVLOG_DEFAULT_FLAGS
+REVLOG_DEFAULT_FORMAT
+REVLOG_DEFAULT_VERSION
+REVLOGV1_FLAGS
+REVLOGV2_FLAGS
+REVIDX_ISCENSORED
+REVIDX_ELLIPSIS
+REVIDX_EXTSTORED
+REVIDX_DEFAULT_FLAGS
+REVIDX_FLAGS_ORDER
+REVIDX_KNOWN_FLAGS
+REVIDX_RAWTEXT_CHANGING_FLAGS
+
 parsers = policy.importmod(r'parsers')
 
 # Aliased for performance.
 _zlibdecompress = zlib.decompress
 
-# revlog header flags
-REVLOGV0 = 0
-REVLOGV1 = 1
-# Dummy value until file format is finalized.
-# Reminder: change the bounds check in revlog.__init__ when this is changed.
-REVLOGV2 = 0xDEAD
-FLAG_INLINE_DATA = (1 << 16)
-FLAG_GENERALDELTA = (1 << 17)
-REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
-REVLOG_DEFAULT_FORMAT = REVLOGV1
-REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
-REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
-REVLOGV2_FLAGS = REVLOGV1_FLAGS
-
-# revlog index flags
-REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
-REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
-REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
-REVIDX_DEFAULT_FLAGS = 0
-# stable order in which flags need to be processed and their processors applied
-REVIDX_FLAGS_ORDER = [
-    REVIDX_ISCENSORED,
-    REVIDX_ELLIPSIS,
-    REVIDX_EXTSTORED,
-]
-REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
-# bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
-
 # max size of revlog with inline data
 _maxinline = 131072
 _chunksize = 1048576
 
-RevlogError = error.RevlogError
-LookupError = error.LookupError
-CensoredNodeError = error.CensoredNodeError
-ProgrammingError = error.ProgrammingError
-
 # Store flag processors (cf. 'addflagprocessor()' to register)
 _flagprocessors = {
     REVIDX_ISCENSORED: None,
 }
 
-_mdre = re.compile('\1\n')
-def parsemeta(text):
-    """return (metadatadict, metadatasize)"""
-    # text can be buffer, so we can't use .startswith or .index
-    if text[:2] != '\1\n':
-        return None, None
-    s = _mdre.search(text, 2).start()
-    mtext = text[2:s]
-    meta = {}
-    for l in mtext.splitlines():
-        k, v = l.split(": ", 1)
-        meta[k] = v
-    return meta, (s + 2)
+# Flag processors for REVIDX_ELLIPSIS.
+def ellipsisreadprocessor(rl, text):
+    return text, False
+
+def ellipsiswriteprocessor(rl, text):
+    return text, False
 
-def packmeta(meta, text):
-    keys = sorted(meta)
-    metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
-    return "\1\n%s\1\n%s" % (metatext, text)
+def ellipsisrawprocessor(rl, text):
+    return False
 
-def _censoredtext(text):
-    m, offs = parsemeta(text)
-    return m and "censored" in m
+ellipsisprocessor = (
+    ellipsisreadprocessor,
+    ellipsiswriteprocessor,
+    ellipsisrawprocessor,
+)
 
 def addflagprocessor(flag, processor):
     """Register a flag processor on a revision data flag.
@@ -147,16 +151,19 @@
       debug commands. In this case the transform only indicates whether the
       contents can be used for hash integrity checks.
     """
+    _insertflagprocessor(flag, processor, _flagprocessors)
+
+def _insertflagprocessor(flag, processor, flagprocessors):
     if not flag & REVIDX_KNOWN_FLAGS:
         msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
-        raise ProgrammingError(msg)
+        raise error.ProgrammingError(msg)
     if flag not in REVIDX_FLAGS_ORDER:
         msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
-        raise ProgrammingError(msg)
-    if flag in _flagprocessors:
+        raise error.ProgrammingError(msg)
+    if flag in flagprocessors:
         msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
         raise error.Abort(msg)
-    _flagprocessors[flag] = processor
+    flagprocessors[flag] = processor
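
The ellipsis processors above illustrate the processor contract: a 3-tuple
of (read, write, raw) transforms, where the read and write transforms return
``(text, validatehash)`` and the raw transform returns a bool indicating
whether the rawtext is usable for hash integrity checks. A no-op sketch of
that contract (the flag value is left abstract; real flags must appear in
``REVIDX_FLAGS_ORDER``)::

    def _sketchread(rl, text):
        return text, True    # text unchanged; hash can be validated

    def _sketchwrite(rl, text):
        return text, True

    def _sketchraw(rl, text):
        return True          # rawtext is usable for hash checks

    sketchprocessor = (_sketchread, _sketchwrite, _sketchraw)

    # registered per-instance, e.g. from an extension, with FLAG being a
    # known flag value:
    # _insertflagprocessor(FLAG, sketchprocessor, rl._flagprocessors)
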
 
 def getoffset(q):
     return int(q >> 16)
@@ -169,606 +176,6 @@
         raise ValueError('unknown revlog index flags')
     return int(int(offset) << 16 | type)
 
-_nullhash = hashlib.sha1(nullid)
-
-def hash(text, p1, p2):
-    """generate a hash from the given text and its parent hashes
-
-    This hash combines both the current file contents and its history
-    in a manner that makes it easy to distinguish nodes with the same
-    content in the revision graph.
-    """
-    # As of now, if one of the parent node is null, p2 is null
-    if p2 == nullid:
-        # deep copy of a hash is faster than creating one
-        s = _nullhash.copy()
-        s.update(p1)
-    else:
-        # none of the parent nodes are nullid
-        if p1 < p2:
-            a = p1
-            b = p2
-        else:
-            a = p2
-            b = p1
-        s = hashlib.sha1(a)
-        s.update(b)
-    s.update(text)
-    return s.digest()
-
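
The hashing helper removed here now lives in ``mercurial.utils.storageutil``
as ``hashrevisionsha1()``, which later hunks call; the scheme is unchanged.
A standalone sketch of the same computation, omitting the cached-null-hash
optimization::

    import hashlib

    def hashrevisionsha1(text, p1, p2):
        # SHA-1 over the sorted parent nodes followed by the text, so
        # identical content with different history hashes differently
        if p1 < p2:
            a, b = p1, p2
        else:
            a, b = p2, p1
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()
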
-class _testrevlog(object):
-    """minimalist fake revlog to use in doctests"""
-
-    def __init__(self, data, density=0.5, mingap=0):
-        """data is an list of revision payload boundaries"""
-        self._data = data
-        self._srdensitythreshold = density
-        self._srmingapsize = mingap
-
-    def start(self, rev):
-        if rev == 0:
-            return 0
-        return self._data[rev - 1]
-
-    def end(self, rev):
-        return self._data[rev]
-
-    def length(self, rev):
-        return self.end(rev) - self.start(rev)
-
-    def __len__(self):
-        return len(self._data)
-
-def _trimchunk(revlog, revs, startidx, endidx=None):
-    """returns revs[startidx:endidx] without empty trailing revs
-
-    Doctest Setup
-    >>> revlog = _testrevlog([
-    ...  5,  #0
-    ...  10, #1
-    ...  12, #2
-    ...  12, #3 (empty)
-    ...  17, #4
-    ...  21, #5
-    ...  21, #6 (empty)
-    ... ])
-
-    Contiguous cases:
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
-    [0, 1, 2, 3, 4, 5]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
-    [0, 1, 2, 3, 4]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
-    [0, 1, 2]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
-    [2]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
-    [3, 4, 5]
-    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
-    [3, 4]
-
-    Discontiguous cases:
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
-    [1, 3, 5]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
-    [1]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
-    [3, 5]
-    >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
-    [3, 5]
-    """
-    length = revlog.length
-
-    if endidx is None:
-        endidx = len(revs)
-
-    # If we have a non-emtpy delta candidate, there are nothing to trim
-    if revs[endidx - 1] < len(revlog):
-        # Trim empty revs at the end, except the very first revision of a chain
-        while (endidx > 1
-                and endidx > startidx
-                and length(revs[endidx - 1]) == 0):
-            endidx -= 1
-
-    return revs[startidx:endidx]
-
-def _segmentspan(revlog, revs, deltainfo=None):
-    """Get the byte span of a segment of revisions
-
-    revs is a sorted array of revision numbers
-
-    >>> revlog = _testrevlog([
-    ...  5,  #0
-    ...  10, #1
-    ...  12, #2
-    ...  12, #3 (empty)
-    ...  17, #4
-    ... ])
-
-    >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
-    17
-    >>> _segmentspan(revlog, [0, 4])
-    17
-    >>> _segmentspan(revlog, [3, 4])
-    5
-    >>> _segmentspan(revlog, [1, 2, 3,])
-    7
-    >>> _segmentspan(revlog, [1, 3])
-    7
-    """
-    if not revs:
-        return 0
-    if deltainfo is not None and len(revlog) <= revs[-1]:
-        if len(revs) == 1:
-            return deltainfo.deltalen
-        offset = revlog.end(len(revlog) - 1)
-        end = deltainfo.deltalen + offset
-    else:
-        end = revlog.end(revs[-1])
-    return end - revlog.start(revs[0])
-
-def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
-    """slice revs to reduce the amount of unrelated data to be read from disk.
-
-    ``revs`` is sliced into groups that should be read in one time.
-    Assume that revs are sorted.
-
-    The initial chunk is sliced until the overall density (payload/chunks-span
-    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
-    `revlog._srmingapsize` is skipped.
-
-    If `targetsize` is set, no chunk larger than `targetsize` will be yield.
-    For consistency with other slicing choice, this limit won't go lower than
-    `revlog._srmingapsize`.
-
-    If individual revisions chunk are larger than this limit, they will still
-    be raised individually.
-
-    >>> revlog = _testrevlog([
-    ...  5,  #00 (5)
-    ...  10, #01 (5)
-    ...  12, #02 (2)
-    ...  12, #03 (empty)
-    ...  27, #04 (15)
-    ...  31, #05 (4)
-    ...  31, #06 (empty)
-    ...  42, #07 (11)
-    ...  47, #08 (5)
-    ...  47, #09 (empty)
-    ...  48, #10 (1)
-    ...  51, #11 (3)
-    ...  74, #12 (23)
-    ...  85, #13 (11)
-    ...  86, #14 (1)
-    ...  91, #15 (5)
-    ... ])
-
-    >>> list(_slicechunk(revlog, list(range(16))))
-    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
-    >>> list(_slicechunk(revlog, [0, 15]))
-    [[0], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 15]))
-    [[0], [11], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
-    [[0], [11, 13, 15]]
-    >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
-    [[1, 2], [5, 8, 10, 11], [14]]
-
-    Slicing with a maximum chunk size
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
-    [[0], [11], [13], [15]]
-    >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
-    [[0], [11], [13, 15]]
-    """
-    if targetsize is not None:
-        targetsize = max(targetsize, revlog._srmingapsize)
-    # targetsize should not be specified when evaluating delta candidates:
-    # * targetsize is used to ensure we stay within specification when reading,
-    # * deltainfo is used to pick are good delta chain when writing.
-    if not (deltainfo is None or targetsize is None):
-        msg = 'cannot use `targetsize` with a `deltainfo`'
-        raise error.ProgrammingError(msg)
-    for chunk in _slicechunktodensity(revlog, revs,
-                                      deltainfo,
-                                      revlog._srdensitythreshold,
-                                      revlog._srmingapsize):
-        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
-            yield subchunk
-
-def _slicechunktosize(revlog, revs, targetsize=None):
-    """slice revs to match the target size
-
-    This is intended to be used on chunk that density slicing selected by that
-    are still too large compared to the read garantee of revlog. This might
-    happens when "minimal gap size" interrupted the slicing or when chain are
-    built in a way that create large blocks next to each other.
-
-    >>> revlog = _testrevlog([
-    ...  3,  #0 (3)
-    ...  5,  #1 (2)
-    ...  6,  #2 (1)
-    ...  8,  #3 (2)
-    ...  8,  #4 (empty)
-    ...  11, #5 (3)
-    ...  12, #6 (1)
-    ...  13, #7 (1)
-    ...  14, #8 (1)
-    ... ])
-
-    Cases where chunk is already small enough
-    >>> list(_slicechunktosize(revlog, [0], 3))
-    [[0]]
-    >>> list(_slicechunktosize(revlog, [6, 7], 3))
-    [[6, 7]]
-    >>> list(_slicechunktosize(revlog, [0], None))
-    [[0]]
-    >>> list(_slicechunktosize(revlog, [6, 7], None))
-    [[6, 7]]
-
-    cases where we need actual slicing
-    >>> list(_slicechunktosize(revlog, [0, 1], 3))
-    [[0], [1]]
-    >>> list(_slicechunktosize(revlog, [1, 3], 3))
-    [[1], [3]]
-    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
-    [[1, 2], [3]]
-    >>> list(_slicechunktosize(revlog, [3, 5], 3))
-    [[3], [5]]
-    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
-    [[3], [5]]
-    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
-    [[5], [6, 7, 8]]
-    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
-    [[0], [1, 2], [3], [5], [6, 7, 8]]
-
-    Case with too large individual chunk (must return valid chunk)
-    >>> list(_slicechunktosize(revlog, [0, 1], 2))
-    [[0], [1]]
-    >>> list(_slicechunktosize(revlog, [1, 3], 1))
-    [[1], [3]]
-    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
-    [[3], [5]]
-    """
-    assert targetsize is None or 0 <= targetsize
-    if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
-        yield revs
-        return
-
-    startrevidx = 0
-    startdata = revlog.start(revs[0])
-    endrevidx = 0
-    iterrevs = enumerate(revs)
-    next(iterrevs) # skip first rev.
-    for idx, r in iterrevs:
-        span = revlog.end(r) - startdata
-        if span <= targetsize:
-            endrevidx = idx
-        else:
-            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
-            if chunk:
-                yield chunk
-            startrevidx = idx
-            startdata = revlog.start(r)
-            endrevidx = idx
-    yield _trimchunk(revlog, revs, startrevidx)
-
-def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
-                         mingapsize=0):
-    """slice revs to reduce the amount of unrelated data to be read from disk.
-
-    ``revs`` is sliced into groups that should be read in one time.
-    Assume that revs are sorted.
-
-    ``deltainfo`` is a _deltainfo instance of a revision that we would append
-    to the top of the revlog.
-
-    The initial chunk is sliced until the overall density (payload/chunks-span
-    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
-    skipped.
-
-    >>> revlog = _testrevlog([
-    ...  5,  #00 (5)
-    ...  10, #01 (5)
-    ...  12, #02 (2)
-    ...  12, #03 (empty)
-    ...  27, #04 (15)
-    ...  31, #05 (4)
-    ...  31, #06 (empty)
-    ...  42, #07 (11)
-    ...  47, #08 (5)
-    ...  47, #09 (empty)
-    ...  48, #10 (1)
-    ...  51, #11 (3)
-    ...  74, #12 (23)
-    ...  85, #13 (11)
-    ...  86, #14 (1)
-    ...  91, #15 (5)
-    ... ])
-
-    >>> list(_slicechunktodensity(revlog, list(range(16))))
-    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
-    >>> list(_slicechunktodensity(revlog, [0, 15]))
-    [[0], [15]]
-    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
-    [[0], [11], [15]]
-    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
-    [[0], [11, 13, 15]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
-    [[1, 2], [5, 8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           mingapsize=20))
-    [[1, 2, 3, 5, 8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           targetdensity=0.95))
-    [[1, 2], [5], [8, 10, 11], [14]]
-    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
-    ...                           targetdensity=0.95, mingapsize=12))
-    [[1, 2], [5, 8, 10, 11], [14]]
-    """
-    start = revlog.start
-    length = revlog.length
-
-    if len(revs) <= 1:
-        yield revs
-        return
-
-    nextrev = len(revlog)
-    nextoffset = revlog.end(nextrev - 1)
-
-    if deltainfo is None:
-        deltachainspan = _segmentspan(revlog, revs)
-        chainpayload = sum(length(r) for r in revs)
-    else:
-        deltachainspan = deltainfo.distance
-        chainpayload = deltainfo.compresseddeltalen
-
-    if deltachainspan < mingapsize:
-        yield revs
-        return
-
-    readdata = deltachainspan
-
-    if deltachainspan:
-        density = chainpayload / float(deltachainspan)
-    else:
-        density = 1.0
-
-    if density >= targetdensity:
-        yield revs
-        return
-
-    if deltainfo is not None and deltainfo.deltalen:
-        revs = list(revs)
-        revs.append(nextrev)
-
-    # Store the gaps in a heap to have them sorted by decreasing size
-    gapsheap = []
-    heapq.heapify(gapsheap)
-    prevend = None
-    for i, rev in enumerate(revs):
-        if rev < nextrev:
-            revstart = start(rev)
-            revlen = length(rev)
-        else:
-            revstart = nextoffset
-            revlen = deltainfo.deltalen
-
-        # Skip empty revisions to form larger holes
-        if revlen == 0:
-            continue
-
-        if prevend is not None:
-            gapsize = revstart - prevend
-            # only consider holes that are large enough
-            if gapsize > mingapsize:
-                heapq.heappush(gapsheap, (-gapsize, i))
-
-        prevend = revstart + revlen
-
-    # Collect the indices of the largest holes until the density is acceptable
-    indicesheap = []
-    heapq.heapify(indicesheap)
-    while gapsheap and density < targetdensity:
-        oppgapsize, gapidx = heapq.heappop(gapsheap)
-
-        heapq.heappush(indicesheap, gapidx)
-
-        # the gap sizes are stored as negatives to be sorted decreasingly
-        # by the heap
-        readdata -= (-oppgapsize)
-        if readdata > 0:
-            density = chainpayload / float(readdata)
-        else:
-            density = 1.0
-
-    # Cut the revs at collected indices
-    previdx = 0
-    while indicesheap:
-        idx = heapq.heappop(indicesheap)
-
-        chunk = _trimchunk(revlog, revs, previdx, idx)
-        if chunk:
-            yield chunk
-
-        previdx = idx
-
-    chunk = _trimchunk(revlog, revs, previdx)
-    if chunk:
-        yield chunk
-
-@attr.s(slots=True, frozen=True)
-class _deltainfo(object):
-    distance = attr.ib()
-    deltalen = attr.ib()
-    data = attr.ib()
-    base = attr.ib()
-    chainbase = attr.ib()
-    chainlen = attr.ib()
-    compresseddeltalen = attr.ib()
-
-class _deltacomputer(object):
-    def __init__(self, revlog):
-        self.revlog = revlog
-
-    def _getcandidaterevs(self, p1, p2, cachedelta):
-        """
-        Provides revisions that present an interest to be diffed against,
-        grouped by level of easiness.
-        """
-        revlog = self.revlog
-        gdelta = revlog._generaldelta
-        curr = len(revlog)
-        prev = curr - 1
-        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
-
-        # should we try to build a delta?
-        if prev != nullrev and revlog.storedeltachains:
-            tested = set()
-            # This condition is true most of the time when processing
-            # changegroup data into a generaldelta repo. The only time it
-            # isn't true is if this is the first revision in a delta chain
-            # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
-            if cachedelta and gdelta and revlog._lazydeltabase:
-                # Assume what we received from the server is a good choice
-                # build delta will reuse the cache
-                yield (cachedelta[0],)
-                tested.add(cachedelta[0])
-
-            if gdelta:
-                # exclude already lazy tested base if any
-                parents = [p for p in (p1r, p2r)
-                           if p != nullrev and p not in tested]
-
-                if not revlog._deltabothparents and len(parents) == 2:
-                    parents.sort()
-                    # To minimize the chance of having to build a fulltext,
-                    # pick first whichever parent is closest to us (max rev)
-                    yield (parents[1],)
-                    # then the other one (min rev) if the first did not fit
-                    yield (parents[0],)
-                    tested.update(parents)
-                elif len(parents) > 0:
-                    # Test all parents (1 or 2), and keep the best candidate
-                    yield parents
-                    tested.update(parents)
-
-            if prev not in tested:
-                # other approach failed try against prev to hopefully save us a
-                # fulltext.
-                yield (prev,)
-                tested.add(prev)
-
-    def buildtext(self, revinfo, fh):
-        """Builds a fulltext version of a revision
-
-        revinfo: _revisioninfo instance that contains all needed info
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
-        """
-        btext = revinfo.btext
-        if btext[0] is not None:
-            return btext[0]
-
-        revlog = self.revlog
-        cachedelta = revinfo.cachedelta
-        flags = revinfo.flags
-        node = revinfo.node
-
-        baserev = cachedelta[0]
-        delta = cachedelta[1]
-        # special case deltas which replace entire base; no need to decode
-        # base revision. this neatly avoids censored bases, which throw when
-        # they're decoded.
-        hlen = struct.calcsize(">lll")
-        if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
-                                                   len(delta) - hlen):
-            btext[0] = delta[hlen:]
-        else:
-            # deltabase is rawtext before changed by flag processors, which is
-            # equivalent to non-raw text
-            basetext = revlog.revision(baserev, _df=fh, raw=False)
-            btext[0] = mdiff.patch(basetext, delta)
-
-        try:
-            res = revlog._processflags(btext[0], flags, 'read', raw=True)
-            btext[0], validatehash = res
-            if validatehash:
-                revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
-            if flags & REVIDX_ISCENSORED:
-                raise RevlogError(_('node %s is not censored') % node)
-        except CensoredNodeError:
-            # must pass the censored index flag to add censored revisions
-            if not flags & REVIDX_ISCENSORED:
-                raise
-        return btext[0]
-
-    def _builddeltadiff(self, base, revinfo, fh):
-        revlog = self.revlog
-        t = self.buildtext(revinfo, fh)
-        if revlog.iscensored(base):
-            # deltas based on a censored revision must replace the
-            # full content in one patch, so delta works everywhere
-            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
-            delta = header + t
-        else:
-            ptext = revlog.revision(base, _df=fh, raw=True)
-            delta = mdiff.textdiff(ptext, t)
-
-        return delta
-
-    def _builddeltainfo(self, revinfo, base, fh):
-        # can we use the cached delta?
-        if revinfo.cachedelta and revinfo.cachedelta[0] == base:
-            delta = revinfo.cachedelta[1]
-        else:
-            delta = self._builddeltadiff(base, revinfo, fh)
-        revlog = self.revlog
-        header, data = revlog.compress(delta)
-        deltalen = len(header) + len(data)
-        chainbase = revlog.chainbase(base)
-        offset = revlog.end(len(revlog) - 1)
-        dist = deltalen + offset - revlog.start(chainbase)
-        if revlog._generaldelta:
-            deltabase = base
-        else:
-            deltabase = chainbase
-        chainlen, compresseddeltalen = revlog._chaininfo(base)
-        chainlen += 1
-        compresseddeltalen += deltalen
-        return _deltainfo(dist, deltalen, (header, data), deltabase,
-                         chainbase, chainlen, compresseddeltalen)
-
-    def finddeltainfo(self, revinfo, fh):
-        """Find an acceptable delta against a candidate revision
-
-        revinfo: information about the revision (instance of _revisioninfo)
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
-
-        Returns the first acceptable candidate revision, as ordered by
-        _getcandidaterevs
-        """
-        cachedelta = revinfo.cachedelta
-        p1 = revinfo.p1
-        p2 = revinfo.p2
-        revlog = self.revlog
-
-        deltainfo = None
-        for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
-            nominateddeltas = []
-            for candidaterev in candidaterevs:
-                # no delta for rawtext-changing revs (see "candelta" for why)
-                if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
-                    continue
-                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
-                if revlog._isgooddeltainfo(candidatedelta, revinfo):
-                    nominateddeltas.append(candidatedelta)
-            if nominateddeltas:
-                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
-                break
-
-        return deltainfo
-
 @attr.s(slots=True, frozen=True)
 class _revisioninfo(object):
     """Information about a revision that allows building its fulltext
@@ -788,6 +195,26 @@
     cachedelta = attr.ib()
     flags = attr.ib()
 
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True)
+class revlogrevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+    linknode = attr.ib(default=None)
+
+@interfaceutil.implementer(repository.iverifyproblem)
+@attr.s(frozen=True)
+class revlogproblem(object):
+    warning = attr.ib(default=None)
+    error = attr.ib(default=None)
+    node = attr.ib(default=None)
+
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -800,6 +227,12 @@
 indexformatv0_pack = indexformatv0.pack
 indexformatv0_unpack = indexformatv0.unpack
 
+class revlogoldindex(list):
+    def __getitem__(self, i):
+        if i == -1:
+            return (0, 0, 0, -1, -1, -1, -1, nullid)
+        return list.__getitem__(self, i)
+
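
With this subclass, index ``[-1]`` always resolves to a synthetic
null-revision entry instead of a real trailing slot (the append removed
below), so ``len(index)`` now equals the number of real revisions. A small
sketch of the behavior::

    from mercurial.node import nullid
    from mercurial.revlog import revlogoldindex

    entry = (0, 5, 5, -1, -1, -1, -1, b'\x12' * 20)
    idx = revlogoldindex([entry])
    assert len(idx) == 1           # only real revisions are counted
    assert idx[-1][7] == nullid    # -1 yields the null revision entry
    assert idx[0] == entry
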
 class revlogoldio(object):
     def __init__(self):
         self.size = indexformatv0.size
@@ -821,14 +254,12 @@
             nodemap[e[6]] = n
             n += 1
 
-        # add the magic null revision at -1
-        index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
-        return index, nodemap, None
+        return revlogoldindex(index), nodemap, None
 
     def packentry(self, entry, node, version, rev):
         if gettype(entry[0]):
-            raise RevlogError(_('index entry flags need revlog version 1'))
+            raise error.RevlogError(_('index entry flags need revlog '
+                                      'version 1'))
         e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
               node(entry[5]), node(entry[6]), entry[7])
         return indexformatv0_pack(*e2)
@@ -918,7 +349,7 @@
         self._checkambig = checkambig
         self._censorable = censorable
         # 3-tuple of (node, rev, text) for a raw revision.
-        self._cache = None
+        self._revisioncache = None
         # Maps rev to chain base rev.
         self._chainbasecache = util.lrucachedict(100)
         # 2-tuple of (offset, data) of raw data from the revlog at an offset.
@@ -940,6 +371,10 @@
         self._srdensitythreshold = 0.50
         self._srmingapsize = 262144
 
+        # Make copy of flag processors so each revlog instance can support
+        # custom flags.
+        self._flagprocessors = dict(_flagprocessors)
+
         mmapindexthreshold = None
         v = REVLOG_DEFAULT_VERSION
         opts = getattr(opener, 'options', None)
@@ -973,20 +408,29 @@
                 self._srdensitythreshold = opts['sparse-read-density-threshold']
             if 'sparse-read-min-gap-size' in opts:
                 self._srmingapsize = opts['sparse-read-min-gap-size']
+            if opts.get('enableellipsis'):
+                self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
+
+            # revlog v0 doesn't have flag processors
+            for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
+                _insertflagprocessor(flag, processor, self._flagprocessors)
 
         if self._chunkcachesize <= 0:
-            raise RevlogError(_('revlog chunk cache size %r is not greater '
-                                'than 0') % self._chunkcachesize)
+            raise error.RevlogError(_('revlog chunk cache size %r is not '
+                                      'greater than 0') % self._chunkcachesize)
         elif self._chunkcachesize & (self._chunkcachesize - 1):
-            raise RevlogError(_('revlog chunk cache size %r is not a power '
-                                'of 2') % self._chunkcachesize)
+            raise error.RevlogError(_('revlog chunk cache size %r is not a '
+                                      'power of 2') % self._chunkcachesize)
 
+        self._loadindex(v, mmapindexthreshold)
+
+    def _loadindex(self, v, mmapindexthreshold):
         indexdata = ''
         self._initempty = True
         try:
             with self._indexfp() as f:
                 if (mmapindexthreshold is not None and
-                        self.opener.fstat(f).st_size >= mmapindexthreshold):
+                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                     indexdata = util.buffer(util.mmapread(f))
                 else:
                     indexdata = f.read()
@@ -1004,24 +448,24 @@
         fmt = v & 0xFFFF
         if fmt == REVLOGV0:
             if flags:
-                raise RevlogError(_('unknown flags (%#04x) in version %d '
-                                    'revlog %s') %
-                                  (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
+                                          'revlog %s') %
+                                        (flags >> 16, fmt, self.indexfile))
         elif fmt == REVLOGV1:
             if flags & ~REVLOGV1_FLAGS:
-                raise RevlogError(_('unknown flags (%#04x) in version %d '
-                                    'revlog %s') %
-                                  (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
+                                          'revlog %s') %
+                                        (flags >> 16, fmt, self.indexfile))
         elif fmt == REVLOGV2:
             if flags & ~REVLOGV2_FLAGS:
-                raise RevlogError(_('unknown flags (%#04x) in version %d '
-                                    'revlog %s') %
-                                  (flags >> 16, fmt, self.indexfile))
+                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
+                                          'revlog %s') %
+                                        (flags >> 16, fmt, self.indexfile))
         else:
-            raise RevlogError(_('unknown version (%d) in revlog %s') %
-                              (fmt, self.indexfile))
+            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
+                                    (fmt, self.indexfile))
 
-        self.storedeltachains = True
+        self._storedeltachains = True
 
         self._io = revlogio()
         if self.version == REVLOGV0:
@@ -1029,7 +473,8 @@
         try:
             d = self._io.parseindex(indexdata, self._inline)
         except (ValueError, IndexError):
-            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
+            raise error.RevlogError(_("index %s is corrupted") %
+                                    self.indexfile)
         self.index, nodemap, self._chunkcache = d
         if nodemap is not None:
             self.nodemap = self._nodecache = nodemap
@@ -1071,27 +516,23 @@
                 yield fp
 
     def tip(self):
-        return self.node(len(self.index) - 2)
+        return self.node(len(self.index) - 1)
     def __contains__(self, rev):
         return 0 <= rev < len(self)
     def __len__(self):
-        return len(self.index) - 1
+        return len(self.index)
     def __iter__(self):
-        return iter(xrange(len(self)))
+        return iter(pycompat.xrange(len(self)))
     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
-        step = 1
-        if stop is not None:
-            if start > stop:
-                step = -1
-            stop += step
-        else:
-            stop = len(self)
-        return xrange(start, stop, step)
+        return storageutil.iterrevs(len(self), start=start, stop=stop)
 
     @util.propertycache
     def nodemap(self):
-        self.rev(self.node(0))
+        if self.index:
+            # populate mapping down to the initial node
+            node0 = self.index[0][7]  # get around changelog filtering
+            self.rev(node0)
         return self._nodecache
 
     def hasnode(self, node):
@@ -1114,7 +555,7 @@
         return True
 
     def clearcaches(self):
-        self._cache = None
+        self._revisioncache = None
         self._chainbasecache.clear()
         self._chunkcache = (0, '')
         self._pcache = {}
@@ -1130,21 +571,21 @@
             return self._nodecache[node]
         except TypeError:
             raise
-        except RevlogError:
+        except error.RevlogError:
             # parsers.c radix tree lookup failed
             if node == wdirid or node in wdirfilenodeids:
                 raise error.WdirUnsupported
-            raise LookupError(node, self.indexfile, _('no node'))
+            raise error.LookupError(node, self.indexfile, _('no node'))
         except KeyError:
             # pure python cache lookup failed
             n = self._nodecache
             i = self.index
             p = self._nodepos
             if p is None:
-                p = len(i) - 2
+                p = len(i) - 1
             else:
                 assert p < len(i)
-            for r in xrange(p, -1, -1):
+            for r in pycompat.xrange(p, -1, -1):
                 v = i[r][7]
                 n[v] = r
                 if v == node:
@@ -1152,7 +593,7 @@
                     return r
             if node == wdirid or node in wdirfilenodeids:
                 raise error.WdirUnsupported
-            raise LookupError(node, self.indexfile, _('no node'))
+            raise error.LookupError(node, self.indexfile, _('no node'))
 
     # Accessors for index entries.
 
@@ -1214,6 +655,9 @@
 
         return entry[5], entry[6]
 
+    # fast parentrevs(rev) where rev isn't filtered
+    _uncheckedparentrevs = parentrevs
+
     def node(self, rev):
         try:
             return self.index[rev][7]
@@ -1313,29 +757,21 @@
 
         See the documentation for ancestor.lazyancestors for more details."""
 
-        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
-                                      inclusive=inclusive)
+        # first, make sure start revisions aren't filtered
+        revs = list(revs)
+        checkrev = self.node
+        for r in revs:
+            checkrev(r)
+        # and we're sure ancestors aren't filtered as well
+        if util.safehasattr(parsers, 'rustlazyancestors'):
+            return ancestor.rustlazyancestors(
+                self.index, revs,
+                stoprev=stoprev, inclusive=inclusive)
+        return ancestor.lazyancestors(self._uncheckedparentrevs, revs,
+                                      stoprev=stoprev, inclusive=inclusive)
 
     def descendants(self, revs):
-        """Generate the descendants of 'revs' in revision order.
-
-        Yield a sequence of revision numbers starting with a child of
-        some rev in revs, i.e., each revision is *not* considered a
-        descendant of itself.  Results are ordered by revision number (a
-        topological sort)."""
-        first = min(revs)
-        if first == nullrev:
-            for i in self:
-                yield i
-            return
-
-        seen = set(revs)
-        for i in self.revs(start=first + 1):
-            for x in self.parentrevs(i):
-                if x != nullrev and x in seen:
-                    seen.add(i)
-                    yield i
-                    break
+        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
 
     def findcommonmissing(self, common=None, heads=None):
         """Return a tuple of the ancestors of common and the ancestors of heads
@@ -1656,25 +1092,16 @@
             return [self.node(r) for r in self.headrevs()]
 
         if start is None:
-            start = nullid
-        if stop is None:
-            stop = []
-        stoprevs = set([self.rev(n) for n in stop])
-        startrev = self.rev(start)
-        reachable = {startrev}
-        heads = {startrev}
+            start = nullrev
+        else:
+            start = self.rev(start)
 
-        parentrevs = self.parentrevs
-        for r in self.revs(start=startrev + 1):
-            for p in parentrevs(r):
-                if p in reachable:
-                    if r not in stoprevs:
-                        reachable.add(r)
-                    heads.add(r)
-                if p in heads and p not in stoprevs:
-                    heads.remove(p)
+        stoprevs = set(self.rev(n) for n in stop or [])
 
-        return [self.node(r) for r in heads]
+        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
+                                    stoprevs=stoprevs)
+
+        return [self.node(rev) for rev in revs]
 
     def children(self, node):
         """find the children of a given node"""
@@ -1711,11 +1138,6 @@
         a, b = self.rev(a), self.rev(b)
         return self.isancestorrev(a, b)
 
-    def descendant(self, a, b):
-        msg = 'revlog.descendant is deprecated, use revlog.isancestorrev'
-        util.nouideprecwarn(msg, '4.7')
-        return self.isancestorrev(a, b)
-
     def isancestorrev(self, a, b):
         """return True if revision a is an ancestor of revision b
 
@@ -1755,7 +1177,7 @@
                 node = id
                 self.rev(node) # quick search the index
                 return node
-            except LookupError:
+            except error.LookupError:
                 pass # may be partial hex id
         try:
             # str(rev)
@@ -1775,7 +1197,7 @@
                 node = bin(id)
                 self.rev(node)
                 return node
-            except (TypeError, LookupError):
+            except (TypeError, error.LookupError):
                 pass
 
     def _partialmatch(self, id):
@@ -1786,18 +1208,18 @@
             if partial and self.hasnode(partial):
                 if maybewdir:
                     # single 'ff...' match in radix tree, ambiguous with wdir
-                    raise RevlogError
+                    raise error.RevlogError
                 return partial
             if maybewdir:
                 # no 'ff...' match in radix tree, wdir identified
                 raise error.WdirUnsupported
             return None
-        except RevlogError:
+        except error.RevlogError:
             # parsers.c radix tree lookup gave multiple matches
             # fast path: for unfiltered changelog, radix tree is accurate
             if not getattr(self, 'filteredrevs', None):
-                raise LookupError(id, self.indexfile,
-                                  _('ambiguous identifier'))
+                raise error.AmbiguousPrefixLookupError(
+                    id, self.indexfile, _('ambiguous identifier'))
             # fall through to slow path that filters hidden revisions
         except (AttributeError, ValueError):
             # we are pure python, or key was too short to search radix tree
@@ -1814,12 +1236,14 @@
                 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                 nl = [n for n in nl if hex(n).startswith(id) and
                       self.hasnode(n)]
+                if nullhex.startswith(id):
+                    nl.append(nullid)
                 if len(nl) > 0:
                     if len(nl) == 1 and not maybewdir:
                         self._pcache[id] = nl[0]
                         return nl[0]
-                    raise LookupError(id, self.indexfile,
-                                      _('ambiguous identifier'))
+                    raise error.AmbiguousPrefixLookupError(
+                        id, self.indexfile, _('ambiguous identifier'))
                 if maybewdir:
                     raise error.WdirUnsupported
                 return None
@@ -1838,20 +1262,20 @@
         if n:
             return n
 
-        raise LookupError(id, self.indexfile, _('no match found'))
+        raise error.LookupError(id, self.indexfile, _('no match found'))
 
     def shortest(self, node, minlength=1):
         """Find the shortest unambiguous prefix that matches node."""
         def isvalid(prefix):
             try:
                 node = self._partialmatch(prefix)
-            except error.RevlogError:
+            except error.AmbiguousPrefixLookupError:
                 return False
             except error.WdirUnsupported:
                 # single 'ff...' match
                 return True
             if node is None:
-                raise LookupError(node, self.indexfile, _('no node'))
+                raise error.LookupError(node, self.indexfile, _('no node'))
             return True
 
         def maybewdir(prefix):
@@ -1870,9 +1294,9 @@
             try:
                 length = max(self.index.shortest(node), minlength)
                 return disambiguate(hexnode, length)
-            except RevlogError:
+            except error.RevlogError:
                 if node != wdirid:
-                    raise LookupError(node, self.indexfile, _('no node'))
+                    raise error.LookupError(node, self.indexfile, _('no node'))
             except AttributeError:
                 # Fall through to pure code
                 pass
@@ -1894,7 +1318,7 @@
         returns True if text is different than what is stored.
         """
         p1, p2 = self.parents(node)
-        return hash(text, p1, p2) != node
+        return storageutil.hashrevisionsha1(text, p1, p2) != node
 
     def _cachesegment(self, offset, data):
         """Add a segment to the revlog cache.
@@ -2030,7 +1454,8 @@
         if not self._withsparseread:
             slicedchunks = (revs,)
         else:
-            slicedchunks = _slicechunk(self, revs, targetsize=targetsize)
+            slicedchunks = deltautil.slicechunk(self, revs,
+                                                targetsize=targetsize)
 
         for revschunk in slicedchunks:
             firstrev = revschunk[0]
@@ -2070,6 +1495,25 @@
         else:
             return rev - 1
 
+    def issnapshot(self, rev):
+        """tells whether rev is a snapshot
+        """
+        if rev == nullrev:
+            return True
+        deltap = self.deltaparent(rev)
+        if deltap == nullrev:
+            return True
+        p1, p2 = self.parentrevs(rev)
+        if deltap in (p1, p2):
+            return False
+        return self.issnapshot(deltap)
+
+    def snapshotdepth(self, rev):
+        """number of snapshots in the chain before this one"""
+        if not self.issnapshot(rev):
+            raise error.ProgrammingError('revision %d not a snapshot' % rev)
+        return len(self._deltachain(rev)[0]) - 1
+
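
``issnapshot`` walks the delta chain: a fulltext (delta parent ``nullrev``)
is a snapshot, a delta against one of the revision's own parents is not,
and a delta against any other base is an intermediate snapshot only when
that base is itself a snapshot. A standalone sketch of the same walk over
toy data::

    nullrev = -1  # mirrors mercurial.node.nullrev

    # toy revlog: rev -> delta base, rev -> (p1, p2)
    deltaparent = {0: nullrev, 1: 0, 2: 0}
    parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (1, nullrev)}

    def issnapshot(rev):
        if rev == nullrev or deltaparent[rev] == nullrev:
            return True                      # stored as fulltext
        if deltaparent[rev] in parents[rev]:
            return False                     # plain delta against a parent
        return issnapshot(deltaparent[rev])  # intermediate snapshot?

    assert issnapshot(0)      # fulltext
    assert not issnapshot(1)  # delta against its parent, rev 0
    assert issnapshot(2)      # delta against non-parent snapshot rev 0
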
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions
 
@@ -2103,11 +1547,11 @@
         rawtext = None
         if node == nullid:
             return ""
-        if self._cache:
-            if self._cache[0] == node:
+        if self._revisioncache:
+            if self._revisioncache[0] == node:
                 # _cache only stores rawtext
                 if raw:
-                    return self._cache[2]
+                    return self._revisioncache[2]
                 # duplicated, but good for perf
                 if rev is None:
                     rev = self.rev(node)
@@ -2115,11 +1559,11 @@
                     flags = self.flags(rev)
                 # no extra flags set, no flag processor runs, text = rawtext
                 if flags == REVIDX_DEFAULT_FLAGS:
-                    return self._cache[2]
+                    return self._revisioncache[2]
                 # rawtext is reusable. need to run flag processor
-                rawtext = self._cache[2]
+                rawtext = self._revisioncache[2]
 
-            cachedrev = self._cache[1]
+            cachedrev = self._revisioncache[1]
 
         # look up what we need to read
         if rawtext is None:
@@ -2128,10 +1572,10 @@
 
             chain, stopped = self._deltachain(rev, stoprev=cachedrev)
             if stopped:
-                rawtext = self._cache[2]
+                rawtext = self._revisioncache[2]
 
             # drop cache to save memory
-            self._cache = None
+            self._revisioncache = None
 
             targetsize = None
             rawsize = self.index[rev][2]
@@ -2144,7 +1588,7 @@
                 bins = bins[1:]
 
             rawtext = mdiff.patches(rawtext, bins)
-            self._cache = (node, rev, rawtext)
+            self._revisioncache = (node, rev, rawtext)
 
         if flags is None:
             if rev is None:
@@ -2163,7 +1607,7 @@
         Available as a function so that subclasses can replace the hash
         as needed.
         """
-        return hash(text, p1, p2)
+        return storageutil.hashrevisionsha1(text, p1, p2)
 
     def _processflags(self, text, flags, operation, raw=False):
         """Inspect revision data flags and applies transforms defined by
@@ -2191,11 +1635,12 @@
         if flags == 0:
             return text, True
         if not operation in ('read', 'write'):
-            raise ProgrammingError(_("invalid '%s' operation ") % (operation))
+            raise error.ProgrammingError(_("invalid '%s' operation") %
+                                         operation)
         # Check all flags are known.
         if flags & ~REVIDX_KNOWN_FLAGS:
-            raise RevlogError(_("incompatible revision flag '%#x'") %
-                              (flags & ~REVIDX_KNOWN_FLAGS))
+            raise error.RevlogError(_("incompatible revision flag '%#x'") %
+                                    (flags & ~REVIDX_KNOWN_FLAGS))
         validatehash = True
         # Depending on the operation (read or write), the order might be
         # reversed due to non-commutative transforms.
@@ -2209,11 +1654,11 @@
             if flag & flags:
                 vhash = True
 
-                if flag not in _flagprocessors:
+                if flag not in self._flagprocessors:
                     message = _("missing processor for flag '%#x'") % (flag)
-                    raise RevlogError(message)
+                    raise error.RevlogError(message)
 
-                processor = _flagprocessors[flag]
+                processor = self._flagprocessors[flag]
                 if processor is not None:
                     readtransform, writetransform, rawtransform = processor
 
@@ -2237,13 +1682,22 @@
             if p1 is None and p2 is None:
                 p1, p2 = self.parents(node)
             if node != self.hash(text, p1, p2):
+                # Clear the revision cache on hash failure. The revision cache
+                # only stores the raw revision and clearing the cache does have
+                # the side-effect that we won't have a cache hit when the raw
+                # revision data is accessed. But this case should be rare and
+                # it is extra work to teach the cache about the hash
+                # verification state.
+                if self._revisioncache and self._revisioncache[0] == node:
+                    self._revisioncache = None
+
                 revornode = rev
                 if revornode is None:
                     revornode = templatefilters.short(hex(node))
-                raise RevlogError(_("integrity check failed on %s:%s")
+                raise error.RevlogError(_("integrity check failed on %s:%s")
                     % (self.indexfile, pycompat.bytestr(revornode)))
-        except RevlogError:
-            if self._censorable and _censoredtext(text):
+        except error.RevlogError:
+            if self._censorable and storageutil.iscensoredtext(text):
                 raise error.CensoredNodeError(self.indexfile, node, text)
             raise
 
@@ -2254,13 +1708,15 @@
         revlog has grown too large to be an inline revlog, it will convert it
         to use multiple index and data files.
         """
-        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
+        tiprev = len(self) - 1
+        if (not self._inline or
+            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
             return
 
         trinfo = tr.find(self.indexfile)
         if trinfo is None:
-            raise RevlogError(_("%s not found in the transaction")
-                              % self.indexfile)
+            raise error.RevlogError(_("%s not found in the transaction")
+                                    % self.indexfile)
 
         trindex = trinfo[2]
         if trindex is not None:
@@ -2268,7 +1724,7 @@
         else:
             # revlog was stripped at start of transaction, use all leftover data
             trindex = len(self) - 1
-            dataoff = self.end(-2)
+            dataoff = self.end(tiprev)
 
         tr.add(self.datafile, dataoff)
 
@@ -2294,6 +1750,10 @@
         tr.replace(self.indexfile, trindex * self._io.size)
         self._chunkclear()
 
+    def _nodeduplicatecallback(self, transaction, node):
+        """called when trying to add a node already stored.
+        """
+
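
A minimal sketch of how this hook might be consumed: a hypothetical
subclass (countingrevlog is invented for illustration) that counts the
duplicate submissions reported by addgroup() further below.

    from mercurial.revlog import revlog

    class countingrevlog(revlog):
        """Hypothetical subclass tracking duplicate node submissions."""

        def __init__(self, opener, indexfile):
            revlog.__init__(self, opener, indexfile)
            self.duplicates = 0

        def _nodeduplicatecallback(self, transaction, node):
            # addgroup() calls this when `node` is already stored.
            self.duplicates += 1
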
     def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                     node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
         """add a revision to the log
@@ -2307,12 +1767,12 @@
             computed by default as hash(text, p1, p2), however subclasses might
             use different hashing method (and override checkhash() in such case)
         flags - the known flags to set on the revision
-        deltacomputer - an optional _deltacomputer instance shared between
+        deltacomputer - an optional deltacomputer instance shared between
             multiple calls
         """
         if link == nullrev:
-            raise RevlogError(_("attempted to add linkrev -1 to %s")
-                              % self.indexfile)
+            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
+                                    % self.indexfile)
 
         if flags:
             node = node or self.hash(text, p1, p2)
@@ -2325,7 +1785,7 @@
             cachedelta = None
 
         if len(rawtext) > _maxentrysize:
-            raise RevlogError(
+            raise error.RevlogError(
                 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                 % (self.indexfile, len(rawtext)))
 
@@ -2410,8 +1870,8 @@
             try:
                 return _zlibdecompress(data)
             except zlib.error as e:
-                raise RevlogError(_('revlog decompress error: %s') %
-                                  stringutil.forcebytestr(e))
+                raise error.RevlogError(_('revlog decompress error: %s') %
+                                        stringutil.forcebytestr(e))
         # '\0' is more common than 'u' so it goes first.
         elif t == '\0':
             return data
@@ -2426,58 +1886,10 @@
                 compressor = engine.revlogcompressor()
                 self._decompressors[t] = compressor
             except KeyError:
-                raise RevlogError(_('unknown compression type %r') % t)
+                raise error.RevlogError(_('unknown compression type %r') % t)
 
         return compressor.decompress(data)
 
-    def _isgooddeltainfo(self, deltainfo, revinfo):
-        """Returns True if the given delta is good. Good means that it is within
-        the disk span, disk size, and chain length bounds that we know to be
-        performant."""
-        if deltainfo is None:
-            return False
-
-        # - 'deltainfo.distance' is the distance from the base revision --
-        #   bounding it limits the amount of I/O we need to do.
-        # - 'deltainfo.compresseddeltalen' is the sum of the total size of
-        #   deltas we need to apply -- bounding it limits the amount of CPU
-        #   we consume.
-
-        if self._sparserevlog:
-            # As sparse-read will be used, we can consider that the distance,
-            # instead of being the span of the whole chunk,
-            # is the span of the largest read chunk
-            base = deltainfo.base
-
-            if base != nullrev:
-                deltachain = self._deltachain(base)[0]
-            else:
-                deltachain = []
-
-            chunks = _slicechunk(self, deltachain, deltainfo)
-            all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
-            distance = max(all_span)
-        else:
-            distance = deltainfo.distance
-
-        textlen = revinfo.textlen
-        defaultmax = textlen * 4
-        maxdist = self._maxdeltachainspan
-        if not maxdist:
-            maxdist = distance # ensure the conditional pass
-        maxdist = max(maxdist, defaultmax)
-        if self._sparserevlog and maxdist < self._srmingapsize:
-            # In multiple place, we are ignoring irrelevant data range below a
-            # certain size. Be also apply this tradeoff here and relax span
-            # constraint for small enought content.
-            maxdist = self._srmingapsize
-        if (distance > maxdist or deltainfo.deltalen > textlen or
-            deltainfo.compresseddeltalen > textlen * 2 or
-            (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
-            return False
-
-        return True
-
     def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                      cachedelta, ifh, dfh, alwayscache=False,
                      deltacomputer=None):
@@ -2495,11 +1907,11 @@
           if both are set, they must correspond to each other.
         """
         if node == nullid:
-            raise RevlogError(_("%s: attempt to add null revision") %
-                              (self.indexfile))
+            raise error.RevlogError(_("%s: attempt to add null revision") %
+                                    self.indexfile)
         if node == wdirid or node in wdirfilenodeids:
-            raise RevlogError(_("%s: attempt to add wdir revision") %
-                              (self.indexfile))
+            raise error.RevlogError(_("%s: attempt to add wdir revision") %
+                                    self.indexfile)
 
         if self._inline:
             fh = ifh
@@ -2525,43 +1937,34 @@
             textlen = len(rawtext)
 
         if deltacomputer is None:
-            deltacomputer = _deltacomputer(self)
+            deltacomputer = deltautil.deltacomputer(self)
 
         revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
 
-        # no delta for flag processor revision (see "candelta" for why)
-        # not calling candelta since only one revision needs test, also to
-        # avoid overhead fetching flags again.
-        if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
-            deltainfo = None
-        else:
-            deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
 
-        if deltainfo is not None:
-            base = deltainfo.base
-            chainbase = deltainfo.chainbase
-            data = deltainfo.data
-            l = deltainfo.deltalen
-        else:
-            rawtext = deltacomputer.buildtext(revinfo, fh)
-            data = self.compress(rawtext)
-            l = len(data[1]) + len(data[0])
-            base = chainbase = curr
-
-        e = (offset_type(offset, flags), l, textlen,
-             base, link, p1r, p2r, node)
-        self.index.insert(-1, e)
+        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
+             deltainfo.base, link, p1r, p2r, node)
+        self.index.append(e)
         self.nodemap[node] = curr
 
+        # Reset the pure node cache start lookup offset to account for the
+        # new revision.
+        if self._nodepos is not None:
+            self._nodepos = curr
+
         entry = self._io.packentry(e, self.node, self.version, curr)
-        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
+        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
+                         link, offset)
+
+        rawtext = btext[0]
 
         if alwayscache and rawtext is None:
-            rawtext = deltacomputer._buildtext(revinfo, fh)
+            rawtext = deltacomputer.buildtext(revinfo, fh)
 
         if type(rawtext) == bytes: # only accept immutable objects
-            self._cache = (node, curr, rawtext)
-        self._chainbasecache[curr] = chainbase
+            self._revisioncache = (node, curr, rawtext)
+        self._chainbasecache[curr] = deltainfo.chainbase
         return node
 
     def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
@@ -2627,7 +2030,7 @@
                 dfh.flush()
             ifh.flush()
         try:
-            deltacomputer = _deltacomputer(self)
+            deltacomputer = deltautil.deltacomputer(self)
             # loop through our set of deltas
             for data in deltas:
                 node, p1, p2, linknode, deltabase, delta, flags = data
@@ -2637,17 +2040,18 @@
                 nodes.append(node)
 
                 if node in self.nodemap:
+                    self._nodeduplicatecallback(transaction, node)
                     # this can happen if two branches make the same change
                     continue
 
                 for p in (p1, p2):
                     if p not in self.nodemap:
-                        raise LookupError(p, self.indexfile,
-                                          _('unknown parent'))
+                        raise error.LookupError(p, self.indexfile,
+                                                _('unknown parent'))
 
                 if deltabase not in self.nodemap:
-                    raise LookupError(deltabase, self.indexfile,
-                                      _('unknown delta base'))
+                    raise error.LookupError(deltabase, self.indexfile,
+                                            _('unknown delta base'))
 
                 baserev = self.rev(deltabase)
 
@@ -2705,23 +2109,7 @@
         if not self._censorable:
             return False
 
-        # Fragile heuristic: unless new file meta keys are added alphabetically
-        # preceding "censored", all censored revisions are prefixed by
-        # "\1\ncensored:". A delta producing such a censored revision must be a
-        # full-replacement delta, so we inspect the first and only patch in the
-        # delta for this prefix.
-        hlen = struct.calcsize(">lll")
-        if len(delta) <= hlen:
-            return False
-
-        oldlen = self.rawsize(baserev)
-        newlen = len(delta) - hlen
-        if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
-            return False
-
-        add = "\1\ncensored:"
-        addlen = len(add)
-        return newlen >= addlen and delta[hlen:hlen + addlen] == add
+        return storageutil.deltaiscensored(delta, baserev, self.rawsize)
 
     def getstrippoint(self, minlink):
         """find the minimum rev that must be stripped to strip the linkrev
@@ -2729,39 +2117,9 @@
         Returns a tuple containing the minimum rev and a set of all revs that
         have linkrevs that will be broken by this strip.
         """
-        brokenrevs = set()
-        strippoint = len(self)
-
-        heads = {}
-        futurelargelinkrevs = set()
-        for head in self.headrevs():
-            headlinkrev = self.linkrev(head)
-            heads[head] = headlinkrev
-            if headlinkrev >= minlink:
-                futurelargelinkrevs.add(headlinkrev)
-
-        # This algorithm involves walking down the rev graph, starting at the
-        # heads. Since the revs are topologically sorted according to linkrev,
-        # once all head linkrevs are below the minlink, we know there are
-        # no more revs that could have a linkrev greater than minlink.
-        # So we can stop walking.
-        while futurelargelinkrevs:
-            strippoint -= 1
-            linkrev = heads.pop(strippoint)
-
-            if linkrev < minlink:
-                brokenrevs.add(strippoint)
-            else:
-                futurelargelinkrevs.remove(linkrev)
-
-            for p in self.parentrevs(strippoint):
-                if p != nullrev:
-                    plinkrev = self.linkrev(p)
-                    heads[p] = plinkrev
-                    if plinkrev >= minlink:
-                        futurelargelinkrevs.add(plinkrev)
-
-        return strippoint, brokenrevs
+        return storageutil.resolvestripinfo(minlink, len(self) - 1,
+                                            self.headrevs(),
+                                            self.linkrev, self.parentrevs)
 
     def strip(self, minlink, transaction):
         """truncate the revlog on the first revision with a linkrev >= minlink
@@ -2795,10 +2153,10 @@
         transaction.add(self.indexfile, end)
 
         # then reset internal state in memory to forget those revisions
-        self._cache = None
+        self._revisioncache = None
         self._chaininfocache = {}
         self._chunkclear()
-        for x in xrange(rev, len(self)):
+        for x in pycompat.xrange(rev, len(self)):
             del self.nodemap[self.node(x)]
 
         del self.index[rev:-1]
@@ -2846,6 +2204,27 @@
             res.append(self.datafile)
         return res
 
+    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
+                      assumehaveparentrevisions=False, deltaprevious=False):
+        if nodesorder not in ('nodes', 'storage', None):
+            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
+                                         nodesorder)
+
+        if nodesorder is None and not self._generaldelta:
+            nodesorder = 'storage'
+
+        return storageutil.emitrevisions(
+            self, nodes, nodesorder, revlogrevisiondelta,
+            deltaparentfn=self.deltaparent,
+            candeltafn=self.candelta,
+            rawsizefn=self.rawsize,
+            revdifffn=self.revdiff,
+            flagsfn=self.flags,
+            sendfulltext=not self._storedeltachains,
+            revisiondata=revisiondata,
+            assumehaveparentrevisions=assumehaveparentrevisions,
+            deltaprevious=deltaprevious)
+
     DELTAREUSEALWAYS = 'always'
     DELTAREUSESAMEREVS = 'samerevs'
     DELTAREUSENEVER = 'never'
@@ -2919,7 +2298,7 @@
             populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                 self.DELTAREUSESAMEREVS)
 
-            deltacomputer = _deltacomputer(destrevlog)
+            deltacomputer = deltautil.deltacomputer(destrevlog)
             index = self.index
             for rev in self:
                 entry = index[rev]
@@ -2970,3 +2349,196 @@
         finally:
             destrevlog._lazydeltabase = oldlazydeltabase
             destrevlog._deltabothparents = oldamd
+
+    def censorrevision(self, tr, censornode, tombstone=b''):
+        if (self.version & 0xFFFF) == REVLOGV0:
+            raise error.RevlogError(_('cannot censor with version %d revlogs') %
+                                    self.version)
+
+        censorrev = self.rev(censornode)
+        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
+
+        if len(tombstone) > self.rawsize(censorrev):
+            raise error.Abort(_('censor tombstone must be no longer than '
+                                'censored data'))
+
+        # Rewriting the revlog in place is hard. Our strategy for censoring is
+        # to create a new revlog, copy all revisions to it, then replace the
+        # revlogs on transaction close.
+
+        newindexfile = self.indexfile + b'.tmpcensored'
+        newdatafile = self.datafile + b'.tmpcensored'
+
+        # This is a bit dangerous. We could easily have a mismatch of state.
+        newrl = revlog(self.opener, newindexfile, newdatafile,
+                       censorable=True)
+        newrl.version = self.version
+        newrl._generaldelta = self._generaldelta
+        newrl._io = self._io
+
+        for rev in self.revs():
+            node = self.node(rev)
+            p1, p2 = self.parents(node)
+
+            if rev == censorrev:
+                newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
+                                     p1, p2, censornode, REVIDX_ISCENSORED)
+
+                if newrl.deltaparent(rev) != nullrev:
+                    raise error.Abort(_('censored revision stored as delta; '
+                                        'cannot censor'),
+                                      hint=_('censoring of revlogs is not '
+                                             'fully implemented; please report '
+                                             'this bug'))
+                continue
+
+            if self.iscensored(rev):
+                if self.deltaparent(rev) != nullrev:
+                    raise error.Abort(_('cannot censor due to censored '
+                                        'revision having delta stored'))
+                rawtext = self._chunk(rev)
+            else:
+                rawtext = self.revision(rev, raw=True)
+
+            newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
+                                 self.flags(rev))
+
+        tr.addbackup(self.indexfile, location='store')
+        if not self._inline:
+            tr.addbackup(self.datafile, location='store')
+
+        self.opener.rename(newrl.indexfile, self.indexfile)
+        if not self._inline:
+            self.opener.rename(newrl.datafile, self.datafile)
+
+        self.clearcaches()
+        self._loadindex(self.version, None)
+
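
The copy-then-swap strategy above is a general pattern. A standalone
sketch with hypothetical names, using plain files instead of a revlog
(the real code must additionally handle inline revlogs and transaction
backups):

    import os

    def rewritefile(path, records, transform):
        """Rebuild `path` from transformed records, then swap it in."""
        tmppath = path + '.tmpcensored'
        with open(tmppath, 'wb') as out:
            for record in records:
                out.write(transform(record))
        # Replace the original only once the rewrite fully succeeded.
        os.rename(tmppath, path)
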
+    def verifyintegrity(self, state):
+        """Verifies the integrity of the revlog.
+
+        Yields ``revlogproblem`` instances describing problems that are
+        found.
+        """
+        dd, di = self.checksize()
+        if dd:
+            yield revlogproblem(error=_('data length off by %d bytes') % dd)
+        if di:
+            yield revlogproblem(error=_('index contains %d extra bytes') % di)
+
+        version = self.version & 0xFFFF
+
+        # The verifier tells us what version revlog we should be.
+        if version != state['expectedversion']:
+            yield revlogproblem(
+                warning=_("warning: '%s' uses revlog format %d; expected %d") %
+                        (self.indexfile, version, state['expectedversion']))
+
+        state['skipread'] = set()
+
+        for rev in self:
+            node = self.node(rev)
+
+            # Verify contents. 4 cases to care about:
+            #
+            #   common: the most common case
+            #   rename: with a rename
+            #   meta: file content starts with b'\1\n', the metadata
+            #         header defined in filelog.py, but without a rename
+            #   ext: content stored externally
+            #
+            # More formally, their differences are shown below:
+            #
+            #                       | common | rename | meta  | ext
+            #  -------------------------------------------------------
+            #   flags()             | 0      | 0      | 0     | not 0
+            #   renamed()           | False  | True   | False | ?
+            #   rawtext[0:2]=='\1\n'| False  | True   | True  | ?
+            #
+            # "rawtext" means the raw text stored in revlog data, which
+            # could be retrieved by "revision(rev, raw=True)". "text"
+            # mentioned below is "revision(rev, raw=False)".
+            #
+            # There are 3 different lengths stored physically:
+            #  1. L1: rawsize, stored in revlog index
+            #  2. L2: len(rawtext), stored in revlog data
+            #  3. L3: len(text), stored in revlog data if flags==0, or
+            #     possibly somewhere else if flags!=0
+            #
+            # L1 should be equal to L2. L3 could be different from them.
+            # "text" may or may not affect commit hash depending on flag
+            # processors (see revlog.addflagprocessor).
+            #
+            #              | common  | rename | meta  | ext
+            # -------------------------------------------------
+            #    rawsize() | L1      | L1     | L1    | L1
+            #       size() | L1      | L2-LM  | L1(*) | L1 (?)
+            # len(rawtext) | L2      | L2     | L2    | L2
+            #    len(text) | L2      | L2     | L2    | L3
+            #  len(read()) | L2      | L2-LM  | L2-LM | L3 (?)
+            #
+            # LM:  length of metadata, depending on rawtext
+            # (*): not ideal, see comment in filelog.size
+            # (?): could be "- len(meta)" if the resolved content has
+            #      rename metadata
+            #
+            # Checks that need to be done:
+            #  1. length check: L1 == L2, in all cases.
+            #  2. hash check: depending on flag processor, we may need to
+            #     use either "text" (external), or "rawtext" (in revlog).
+
+            try:
+                skipflags = state.get('skipflags', 0)
+                if skipflags:
+                    skipflags &= self.flags(rev)
+
+                if skipflags:
+                    state['skipread'].add(node)
+                else:
+                    # Side-effect: read content and verify hash.
+                    self.revision(node)
+
+                l1 = self.rawsize(rev)
+                l2 = len(self.revision(node, raw=True))
+
+                if l1 != l2:
+                    yield revlogproblem(
+                        error=_('unpacked size is %d, %d expected') % (l2, l1),
+                        node=node)
+
+            except error.CensoredNodeError:
+                if state['erroroncensored']:
+                    yield revlogproblem(error=_('censored file data'),
+                                        node=node)
+                    state['skipread'].add(node)
+            except Exception as e:
+                yield revlogproblem(
+                    error=_('unpacking %s: %s') % (short(node),
+                                                   stringutil.forcebytestr(e)),
+                    node=node)
+                state['skipread'].add(node)
+
+    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
+                    revisionscount=False, trackedsize=False,
+                    storedsize=False):
+        d = {}
+
+        if exclusivefiles:
+            d['exclusivefiles'] = [(self.opener, self.indexfile)]
+            if not self._inline:
+                d['exclusivefiles'].append((self.opener, self.datafile))
+
+        if sharedfiles:
+            d['sharedfiles'] = []
+
+        if revisionscount:
+            d['revisionscount'] = len(self)
+
+        if trackedsize:
+            d['trackedsize'] = sum(map(self.rawsize, iter(self)))
+
+        if storedsize:
+            d['storedsize'] = sum(self.opener.stat(path).st_size
+                                  for path in self.files())
+
+        return d
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/constants.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,54 @@
+# constants.py - constants used for revlog logic
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2018 Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Helper class to compute deltas stored inside revlogs"""
+
+from __future__ import absolute_import
+
+from .. import (
+    repository,
+    util,
+)
+
+# revlog header flags
+REVLOGV0 = 0
+REVLOGV1 = 1
+# Dummy value until file format is finalized.
+# Reminder: change the bounds check in revlog.__init__ when this is changed.
+REVLOGV2 = 0xDEAD
+FLAG_INLINE_DATA = (1 << 16)
+FLAG_GENERALDELTA = (1 << 17)
+REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
+REVLOG_DEFAULT_FORMAT = REVLOGV1
+REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
+REVLOGV2_FLAGS = REVLOGV1_FLAGS
+
+# revlog index flags
+
+# For historical reasons, revlog's internal flags were exposed via the
+# wire protocol and are even exposed in parts of the storage APIs.
+
+# revision has censor metadata, must be verified
+REVIDX_ISCENSORED = repository.REVISION_FLAG_CENSORED
+# revision hash does not match data (narrowhg)
+REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
+# revision data is stored externally
+REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
+REVIDX_DEFAULT_FLAGS = 0
+# stable order in which flags need to be processed and their processors applied
+REVIDX_FLAGS_ORDER = [
+    REVIDX_ISCENSORED,
+    REVIDX_ELLIPSIS,
+    REVIDX_EXTSTORED,
+]
+REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
+# bitmask for flags that could cause the rawdata content to change
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
+
+SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
+
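
For reference, util.bitsfrom OR-combines the flag bits listed in
REVIDX_FLAGS_ORDER; a minimal equivalent of that helper:

    def bitsfrom(container):
        bits = 0
        for bit in container:
            bits |= bit
        return bits

    # so REVIDX_KNOWN_FLAGS == (REVIDX_ISCENSORED | REVIDX_ELLIPSIS |
    #                           REVIDX_EXTSTORED)
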
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/deltas.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,939 @@
+# deltas.py - logic around delta computation for revlog
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2018 Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Helper class to compute deltas stored inside revlogs"""
+
+from __future__ import absolute_import
+
+import collections
+import heapq
+import struct
+
+# import stuff from node for others to import from revlog
+from ..node import (
+    nullrev,
+)
+from ..i18n import _
+
+from .constants import (
+    REVIDX_ISCENSORED,
+    REVIDX_RAWTEXT_CHANGING_FLAGS,
+)
+
+from ..thirdparty import (
+    attr,
+)
+
+from .. import (
+    error,
+    mdiff,
+)
+
+# maximum <delta-chain-data>/<revision-text-length> ratio
+LIMIT_DELTA2TEXT = 2
+
+class _testrevlog(object):
+    """minimalist fake revlog to use in doctests"""
+
+    def __init__(self, data, density=0.5, mingap=0):
+        """data is an list of revision payload boundaries"""
+        self._data = data
+        self._srdensitythreshold = density
+        self._srmingapsize = mingap
+
+    def start(self, rev):
+        if rev == 0:
+            return 0
+        return self._data[rev - 1]
+
+    def end(self, rev):
+        return self._data[rev]
+
+    def length(self, rev):
+        return self.end(rev) - self.start(rev)
+
+    def __len__(self):
+        return len(self._data)
+
+def slicechunk(revlog, revs, deltainfo=None, targetsize=None):
+    """slice revs to reduce the amount of unrelated data to be read from disk.
+
+    ``revs`` is sliced into groups that should be read in a single pass.
+    Assume that revs are sorted.
+
+    The initial chunk is sliced until the overall density (payload/chunks-span
+    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
+    `revlog._srmingapsize` is skipped.
+
+    If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
+    For consistency with other slicing choices, this limit won't go lower than
+    `revlog._srmingapsize`.
+
+    If an individual revision's chunk is larger than this limit, it will still
+    be yielded individually.
+
+    >>> revlog = _testrevlog([
+    ...  5,  #00 (5)
+    ...  10, #01 (5)
+    ...  12, #02 (2)
+    ...  12, #03 (empty)
+    ...  27, #04 (15)
+    ...  31, #05 (4)
+    ...  31, #06 (empty)
+    ...  42, #07 (11)
+    ...  47, #08 (5)
+    ...  47, #09 (empty)
+    ...  48, #10 (1)
+    ...  51, #11 (3)
+    ...  74, #12 (23)
+    ...  85, #13 (11)
+    ...  86, #14 (1)
+    ...  91, #15 (5)
+    ... ])
+
+    >>> list(slicechunk(revlog, list(range(16))))
+    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+    >>> list(slicechunk(revlog, [0, 15]))
+    [[0], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 15]))
+    [[0], [11], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 13, 15]))
+    [[0], [11, 13, 15]]
+    >>> list(slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
+    [[1, 2], [5, 8, 10, 11], [14]]
+
+    Slicing with a maximum chunk size
+    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
+    [[0], [11], [13], [15]]
+    >>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
+    [[0], [11], [13, 15]]
+    """
+    if targetsize is not None:
+        targetsize = max(targetsize, revlog._srmingapsize)
+    # targetsize should not be specified when evaluating delta candidates:
+    # * targetsize is used to ensure we stay within specification when reading,
+    # * deltainfo is used to pick a good delta chain when writing.
+    if not (deltainfo is None or targetsize is None):
+        msg = 'cannot use `targetsize` with a `deltainfo`'
+        raise error.ProgrammingError(msg)
+    for chunk in _slicechunktodensity(revlog, revs,
+                                      deltainfo,
+                                      revlog._srdensitythreshold,
+                                      revlog._srmingapsize):
+        for subchunk in _slicechunktosize(revlog, chunk, targetsize):
+            yield subchunk
+
+def _slicechunktosize(revlog, revs, targetsize=None):
+    """slice revs to match the target size
+
+    This is intended to be used on chunks that density slicing selected, but
+    that are still too large compared to the read guarantee of revlogs. This
+    might happen when the "minimal gap size" interrupted the slicing or when
+    chains are built in a way that creates large blocks next to each other.
+
+    >>> revlog = _testrevlog([
+    ...  3,  #0 (3)
+    ...  5,  #1 (2)
+    ...  6,  #2 (1)
+    ...  8,  #3 (2)
+    ...  8,  #4 (empty)
+    ...  11, #5 (3)
+    ...  12, #6 (1)
+    ...  13, #7 (1)
+    ...  14, #8 (1)
+    ... ])
+
+    Cases where the chunk is already small enough
+    >>> list(_slicechunktosize(revlog, [0], 3))
+    [[0]]
+    >>> list(_slicechunktosize(revlog, [6, 7], 3))
+    [[6, 7]]
+    >>> list(_slicechunktosize(revlog, [0], None))
+    [[0]]
+    >>> list(_slicechunktosize(revlog, [6, 7], None))
+    [[6, 7]]
+
+    Cases where we need actual slicing
+    >>> list(_slicechunktosize(revlog, [0, 1], 3))
+    [[0], [1]]
+    >>> list(_slicechunktosize(revlog, [1, 3], 3))
+    [[1], [3]]
+    >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
+    [[1, 2], [3]]
+    >>> list(_slicechunktosize(revlog, [3, 5], 3))
+    [[3], [5]]
+    >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
+    [[3], [5]]
+    >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
+    [[5], [6, 7, 8]]
+    >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
+    [[0], [1, 2], [3], [5], [6, 7, 8]]
+
+    Cases with a too-large individual chunk (must return valid chunks)
+    >>> list(_slicechunktosize(revlog, [0, 1], 2))
+    [[0], [1]]
+    >>> list(_slicechunktosize(revlog, [1, 3], 1))
+    [[1], [3]]
+    >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
+    [[3], [5]]
+    """
+    assert targetsize is None or 0 <= targetsize
+    if targetsize is None or segmentspan(revlog, revs) <= targetsize:
+        yield revs
+        return
+
+    startrevidx = 0
+    startdata = revlog.start(revs[0])
+    endrevidx = 0
+    iterrevs = enumerate(revs)
+    next(iterrevs) # skip first rev.
+    for idx, r in iterrevs:
+        span = revlog.end(r) - startdata
+        if span <= targetsize:
+            endrevidx = idx
+        else:
+            chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
+            if chunk:
+                yield chunk
+            startrevidx = idx
+            startdata = revlog.start(r)
+            endrevidx = idx
+    yield _trimchunk(revlog, revs, startrevidx)
+
+def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
+                         mingapsize=0):
+    """slice revs to reduce the amount of unrelated data to be read from disk.
+
+    ``revs`` is sliced into groups that should be read in a single pass.
+    Assume that revs are sorted.
+
+    ``deltainfo`` is a _deltainfo instance of a revision that we would append
+    to the top of the revlog.
+
+    The initial chunk is sliced until the overall density (payload/chunks-span
+    ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
+    skipped.
+
+    >>> revlog = _testrevlog([
+    ...  5,  #00 (5)
+    ...  10, #01 (5)
+    ...  12, #02 (2)
+    ...  12, #03 (empty)
+    ...  27, #04 (15)
+    ...  31, #05 (4)
+    ...  31, #06 (empty)
+    ...  42, #07 (11)
+    ...  47, #08 (5)
+    ...  47, #09 (empty)
+    ...  48, #10 (1)
+    ...  51, #11 (3)
+    ...  74, #12 (23)
+    ...  85, #13 (11)
+    ...  86, #14 (1)
+    ...  91, #15 (5)
+    ... ])
+
+    >>> list(_slicechunktodensity(revlog, list(range(16))))
+    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+    >>> list(_slicechunktodensity(revlog, [0, 15]))
+    [[0], [15]]
+    >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
+    [[0], [11], [15]]
+    >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
+    [[0], [11, 13, 15]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
+    [[1, 2], [5, 8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           mingapsize=20))
+    [[1, 2, 3, 5, 8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           targetdensity=0.95))
+    [[1, 2], [5], [8, 10, 11], [14]]
+    >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
+    ...                           targetdensity=0.95, mingapsize=12))
+    [[1, 2], [5, 8, 10, 11], [14]]
+    """
+    start = revlog.start
+    length = revlog.length
+
+    if len(revs) <= 1:
+        yield revs
+        return
+
+    nextrev = len(revlog)
+    nextoffset = revlog.end(nextrev - 1)
+
+    if deltainfo is None:
+        deltachainspan = segmentspan(revlog, revs)
+        chainpayload = sum(length(r) for r in revs)
+    else:
+        deltachainspan = deltainfo.distance
+        chainpayload = deltainfo.compresseddeltalen
+
+    if deltachainspan < mingapsize:
+        yield revs
+        return
+
+    readdata = deltachainspan
+
+    if deltachainspan:
+        density = chainpayload / float(deltachainspan)
+    else:
+        density = 1.0
+
+    if density >= targetdensity:
+        yield revs
+        return
+
+    if deltainfo is not None and deltainfo.deltalen:
+        revs = list(revs)
+        revs.append(nextrev)
+
+    # Store the gaps in a heap to have them sorted by decreasing size
+    gapsheap = []
+    heapq.heapify(gapsheap)
+    prevend = None
+    for i, rev in enumerate(revs):
+        if rev < nextrev:
+            revstart = start(rev)
+            revlen = length(rev)
+        else:
+            revstart = nextoffset
+            revlen = deltainfo.deltalen
+
+        # Skip empty revisions to form larger holes
+        if revlen == 0:
+            continue
+
+        if prevend is not None:
+            gapsize = revstart - prevend
+            # only consider holes that are large enough
+            if gapsize > mingapsize:
+                heapq.heappush(gapsheap, (-gapsize, i))
+
+        prevend = revstart + revlen
+
+    # Collect the indices of the largest holes until the density is acceptable
+    indicesheap = []
+    heapq.heapify(indicesheap)
+    while gapsheap and density < targetdensity:
+        oppgapsize, gapidx = heapq.heappop(gapsheap)
+
+        heapq.heappush(indicesheap, gapidx)
+
+        # the gap sizes are stored as negatives to be sorted decreasingly
+        # by the heap
+        readdata -= (-oppgapsize)
+        if readdata > 0:
+            density = chainpayload / float(readdata)
+        else:
+            density = 1.0
+
+    # Cut the revs at collected indices
+    previdx = 0
+    while indicesheap:
+        idx = heapq.heappop(indicesheap)
+
+        chunk = _trimchunk(revlog, revs, previdx, idx)
+        if chunk:
+            yield chunk
+
+        previdx = idx
+
+    chunk = _trimchunk(revlog, revs, previdx)
+    if chunk:
+        yield chunk
+
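
A standalone sketch of the largest-gap-first idea implemented above,
with invented names and simplified inputs: plain (start, length)
segments, no mingapsize handling, and it returns the bytes actually
read instead of the sliced chunks.

    import heapq

    def readsize(segments, targetdensity):
        """Bytes read after skipping the largest gaps between segments."""
        payload = sum(length for _, length in segments)
        span = segments[-1][0] + segments[-1][1] - segments[0][0]
        gaps = []  # max-heap via negated sizes, as in the code above
        for (s1, l1), (s2, _l2) in zip(segments, segments[1:]):
            gap = s2 - (s1 + l1)
            if gap > 0:
                heapq.heappush(gaps, -gap)
        readdata = span
        while gaps:
            density = payload / float(readdata) if readdata else 1.0
            if density >= targetdensity:
                break
            readdata += heapq.heappop(gaps)  # gap sizes are negated
        return readdata

    # readsize([(0, 5), (5, 5), (40, 5)], 0.5) == 15: the 30-byte gap is
    # skipped, since reading 45 bytes for 15 bytes of payload is too sparse.
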
+def _trimchunk(revlog, revs, startidx, endidx=None):
+    """returns revs[startidx:endidx] without empty trailing revs
+
+    Doctest Setup
+    >>> revlog = _testrevlog([
+    ...  5,  #0
+    ...  10, #1
+    ...  12, #2
+    ...  12, #3 (empty)
+    ...  17, #4
+    ...  21, #5
+    ...  21, #6 (empty)
+    ... ])
+
+    Contiguous cases:
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
+    [0, 1, 2, 3, 4, 5]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
+    [0, 1, 2, 3, 4]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
+    [0, 1, 2]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
+    [2]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
+    [3, 4, 5]
+    >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
+    [3, 4]
+
+    Discontiguous cases:
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
+    [1, 3, 5]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
+    [1]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
+    [3, 5]
+    >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
+    [3, 5]
+    """
+    length = revlog.length
+
+    if endidx is None:
+        endidx = len(revs)
+
+    # If we have a non-empty delta candidate, there is nothing to trim
+    if revs[endidx - 1] < len(revlog):
+        # Trim empty revs at the end, except the very first revision of a chain
+        while (endidx > 1
+                and endidx > startidx
+                and length(revs[endidx - 1]) == 0):
+            endidx -= 1
+
+    return revs[startidx:endidx]
+
+def segmentspan(revlog, revs, deltainfo=None):
+    """Get the byte span of a segment of revisions
+
+    revs is a sorted array of revision numbers
+
+    >>> revlog = _testrevlog([
+    ...  5,  #0
+    ...  10, #1
+    ...  12, #2
+    ...  12, #3 (empty)
+    ...  17, #4
+    ... ])
+
+    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
+    17
+    >>> segmentspan(revlog, [0, 4])
+    17
+    >>> segmentspan(revlog, [3, 4])
+    5
+    >>> segmentspan(revlog, [1, 2, 3,])
+    7
+    >>> segmentspan(revlog, [1, 3])
+    7
+    """
+    if not revs:
+        return 0
+    if deltainfo is not None and len(revlog) <= revs[-1]:
+        if len(revs) == 1:
+            return deltainfo.deltalen
+        offset = revlog.end(len(revlog) - 1)
+        end = deltainfo.deltalen + offset
+    else:
+        end = revlog.end(revs[-1])
+    return end - revlog.start(revs[0])
+
+def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
+    """build full text from a (base, delta) pair and other metadata"""
+    # special case deltas which replace entire base; no need to decode
+    # base revision. this neatly avoids censored bases, which throw when
+    # they're decoded.
+    hlen = struct.calcsize(">lll")
+    if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
+                                               len(delta) - hlen):
+        fulltext = delta[hlen:]
+    else:
+        # deltabase is rawtext before being changed by flag processors,
+        # which is equivalent to non-raw text
+        basetext = revlog.revision(baserev, _df=fh, raw=False)
+        fulltext = mdiff.patch(basetext, delta)
+
+    try:
+        res = revlog._processflags(fulltext, flags, 'read', raw=True)
+        fulltext, validatehash = res
+        if validatehash:
+            revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2)
+        if flags & REVIDX_ISCENSORED:
+            raise error.StorageError(_('node %s is not censored') %
+                                     expectednode)
+    except error.CensoredNodeError:
+        # must pass the censored index flag to add censored revisions
+        if not flags & REVIDX_ISCENSORED:
+            raise
+    return fulltext
+
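
A small sketch of the full-replacement delta shape special-cased above:
one big-endian (start, end, newlength) int32 triple spanning the whole
base, followed by the new text. This mirrors what
mdiff.replacediffheader() produces.

    import struct

    def replacementdelta(oldlen, newtext):
        """Delta that replaces all `oldlen` base bytes with `newtext`."""
        header = struct.pack(">lll", 0, oldlen, len(newtext))
        return header + newtext
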
+@attr.s(slots=True, frozen=True)
+class _deltainfo(object):
+    distance = attr.ib()
+    deltalen = attr.ib()
+    data = attr.ib()
+    base = attr.ib()
+    chainbase = attr.ib()
+    chainlen = attr.ib()
+    compresseddeltalen = attr.ib()
+    snapshotdepth = attr.ib()
+
+def isgooddeltainfo(revlog, deltainfo, revinfo):
+    """Returns True if the given delta is good. Good means that it is within
+    the disk span, disk size, and chain length bounds that we know to be
+    performant."""
+    if deltainfo is None:
+        return False
+
+    # - 'deltainfo.distance' is the distance from the base revision --
+    #   bounding it limits the amount of I/O we need to do.
+    # - 'deltainfo.compresseddeltalen' is the sum of the total size of
+    #   deltas we need to apply -- bounding it limits the amount of CPU
+    #   we consume.
+
+    if revlog._sparserevlog:
+        # As sparse-read will be used, we can consider that the distance,
+        # instead of being the span of the whole chunk,
+        # is the span of the largest read chunk
+        base = deltainfo.base
+
+        if base != nullrev:
+            deltachain = revlog._deltachain(base)[0]
+        else:
+            deltachain = []
+
+        # search for the first non-snapshot revision
+        for idx, r in enumerate(deltachain):
+            if not revlog.issnapshot(r):
+                break
+        deltachain = deltachain[idx:]
+        chunks = slicechunk(revlog, deltachain, deltainfo)
+        all_span = [segmentspan(revlog, revs, deltainfo)
+                    for revs in chunks]
+        distance = max(all_span)
+    else:
+        distance = deltainfo.distance
+
+    textlen = revinfo.textlen
+    defaultmax = textlen * 4
+    maxdist = revlog._maxdeltachainspan
+    if not maxdist:
+        maxdist = distance # ensure the conditional pass
+    maxdist = max(maxdist, defaultmax)
+    if revlog._sparserevlog and maxdist < revlog._srmingapsize:
+        # In multiple places, we ignore irrelevant data ranges below a
+        # certain size. We also apply this tradeoff here and relax the span
+        # constraint for small enough content.
+        maxdist = revlog._srmingapsize
+
+    # Bad delta from read span:
+    #
+    #   If the span of data read is larger than the maximum allowed.
+    if maxdist < distance:
+        return False
+
+    # Bad delta from new delta size:
+    #
+    #   If the delta size is larger than the target text, storing the
+    #   delta will be inefficient.
+    if textlen < deltainfo.deltalen:
+        return False
+
+    # Bad delta from cumulated payload size:
+    #
+    #   If the sum of the deltas gets larger than K * the target text length.
+    if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
+        return False
+
+    # Bad delta from chain length:
+    #
+    #   If the number of deltas in the chain gets too high.
+    if (revlog._maxchainlen
+            and revlog._maxchainlen < deltainfo.chainlen):
+        return False
+
+    # bad delta from intermediate snapshot size limit
+    #
+    #   If an intermediate snapshot size is higher than the limit. The limit
+    #   exists to prevent endless chains of intermediate deltas from being
+    #   created.
+    if (deltainfo.snapshotdepth is not None and
+            (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen):
+        return False
+
+    # bad delta if new intermediate snapshot is larger than the previous
+    # snapshot
+    if (deltainfo.snapshotdepth
+            and revlog.length(deltainfo.base) < deltainfo.deltalen):
+        return False
+
+    return True
+
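
To make the read-span bound concrete, a short worked example with
assumed numbers:

    textlen = 1000
    distance = 5000
    maxdeltachainspan = 0                    # unset
    maxdist = maxdeltachainspan or distance  # 5000: check always passes
    maxdist = max(maxdist, 4 * textlen)      # max(5000, 4000) == 5000
    assert not (maxdist < distance)          # delta accepted on span
    # With maxdeltachainspan = 4096 instead, maxdist would be 4096 and
    # the 5000-byte read span would reject this delta.
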
+def _candidategroups(revlog, textlen, p1, p2, cachedelta):
+    """Provides group of revision to be tested as delta base
+
+    This top level function focus on emitting groups with unique and worthwhile
+    content. See _raw_candidate_groups for details about the group order.
+    """
+    # should we try to build a delta?
+    if not (len(revlog) and revlog._storedeltachains):
+        yield None
+        return
+
+    deltalength = revlog.length
+    deltaparent = revlog.deltaparent
+    good = None
+
+    deltas_limit = textlen * LIMIT_DELTA2TEXT
+
+    tested = set([nullrev])
+    candidates = _refinedgroups(revlog, p1, p2, cachedelta)
+    while True:
+        temptative = candidates.send(good)
+        if temptative is None:
+            break
+        group = []
+        for rev in temptative:
+            # skip over empty deltas (no need to include them in a chain)
+            while not (rev == nullrev or rev in tested or deltalength(rev)):
+                tested.add(rev)
+                rev = deltaparent(rev)
+            # filter out revisions we tested already
+            if rev in tested:
+                continue
+            tested.add(rev)
+            # filter out delta bases that will never produce a good delta
+            if deltas_limit < revlog.length(rev):
+                continue
+            # no need to try a delta against nullrev, this will be done as a
+            # last resort.
+            if rev == nullrev:
+                continue
+            # no delta for rawtext-changing revs (see "candelta" for why)
+            if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
+                continue
+            group.append(rev)
+        if group:
+            # XXX: in the sparse revlog case, group can become large,
+            #      impacting performance. Some bounding or slicing mechanism
+            #      would help to reduce this impact.
+            good = yield tuple(group)
+    yield None
+
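
A minimal standalone sketch (names invented) of the send()-driven
protocol between finddeltainfo() and the candidate generators: the
generator yields groups of candidate bases, and the caller feeds the
best base found so far back in so the producer can refine its next
suggestion.

    def producer():
        good = yield (1, 2)            # initial candidate group
        if good is not None:
            good = yield (good + 10,)  # refine around the accepted base
        yield None                     # no more candidates

    def consumer():
        gen = producer()
        group = next(gen)
        best = None
        while group is not None:
            best = max(group)          # stand-in for delta evaluation
            group = gen.send(best)
        return best

    # consumer() == 12
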
+def _findsnapshots(revlog, cache, start_rev):
+    """find snapshot from start_rev to tip"""
+    deltaparent = revlog.deltaparent
+    issnapshot = revlog.issnapshot
+    for rev in revlog.revs(start_rev):
+        if issnapshot(rev):
+            cache[deltaparent(rev)].append(rev)
+
+def _refinedgroups(revlog, p1, p2, cachedelta):
+    good = None
+    # First we try to reuse the delta contained in the bundle.
+    # (or from the source revlog)
+    #
+    # This logic only applies to general delta repositories and can be disabled
+    # through configuration. Disabling reuse of the source delta is useful
+    # when we want to make sure we recomputed "optimal" deltas.
+    if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
+        # Assume what we received from the server is a good choice
+        # build delta will reuse the cache
+        good = yield (cachedelta[0],)
+        if good is not None:
+            yield None
+            return
+    for candidates in _rawgroups(revlog, p1, p2, cachedelta):
+        good = yield candidates
+        if good is not None:
+            break
+
+    # if we have a refinable value, try to refine it
+    if good is not None and good not in (p1, p2) and revlog.issnapshot(good):
+        # refine snapshot down
+        previous = None
+        while previous != good:
+            previous = good
+            base = revlog.deltaparent(good)
+            if base == nullrev:
+                break
+            good = yield (base,)
+        # refine snapshot up
+        #
+        # XXX the _findsnapshots call can be expensive and is "duplicated" with
+        # the one done in `_rawgroups`. Once we start working on performance,
+        # we should make the two logics share this computation.
+        snapshots = collections.defaultdict(list)
+        _findsnapshots(revlog, snapshots, good + 1)
+        previous = None
+        while good != previous:
+            previous = good
+            children = tuple(sorted(c for c in snapshots[good]))
+            good = yield children
+
+    # we have found nothing
+    yield None
+
+def _rawgroups(revlog, p1, p2, cachedelta):
+    """Provides group of revision to be tested as delta base
+
+    This lower level function focus on emitting delta theorically interresting
+    without looking it any practical details.
+
+    The group order aims at providing fast or small candidates first.
+    """
+    gdelta = revlog._generaldelta
+    sparse = revlog._sparserevlog
+    curr = len(revlog)
+    prev = curr - 1
+    deltachain = lambda rev: revlog._deltachain(rev)[0]
+
+    if gdelta:
+        # exclude already lazy tested base if any
+        parents = [p for p in (p1, p2) if p != nullrev]
+
+        if not revlog._deltabothparents and len(parents) == 2:
+            parents.sort()
+            # To minimize the chance of having to build a fulltext,
+            # pick first whichever parent is closest to us (max rev)
+            yield (parents[1],)
+            # then the other one (min rev) if the first did not fit
+            yield (parents[0],)
+        elif len(parents) > 0:
+            # Test all parents (1 or 2), and keep the best candidate
+            yield parents
+
+    if sparse and parents:
+        snapshots = collections.defaultdict(list) # map: base-rev: snapshot-rev
+        # See if we can use an existing snapshot in the parent chains as a
+        # base for a new intermediate snapshot
+        #
+        # search for snapshot in parents delta chain
+        # map: snapshot-level: snapshot-rev
+        parents_snaps = collections.defaultdict(set)
+        candidate_chains = [deltachain(p) for p in parents]
+        for chain in candidate_chains:
+            for idx, s in enumerate(chain):
+                if not revlog.issnapshot(s):
+                    break
+                parents_snaps[idx].add(s)
+        snapfloor = min(parents_snaps[0]) + 1
+        _findsnapshots(revlog, snapshots, snapfloor)
+        # search for the highest "unrelated" revision
+        #
+        # Adding snapshots used by "unrelated" revisions increases the odds
+        # that we reuse an independent, yet better, snapshot chain.
+        #
+        # XXX instead of building a set of revisions, we could lazily enumerate
+        # over the chains. That would be more efficient, however we stick to
+        # simple code for now.
+        all_revs = set()
+        for chain in candidate_chains:
+            all_revs.update(chain)
+        other = None
+        for r in revlog.revs(prev, snapfloor):
+            if r not in all_revs:
+                other = r
+                break
+        if other is not None:
+            # To avoid unfair competition, we won't use unrelated intermediate
+            # snapshots that are deeper than the ones from the parent delta
+            # chain.
+            max_depth = max(parents_snaps.keys())
+            chain = deltachain(other)
+            for idx, s in enumerate(chain):
+                if s < snapfloor:
+                    continue
+                if max_depth < idx:
+                    break
+                if not revlog.issnapshot(s):
+                    break
+                parents_snaps[idx].add(s)
+        # Test them as possible intermediate snapshot bases.
+        # We test them from highest to lowest level. High level ones are more
+        # likely to result in a small delta.
+        floor = None
+        for idx, snaps in sorted(parents_snaps.items(), reverse=True):
+            siblings = set()
+            for s in snaps:
+                siblings.update(snapshots[s])
+            # Before considering making a new intermediate snapshot, we check
+            # if an existing snapshot, a child of the base we consider, would
+            # be suitable.
+            #
+            # It gives a chance to reuse a delta chain "unrelated" to the
+            # current revision instead of starting our own. Without such
+            # reuse, topological branches would keep reopening new chains,
+            # creating more and more snapshots as the repository grows.
+
+            if floor is not None:
+                # We only do this for siblings created after the one in our
+                # parent's delta chain. Those created before have less chance
+                # of being a valid base since our ancestors had to create a
+                # new snapshot.
+                siblings = [r for r in siblings if floor < r]
+            yield tuple(sorted(siblings))
+            # then test the base from our parent's delta chain.
+            yield tuple(sorted(snaps))
+            floor = min(snaps)
+        # No suitable base found in the parent chain; search whether any full
+        # snapshot emitted since the parent's base would be a suitable base
+        # for an intermediate snapshot.
+        #
+        # It gives a chance to reuse a delta chain unrelated to the current
+        # revision instead of starting our own. Without such reuse,
+        # topological branches would keep reopening new full chains, creating
+        # more and more snapshots as the repository grows.
+        yield tuple(snapshots[nullrev])
+
+    if not sparse:
+        # Other approaches failed; try against prev to hopefully save us a
+        # fulltext.
+        yield (prev,)
+
+class deltacomputer(object):
+    def __init__(self, revlog):
+        self.revlog = revlog
+
+    def buildtext(self, revinfo, fh):
+        """Builds a fulltext version of a revision
+
+        revinfo: _revisioninfo instance that contains all needed info
+        fh:      file handle to either the .i or the .d revlog file,
+                 depending on whether it is inlined or not
+        """
+        btext = revinfo.btext
+        if btext[0] is not None:
+            return btext[0]
+
+        revlog = self.revlog
+        cachedelta = revinfo.cachedelta
+        baserev = cachedelta[0]
+        delta = cachedelta[1]
+
+        fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta,
+                                             revinfo.p1, revinfo.p2,
+                                             revinfo.flags, revinfo.node)
+        return fulltext
+
+    def _builddeltadiff(self, base, revinfo, fh):
+        revlog = self.revlog
+        t = self.buildtext(revinfo, fh)
+        if revlog.iscensored(base):
+            # deltas based on a censored revision must replace the
+            # full content in one patch, so delta works everywhere
+            header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
+            delta = header + t
+        else:
+            ptext = revlog.revision(base, _df=fh, raw=True)
+            delta = mdiff.textdiff(ptext, t)
+
+        return delta
+
+    def _builddeltainfo(self, revinfo, base, fh):
+        # can we use the cached delta?
+        delta = None
+        if revinfo.cachedelta:
+            cachebase, cachediff = revinfo.cachedelta
+            # check if the diff still applies
+            currentbase = cachebase
+            while (currentbase != nullrev
+                    and currentbase != base
+                    and self.revlog.length(currentbase) == 0):
+                currentbase = self.revlog.deltaparent(currentbase)
+            if currentbase == base:
+                delta = revinfo.cachedelta[1]
+        if delta is None:
+            delta = self._builddeltadiff(base, revinfo, fh)
+        revlog = self.revlog
+        header, data = revlog.compress(delta)
+        deltalen = len(header) + len(data)
+        chainbase = revlog.chainbase(base)
+        offset = revlog.end(len(revlog) - 1)
+        dist = deltalen + offset - revlog.start(chainbase)
+        if revlog._generaldelta:
+            deltabase = base
+        else:
+            deltabase = chainbase
+        chainlen, compresseddeltalen = revlog._chaininfo(base)
+        chainlen += 1
+        compresseddeltalen += deltalen
+
+        revlog = self.revlog
+        snapshotdepth = None
+        if deltabase == nullrev:
+            snapshotdepth = 0
+        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
+            # A delta chain should always be one full snapshot,
+            # zero or more semi-snapshots, and zero or more deltas
+            p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
+            if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
+                snapshotdepth = len(revlog._deltachain(deltabase)[0])
+
+        return _deltainfo(dist, deltalen, (header, data), deltabase,
+                          chainbase, chainlen, compresseddeltalen,
+                          snapshotdepth)
+
+    def _fullsnapshotinfo(self, fh, revinfo):
+        curr = len(self.revlog)
+        rawtext = self.buildtext(revinfo, fh)
+        data = self.revlog.compress(rawtext)
+        compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
+        deltabase = chainbase = curr
+        snapshotdepth = 0
+        chainlen = 1
+
+        return _deltainfo(dist, deltalen, data, deltabase,
+                          chainbase, chainlen, compresseddeltalen,
+                          snapshotdepth)
+
+    def finddeltainfo(self, revinfo, fh):
+        """Find an acceptable delta against a candidate revision
+
+        revinfo: information about the revision (instance of _revisioninfo)
+        fh:      file handle to either the .i or the .d revlog file,
+                 depending on whether it is inlined or not
+
+        Returns the first acceptable candidate revision, as ordered by
+        _candidategroups
+
+        If no suitable deltabase is found, we return delta info for a full
+        snapshot.
+        """
+        if not revinfo.textlen:
+            return self._fullsnapshotinfo(fh, revinfo)
+
+        # no delta for flag processor revision (see "candelta" for why)
+        # not calling candelta since only one revision needs test, also to
+        # avoid overhead fetching flags again.
+        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
+            return self._fullsnapshotinfo(fh, revinfo)
+
+        cachedelta = revinfo.cachedelta
+        p1 = revinfo.p1
+        p2 = revinfo.p2
+        revlog = self.revlog
+
+        deltainfo = None
+        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
+        groups = _candidategroups(self.revlog, revinfo.textlen,
+                                             p1r, p2r, cachedelta)
+        candidaterevs = next(groups)
+        while candidaterevs is not None:
+            nominateddeltas = []
+            if deltainfo is not None:
+                # if we already found a good delta,
+                # challenge it against refined candidates
+                nominateddeltas.append(deltainfo)
+            for candidaterev in candidaterevs:
+                candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
+                if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
+                    nominateddeltas.append(candidatedelta)
+            if nominateddeltas:
+                deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
+            if deltainfo is not None:
+                candidaterevs = groups.send(deltainfo.base)
+            else:
+                candidaterevs = next(groups)
+
+        if deltainfo is None:
+            deltainfo = self._fullsnapshotinfo(fh, revinfo)
+        return deltainfo
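
finddeltainfo() above drives _candidategroups as a coroutine: next() requests
the next batch of candidate bases, and once a good delta has been found,
send() feeds the chosen base back so the generator can refine its next batch.
The following is a minimal sketch of that driving protocol only, with a
hypothetical generator standing in for _candidategroups and a toy comparison
standing in for isgooddeltainfo():

    def candidategroups():
        # hypothetical stand-in: a coarse first batch, then one refinement
        # pass around whichever base the caller reports back via send()
        best = yield [3, 7]
        if best is not None:
            yield [best - 1]
        yield None

    groups = candidategroups()
    best = None
    candidates = next(groups)
    while candidates is not None:
        for rev in candidates:
            if best is None or rev > best:  # toy "is a better delta" test
                best = rev
        if best is not None:
            candidates = groups.send(best)
        else:
            candidates = next(groups)
    assert best == 7
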
--- a/mercurial/revset.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/revset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -242,7 +242,7 @@
 
 def listset(repo, subset, *xs, **opts):
     raise error.ParseError(_("can't use a list in this context"),
-                           hint=_('see hg help "revsets.x or y"'))
+                           hint=_('see \'hg help "revsets.x or y"\''))
 
 def keyvaluepair(repo, subset, k, v, order):
     raise error.ParseError(_("can't use a key-value pair in this context"))
@@ -454,6 +454,8 @@
         kind, pattern, matcher = stringutil.stringmatcher(bm)
         bms = set()
         if kind == 'literal':
+            if bm == pattern:
+                pattern = repo._bookmarks.expandname(pattern)
             bmrev = repo._bookmarks.get(pattern, None)
             if not bmrev:
                 raise error.RepoLookupError(_("bookmark '%s' does not exist")
@@ -1558,6 +1560,12 @@
     """helper to select all rev in <targets> phases"""
     return repo._phasecache.getrevset(repo, targets, subset)
 
+@predicate('_phase(idx)', safe=True)
+def phase(repo, subset, x):
+    l = getargs(x, 1, 1, ("_phase requires one argument"))
+    target = getinteger(l[0], ("_phase expects a number"))
+    return _phase(repo, subset, target)
+
 @predicate('draft()', safe=True)
 def draft(repo, subset, x):
     """Changeset in draft phase."""
@@ -1743,6 +1751,16 @@
         return baseset()
     return subset & baseset([l])
 
+@predicate('revset(set)', safe=True, takeorder=True)
+def revsetpredicate(repo, subset, x, order):
+    """Strictly interpret the content as a revset.
+
+    The content of this special predicate will be strictly interpreted as a
+    revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
+    with no possible ambiguity with an "id(0)" bookmark or tag.
+    """
+    return getset(repo, subset, x, order)
+
 @predicate('matching(revision [, field])', safe=True)
 def matching(repo, subset, x):
     """Changesets in which a given set of fields match the set of fields in the
--- a/mercurial/revsetlang.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/revsetlang.py	Mon Oct 22 14:46:06 2018 -0400
@@ -63,7 +63,7 @@
 _syminitletters = set(pycompat.iterbytestr(
     string.ascii_letters.encode('ascii') +
     string.digits.encode('ascii') +
-    '._@')) | set(map(pycompat.bytechr, xrange(128, 256)))
+    '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
 
 # default set of valid characters for non-initial letters of symbols
 _symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
@@ -177,7 +177,7 @@
                         if p: # possible consecutive -
                             yield ('symbol', p, s)
                         s += len(p)
-                        yield ('-', None, pos)
+                        yield ('-', None, s)
                         s += 1
                     if parts[-1]: # possible trailing -
                         yield ('symbol', parts[-1], s)
@@ -355,9 +355,6 @@
     elif op == 'keyvalue':
         return (op, x[1], _analyze(x[2]))
     elif op == 'func':
-        f = getsymbol(x[1])
-        if f == 'revset':
-            return _analyze(x[2])
         return (op, x[1], _analyze(x[2]))
     raise ValueError('invalid operator %r' % op)
 
--- a/mercurial/scmutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/scmutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -21,6 +21,7 @@
     bin,
     hex,
     nullid,
+    nullrev,
     short,
     wdirid,
     wdirrev,
@@ -34,9 +35,11 @@
     obsutil,
     pathutil,
     phases,
+    policy,
     pycompat,
     revsetlang,
     similar,
+    smartset,
     url,
     util,
     vfs,
@@ -52,6 +55,8 @@
 else:
     from . import scmposix as scmplatform
 
+parsers = policy.importmod(r'parsers')
+
 termsize = scmplatform.termsize
 
 class status(tuple):
@@ -166,67 +171,68 @@
     # Mercurial-specific first, followed by built-in and library exceptions
     except error.LockHeld as inst:
         if inst.errno == errno.ETIMEDOUT:
-            reason = _('timed out waiting for lock held by %r') % inst.locker
+            reason = _('timed out waiting for lock held by %r') % (
+                pycompat.bytestr(inst.locker))
         else:
             reason = _('lock held by %r') % inst.locker
-        ui.warn(_("abort: %s: %s\n")
-                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
+        ui.error(_("abort: %s: %s\n") % (
+            inst.desc or stringutil.forcebytestr(inst.filename), reason))
         if not inst.locker:
-            ui.warn(_("(lock might be very busy)\n"))
+            ui.error(_("(lock might be very busy)\n"))
     except error.LockUnavailable as inst:
-        ui.warn(_("abort: could not lock %s: %s\n") %
-                (inst.desc or stringutil.forcebytestr(inst.filename),
-                 encoding.strtolocal(inst.strerror)))
+        ui.error(_("abort: could not lock %s: %s\n") %
+                 (inst.desc or stringutil.forcebytestr(inst.filename),
+                  encoding.strtolocal(inst.strerror)))
     except error.OutOfBandError as inst:
         if inst.args:
             msg = _("abort: remote error:\n")
         else:
             msg = _("abort: remote error\n")
-        ui.warn(msg)
+        ui.error(msg)
         if inst.args:
-            ui.warn(''.join(inst.args))
+            ui.error(''.join(inst.args))
         if inst.hint:
-            ui.warn('(%s)\n' % inst.hint)
+            ui.error('(%s)\n' % inst.hint)
     except error.RepoError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: %s!\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except error.ResponseError as inst:
-        ui.warn(_("abort: %s") % inst.args[0])
+        ui.error(_("abort: %s") % inst.args[0])
         msg = inst.args[1]
         if isinstance(msg, type(u'')):
             msg = pycompat.sysbytes(msg)
         if not isinstance(msg, bytes):
-            ui.warn(" %r\n" % (msg,))
+            ui.error(" %r\n" % (msg,))
         elif not msg:
-            ui.warn(_(" empty string\n"))
+            ui.error(_(" empty string\n"))
         else:
-            ui.warn("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
+            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
     except error.CensoredNodeError as inst:
-        ui.warn(_("abort: file censored %s!\n") % inst)
-    except error.RevlogError as inst:
-        ui.warn(_("abort: %s!\n") % inst)
+        ui.error(_("abort: file censored %s!\n") % inst)
+    except error.StorageError as inst:
+        ui.error(_("abort: %s!\n") % inst)
     except error.InterventionRequired as inst:
-        ui.warn("%s\n" % inst)
+        ui.error("%s\n" % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
         return 1
     except error.WdirUnsupported:
-        ui.warn(_("abort: working directory revision cannot be specified\n"))
+        ui.error(_("abort: working directory revision cannot be specified\n"))
     except error.Abort as inst:
-        ui.warn(_("abort: %s\n") % inst)
+        ui.error(_("abort: %s\n") % inst)
         if inst.hint:
-            ui.warn(_("(%s)\n") % inst.hint)
+            ui.error(_("(%s)\n") % inst.hint)
     except ImportError as inst:
-        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
+        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
         m = stringutil.forcebytestr(inst).split()[-1]
         if m in "mpatch bdiff".split():
-            ui.warn(_("(did you forget to compile extensions?)\n"))
+            ui.error(_("(did you forget to compile extensions?)\n"))
         elif m in "zlib".split():
-            ui.warn(_("(is your Python install correct?)\n"))
+            ui.error(_("(is your Python install correct?)\n"))
     except IOError as inst:
         if util.safehasattr(inst, "code"):
-            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
+            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
         elif util.safehasattr(inst, "reason"):
             try: # usually it is in the form (errno, strerror)
                 reason = inst.reason.args[1]
@@ -236,34 +242,34 @@
             if isinstance(reason, pycompat.unicode):
                 # SSLError of Python 2.7.9 contains a unicode
                 reason = encoding.unitolocal(reason)
-            ui.warn(_("abort: error: %s\n") % reason)
+            ui.error(_("abort: error: %s\n") % reason)
         elif (util.safehasattr(inst, "args")
               and inst.args and inst.args[0] == errno.EPIPE):
             pass
         elif getattr(inst, "strerror", None):
             if getattr(inst, "filename", None):
-                ui.warn(_("abort: %s: %s\n") % (
+                ui.error(_("abort: %s: %s\n") % (
                     encoding.strtolocal(inst.strerror),
                     stringutil.forcebytestr(inst.filename)))
             else:
-                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
         else:
             raise
     except OSError as inst:
         if getattr(inst, "filename", None) is not None:
-            ui.warn(_("abort: %s: '%s'\n") % (
+            ui.error(_("abort: %s: '%s'\n") % (
                 encoding.strtolocal(inst.strerror),
                 stringutil.forcebytestr(inst.filename)))
         else:
-            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
+            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
     except MemoryError:
-        ui.warn(_("abort: out of memory\n"))
+        ui.error(_("abort: out of memory\n"))
     except SystemExit as inst:
         # Commands shouldn't sys.exit directly, but give a return code.
-        # Just in case catch this and and pass exit code to caller.
+        # Just in case, catch this and pass the exit code to the caller.
         return inst.code
     except socket.error as inst:
-        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
+        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
 
     return -1
 
@@ -437,41 +443,114 @@
     return '%d:%s' % (rev, hexfunc(node))
 
 def resolvehexnodeidprefix(repo, prefix):
-    # Uses unfiltered repo because it's faster when prefix is ambiguous/
-    # This matches the shortesthexnodeidprefix() function below.
-    node = repo.unfiltered().changelog._partialmatch(prefix)
+    if (prefix.startswith('x') and
+        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
+        prefix = prefix[1:]
+    try:
+        # Uses unfiltered repo because it's faster when prefix is ambiguous.
+        # This matches the shortesthexnodeidprefix() function below.
+        node = repo.unfiltered().changelog._partialmatch(prefix)
+    except error.AmbiguousPrefixLookupError:
+        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+        if revset:
+            # Clear config to avoid infinite recursion
+            configoverrides = {('experimental',
+                                'revisions.disambiguatewithin'): None}
+            with repo.ui.configoverride(configoverrides):
+                revs = repo.anyrevs([revset], user=True)
+                matches = []
+                for rev in revs:
+                    node = repo.changelog.node(rev)
+                    if hex(node).startswith(prefix):
+                        matches.append(node)
+                if len(matches) == 1:
+                    return matches[0]
+        raise
     if node is None:
         return
     repo.changelog.rev(node)  # make sure node isn't filtered
     return node
 
-def shortesthexnodeidprefix(repo, node, minlength=1):
-    """Find the shortest unambiguous prefix that matches hexnode."""
+def mayberevnum(repo, prefix):
+    """Checks if the given prefix may be mistaken for a revision number"""
+    try:
+        i = int(prefix)
+        # if we are a pure int, then starting with zero will not be
+        # confused as a rev; or, obviously, if the int is larger
+        # than the value of the tip rev. We still need to disambiguate if
+        # prefix == '0', since that *is* a valid revnum.
+        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
+            return False
+        return True
+    except ValueError:
+        return False
+
+def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
+    """Find the shortest unambiguous prefix that matches hexnode.
+
+    If "cache" is not None, it must be a dictionary that can be used for
+    caching between calls to this method.
+    """
     # _partialmatch() of filtered changelog could take O(len(repo)) time,
     # which would be unacceptably slow. so we look for hash collision in
     # unfiltered space, which means some hashes may be slightly longer.
-    cl = repo.unfiltered().changelog
 
-    def isrev(prefix):
-        try:
-            i = int(prefix)
-            # if we are a pure int, then starting with zero will not be
-            # confused as a rev; or, obviously, if the int is larger
-            # than the value of the tip rev
-            if prefix[0:1] == b'0' or i > len(cl):
-                return False
-            return True
-        except ValueError:
-            return False
+    minlength = max(minlength, 1)
 
     def disambiguate(prefix):
         """Disambiguate against revnums."""
+        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
+            if mayberevnum(repo, prefix):
+                return 'x' + prefix
+            else:
+                return prefix
+
         hexnode = hex(node)
         for length in range(len(prefix), len(hexnode) + 1):
             prefix = hexnode[:length]
-            if not isrev(prefix):
+            if not mayberevnum(repo, prefix):
                 return prefix
 
+    cl = repo.unfiltered().changelog
+    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
+    if revset:
+        revs = None
+        if cache is not None:
+            revs = cache.get('disambiguationrevset')
+        if revs is None:
+            revs = repo.anyrevs([revset], user=True)
+            if cache is not None:
+                cache['disambiguationrevset'] = revs
+        if cl.rev(node) in revs:
+            hexnode = hex(node)
+            nodetree = None
+            if cache is not None:
+                nodetree = cache.get('disambiguationnodetree')
+            if not nodetree:
+                try:
+                    nodetree = parsers.nodetree(cl.index, len(revs))
+                except AttributeError:
+                    # no native nodetree
+                    pass
+                else:
+                    for r in revs:
+                        nodetree.insert(r)
+                    if cache is not None:
+                        cache['disambiguationnodetree'] = nodetree
+            if nodetree is not None:
+                length = max(nodetree.shortest(node), minlength)
+                prefix = hexnode[:length]
+                return disambiguate(prefix)
+            for length in range(minlength, len(hexnode) + 1):
+                matches = []
+                prefix = hexnode[:length]
+                for rev in revs:
+                    otherhexnode = repo[rev].hex()
+                    if prefix == otherhexnode[:length]:
+                        matches.append(otherhexnode)
+                if len(matches) == 1:
+                    return disambiguate(prefix)
+
     try:
         return disambiguate(cl.shortest(node, minlength))
     except error.LookupError:
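
Both knobs consulted above live in the [experimental] config section. A
hypothetical configuration enabling them might look like the following; the
revset value is only an example (any user revset works), and 'x'-prefixed
hex node ids are emitted only when prefixhexnode is on:

    [experimental]
    revisions.disambiguatewithin = not public()
    revisions.prefixhexnode = yes
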
@@ -480,8 +559,8 @@
 def isrevsymbol(repo, symbol):
     """Checks if a symbol exists in the repo.
 
-    See revsymbol() for details. Raises error.LookupError if the symbol is an
-    ambiguous nodeid prefix.
+    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
+    symbol is an ambiguous nodeid prefix.
     """
     try:
         revsymbol(repo, symbol)
@@ -657,7 +736,7 @@
     if len(parents) > 1:
         return parents
     if repo.ui.debugflag:
-        return [parents[0], repo['null']]
+        return [parents[0], repo[nullrev]]
     if parents[0].rev() >= intrev(ctx) - 1:
         return []
     return parents
@@ -780,7 +859,7 @@
         return self._revcontains(self._torev(node))
 
 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
-                 fixphase=False, targetphase=None):
+                 fixphase=False, targetphase=None, backup=True):
     """do common cleanups when old nodes are replaced by new nodes
 
     That includes writing obsmarkers or stripping nodes, and moving bookmarks.
@@ -802,39 +881,52 @@
 
     # translate mapping's other forms
     if not util.safehasattr(replacements, 'items'):
-        replacements = {n: () for n in replacements}
+        replacements = {(n,): () for n in replacements}
+    else:
+        # upgrading non tuple "source" to tuple ones for BC
+        repls = {}
+        for key, value in replacements.items():
+            if not isinstance(key, tuple):
+                key = (key,)
+            repls[key] = value
+        replacements = repls
 
     # Calculate bookmark movements
     if moves is None:
         moves = {}
     # Unfiltered repo is needed since nodes in replacements might be hidden.
     unfi = repo.unfiltered()
-    for oldnode, newnodes in replacements.items():
-        if oldnode in moves:
-            continue
-        if len(newnodes) > 1:
-            # usually a split, take the one with biggest rev number
-            newnode = next(unfi.set('max(%ln)', newnodes)).node()
-        elif len(newnodes) == 0:
-            # move bookmark backwards
-            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
-                                  list(replacements)))
-            if roots:
-                newnode = roots[0].node()
+    for oldnodes, newnodes in replacements.items():
+        for oldnode in oldnodes:
+            if oldnode in moves:
+                continue
+            if len(newnodes) > 1:
+                # usually a split, take the one with the biggest rev number
+                newnode = next(unfi.set('max(%ln)', newnodes)).node()
+            elif len(newnodes) == 0:
+                # move bookmark backwards
+                allreplaced = []
+                for rep in replacements:
+                    allreplaced.extend(rep)
+                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
+                                      allreplaced))
+                if roots:
+                    newnode = roots[0].node()
+                else:
+                    newnode = nullid
             else:
-                newnode = nullid
-        else:
-            newnode = newnodes[0]
-        moves[oldnode] = newnode
+                newnode = newnodes[0]
+            moves[oldnode] = newnode
 
     allnewnodes = [n for ns in replacements.values() for n in ns]
     toretract = {}
     toadvance = {}
     if fixphase:
         precursors = {}
-        for oldnode, newnodes in replacements.items():
-            for newnode in newnodes:
-                precursors.setdefault(newnode, []).append(oldnode)
+        for oldnodes, newnodes in replacements.items():
+            for oldnode in oldnodes:
+                for newnode in newnodes:
+                    precursors.setdefault(newnode, []).append(oldnode)
 
         allnewnodes.sort(key=lambda n: unfi[n].rev())
         newphases = {}
@@ -891,21 +983,22 @@
             # unnecessary. That's the "if s or not isobs(n)" check below.
             # Also sort the node in topology order, that might be useful for
             # some obsstore logic.
-            # NOTE: the filtering and sorting might belong to createmarkers.
-            isobs = unfi.obsstore.successors.__contains__
+            # NOTE: the sorting might belong to createmarkers.
             torev = unfi.changelog.rev
-            sortfunc = lambda ns: torev(ns[0])
-            rels = [(unfi[n], tuple(unfi[m] for m in s))
-                    for n, s in sorted(replacements.items(), key=sortfunc)
-                    if s or not isobs(n)]
+            sortfunc = lambda ns: torev(ns[0][0])
+            rels = []
+            for ns, s in sorted(replacements.items(), key=sortfunc):
+                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
+                rels.append(rel)
             if rels:
                 obsolete.createmarkers(repo, rels, operation=operation,
                                        metadata=metadata)
         else:
             from . import repair # avoid import cycle
-            tostrip = list(replacements)
+            tostrip = list(n for ns in replacements for n in ns)
             if tostrip:
-                repair.delayedstrip(repo.ui, repo, tostrip, operation)
+                repair.delayedstrip(repo.ui, repo, tostrip, operation,
+                                    backup=backup)
 
 def addremove(repo, matcher, prefix, opts=None):
     if opts is None:
@@ -952,9 +1045,11 @@
         if repo.ui.verbose or not m.exact(abs):
             if abs in unknownset:
                 status = _('adding %s\n') % m.uipath(abs)
+                label = 'ui.addremove.added'
             else:
                 status = _('removing %s\n') % m.uipath(abs)
-            repo.ui.status(status)
+                label = 'ui.addremove.removed'
+            repo.ui.status(status, label=label)
 
     renames = _findrenames(repo, m, added + unknown, removed + deleted,
                            similarity)
@@ -1007,6 +1102,7 @@
 
     ctx = repo[None]
     dirstate = repo.dirstate
+    matcher = repo.narrowmatch(matcher, includeexact=True)
     walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                 unknown=True, ignored=False, full=False)
     for abs, st in walkresults.iteritems():
@@ -1069,25 +1165,6 @@
         elif not dryrun:
             wctx.copy(origsrc, dst)
 
-def readrequires(opener, supported):
-    '''Reads and parses .hg/requires and checks if all entries found
-    are in the list of supported features.'''
-    requirements = set(opener.read("requires").splitlines())
-    missings = []
-    for r in requirements:
-        if r not in supported:
-            if not r or not r[0:1].isalnum():
-                raise error.RequirementError(_(".hg/requires file is corrupt"))
-            missings.append(r)
-    missings.sort()
-    if missings:
-        raise error.RequirementError(
-            _("repository requires features unknown to this Mercurial: %s")
-            % " ".join(missings),
-            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
-                   " for more information"))
-    return requirements
-
 def writerequires(opener, requirements):
     with opener('requires', 'w') as fp:
         for r in sorted(requirements):
@@ -1282,9 +1359,11 @@
         if spec.startswith("shell:"):
             # external commands should be run relative to the repo root
             cmd = spec[6:]
-            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
+            proc = subprocess.Popen(procutil.tonativestr(cmd),
+                                    shell=True, bufsize=-1,
                                     close_fds=procutil.closefds,
-                                    stdout=subprocess.PIPE, cwd=repo.root)
+                                    stdout=subprocess.PIPE,
+                                    cwd=procutil.tonativestr(repo.root))
             src = proc.stdout
         else:
             # treat as a URL or file
@@ -1542,39 +1621,63 @@
         @reportsummary
         def reportnewcs(repo, tr):
             """Report the range of new revisions pulled/unbundled."""
-            newrevs = tr.changes.get('revs', xrange(0, 0))
-            if not newrevs:
+            origrepolen = tr.changes.get('origrepolen', len(repo))
+            unfi = repo.unfiltered()
+            if origrepolen >= len(unfi):
                 return
 
-            # Compute the bounds of new revisions' range, excluding obsoletes.
-            unfi = repo.unfiltered()
-            revs = unfi.revs('%ld and not obsolete()', newrevs)
-            if not revs:
-                # Got only obsoletes.
-                return
-            minrev, maxrev = repo[revs.min()], repo[revs.max()]
+            # Compute the bounds of new visible revisions' range.
+            revs = smartset.spanset(repo, start=origrepolen)
+            if revs:
+                minrev, maxrev = repo[revs.min()], repo[revs.max()]
 
-            if minrev == maxrev:
-                revrange = minrev
-            else:
-                revrange = '%s:%s' % (minrev, maxrev)
-            repo.ui.status(_('new changesets %s\n') % revrange)
+                if minrev == maxrev:
+                    revrange = minrev
+                else:
+                    revrange = '%s:%s' % (minrev, maxrev)
+                draft = len(repo.revs('%ld and draft()', revs))
+                secret = len(repo.revs('%ld and secret()', revs))
+                if not (draft or secret):
+                    msg = _('new changesets %s\n') % revrange
+                elif draft and secret:
+                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
+                    msg %= (revrange, draft, secret)
+                elif draft:
+                    msg = _('new changesets %s (%d drafts)\n')
+                    msg %= (revrange, draft)
+                elif secret:
+                    msg = _('new changesets %s (%d secrets)\n')
+                    msg %= (revrange, secret)
+                else:
+                    errormsg = 'entered unreachable condition'
+                    raise error.ProgrammingError(errormsg)
+                repo.ui.status(msg)
+
+            # search for new changesets pulled directly as obsolete
+            duplicates = tr.changes.get('revduplicates', ())
+            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
+                                 origrepolen, duplicates)
+            cl = repo.changelog
+            extinctadded = [r for r in obsadded if r not in cl]
+            if extinctadded:
+                # They are not just obsolete, but obsolete and invisible;
+                # we call them "extinct" internally, but the term has not
+                # been exposed to users.
+                msg = '(%d other changesets obsolete on arrival)\n'
+                repo.ui.status(msg % len(extinctadded))
 
         @reportsummary
         def reportphasechanges(repo, tr):
             """Report statistics of phase changes for changesets pre-existing
             pull/unbundle.
             """
-            # TODO set() is only appropriate for 4.7 since revs post
-            # 45e05d39d9ce is a pycompat.membershiprange, which has O(n)
-            # membership testing.
-            newrevs = set(tr.changes.get('revs', xrange(0, 0)))
+            origrepolen = tr.changes.get('origrepolen', len(repo))
             phasetracking = tr.changes.get('phases', {})
             if not phasetracking:
                 return
             published = [
                 rev for rev, (old, new) in phasetracking.iteritems()
-                if new == phases.public and rev not in newrevs
+                if new == phases.public and rev < origrepolen
             ]
             if not published:
                 return
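
reportnewcs() now derives its range from origrepolen and annotates it with
draft/secret counts. A small illustration of the message selection, with a
made-up range and counts (note the format string pluralizes unconditionally):

    revrange, draft, secret = '1200:1207', 2, 1
    if draft and secret:
        msg = 'new changesets %s (%d drafts, %d secrets)\n' % (
            revrange, draft, secret)
    # msg == 'new changesets 1200:1207 (2 drafts, 1 secrets)\n'
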
--- a/mercurial/server.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/server.py	Mon Oct 22 14:46:06 2018 -0400
@@ -79,7 +79,7 @@
             runargs.append('--daemon-postexec=unlink:%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
-            for i in xrange(1, len(runargs)):
+            for i in pycompat.xrange(1, len(runargs)):
                 if runargs[i].startswith('--cwd='):
                     del runargs[i]
                     break
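
This hunk is one of several in the merge that replace the bare builtin with
pycompat.xrange, which is xrange on Python 2 and range on Python 3,
preserving lazy-range semantics on both. A one-liner showing the alias
(assumes only that mercurial is importable):

    from mercurial import pycompat

    assert list(pycompat.xrange(1, 4)) == [1, 2, 3]  # lazy on py2 and py3
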
--- a/mercurial/setdiscovery.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/setdiscovery.py	Mon Oct 22 14:46:06 2018 -0400
@@ -51,30 +51,25 @@
     nullrev,
 )
 from . import (
-    dagutil,
     error,
     util,
 )
 
-def _updatesample(dag, nodes, sample, quicksamplesize=0):
+def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
     """update an existing sample to match the expected size
 
-    The sample is updated with nodes exponentially distant from each head of the
-    <nodes> set. (H~1, H~2, H~4, H~8, etc).
+    The sample is updated with revs exponentially distant from each head of the
+    <revs> set. (H~1, H~2, H~4, H~8, etc).
 
     If a target size is specified, the sampling will stop once this size is
-    reached. Otherwise sampling will happen until roots of the <nodes> set are
+    reached. Otherwise sampling will happen until roots of the <revs> set are
     reached.
 
-    :dag: a dag object from dagutil
-    :nodes:  set of nodes we want to discover (if None, assume the whole dag)
+    :revs:  set of revs we want to discover (if None, assume the whole dag)
+    :heads: set of DAG head revs
     :sample: a sample to update
+    :parentfn: a callable to resolve parents for a revision
     :quicksamplesize: optional target size of the sample"""
-    # if nodes is empty we scan the entire graph
-    if nodes:
-        heads = dag.headsetofconnecteds(nodes)
-    else:
-        heads = dag.heads()
     dist = {}
     visit = collections.deque(heads)
     seen = set()
@@ -91,37 +86,69 @@
             if quicksamplesize and (len(sample) >= quicksamplesize):
                 return
         seen.add(curr)
-        for p in dag.parents(curr):
-            if not nodes or p in nodes:
+
+        for p in parentfn(curr):
+            if p != nullrev and (not revs or p in revs):
                 dist.setdefault(p, d + 1)
                 visit.append(p)
 
-def _takequicksample(dag, nodes, size):
+def _takequicksample(repo, headrevs, revs, size):
     """takes a quick sample of size <size>
 
     It is meant for initial sampling and focuses on querying heads and close
     ancestors of heads.
 
-    :dag: a dag object
+    :repo: a local repo object
-    :nodes: set of nodes to discover
+    :headrevs: set of head revisions in local DAG to consider
+    :revs: set of revs to discover
     :size: the maximum size of the sample"""
-    sample = dag.headsetofconnecteds(nodes)
+    sample = set(repo.revs('heads(%ld)', revs))
+
     if len(sample) >= size:
         return _limitsample(sample, size)
-    _updatesample(dag, None, sample, quicksamplesize=size)
+
+    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
+                  quicksamplesize=size)
     return sample
 
-def _takefullsample(dag, nodes, size):
-    sample = dag.headsetofconnecteds(nodes)
+def _takefullsample(repo, headrevs, revs, size):
+    sample = set(repo.revs('heads(%ld)', revs))
+
     # update from heads
-    _updatesample(dag, nodes, sample)
+    revsheads = set(repo.revs('heads(%ld)', revs))
+    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)
+
     # update from roots
-    _updatesample(dag.inverse(), nodes, sample)
+    revsroots = set(repo.revs('roots(%ld)', revs))
+
+    # _updatesample() essentially does iteration over revisions to look up
+    # their children. This lookup is expensive and doing it in a loop is
+    # quadratic. We precompute the children for all relevant revisions and
+    # make the lookup in _updatesample() a simple dict lookup.
+    #
+    # Because this function can be called multiple times during discovery, we
+    # may still perform redundant work and there is room to optimize this by
+    # keeping a persistent cache of children across invocations.
+    children = {}
+
+    parentrevs = repo.changelog.parentrevs
+    for rev in repo.changelog.revs(start=min(revsroots)):
+        # Always ensure revision has an entry so we don't need to worry about
+        # missing keys.
+        children.setdefault(rev, [])
+
+        for prev in parentrevs(rev):
+            if prev == nullrev:
+                continue
+
+            children.setdefault(prev, []).append(rev)
+
+    _updatesample(revs, revsroots, sample, children.__getitem__)
     assert sample
     sample = _limitsample(sample, size)
     if len(sample) < size:
         more = size - len(sample)
-        sample.update(random.sample(list(nodes - sample), more))
+        sample.update(random.sample(list(revs - sample), more))
     return sample
 
 def _limitsample(sample, desiredlen):
@@ -142,16 +169,17 @@
 
     roundtrips = 0
     cl = local.changelog
-    localsubset = None
+    clnode = cl.node
+    clrev = cl.rev
+
     if ancestorsof is not None:
-        rev = local.changelog.rev
-        localsubset = [rev(n) for n in ancestorsof]
-    dag = dagutil.revlogdag(cl, localsubset=localsubset)
+        ownheads = [clrev(n) for n in ancestorsof]
+    else:
+        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]
 
     # early exit if we know all the specified remote heads already
     ui.debug("query 1; heads\n")
     roundtrips += 1
-    ownheads = dag.heads()
     sample = _limitsample(ownheads, initialsamplesize)
     # indices between sample and externalized version must match
     sample = list(sample)
@@ -159,7 +187,7 @@
     with remote.commandexecutor() as e:
         fheads = e.callcommand('heads', {})
         fknown = e.callcommand('known', {
-            'nodes': dag.externalizeall(sample),
+            'nodes': [clnode(r) for r in sample],
         })
 
     srvheadhashes, yesno = fheads.result(), fknown.result()
@@ -173,15 +201,25 @@
     # compatibility reasons)
     ui.status(_("searching for changes\n"))
 
-    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
+    srvheads = []
+    for node in srvheadhashes:
+        if node == nullid:
+            continue
+
+        try:
+            srvheads.append(clrev(node))
+        # Catches unknown and filtered nodes.
+        except error.LookupError:
+            continue
+
     if len(srvheads) == len(srvheadhashes):
         ui.debug("all remote heads known locally\n")
-        return (srvheadhashes, False, srvheadhashes,)
+        return srvheadhashes, False, srvheadhashes
 
     if len(sample) == len(ownheads) and all(yesno):
         ui.note(_("all local heads known remotely\n"))
-        ownheadhashes = dag.externalizeall(ownheads)
-        return (ownheadhashes, True, srvheadhashes,)
+        ownheadhashes = [clnode(r) for r in ownheads]
+        return ownheadhashes, True, srvheadhashes
 
     # full blown discovery
 
@@ -202,7 +240,12 @@
 
         if sample:
             missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
-            missing.update(dag.descendantset(missinginsample, missing))
+
+            if missing:
+                missing.update(local.revs('descendants(%ld) - descendants(%ld)',
+                                          missinginsample, missing))
+            else:
+                missing.update(local.revs('descendants(%ld)', missinginsample))
 
             undecided.difference_update(missing)
 
@@ -224,7 +267,7 @@
         if len(undecided) < targetsize:
             sample = list(undecided)
         else:
-            sample = samplefunc(dag, undecided, targetsize)
+            sample = samplefunc(local, ownheads, undecided, targetsize)
 
         roundtrips += 1
         progress.update(roundtrips)
@@ -235,7 +278,7 @@
 
         with remote.commandexecutor() as e:
             yesno = e.callcommand('known', {
-                'nodes': dag.externalizeall(sample),
+                'nodes': [clnode(r) for r in sample],
             }).result()
 
         full = True
@@ -247,10 +290,8 @@
 
     # heads(common) == heads(common.bases) since common represents common.bases
     # and all its ancestors
-    result = dag.headsetofconnecteds(common.bases)
-    # common.bases can include nullrev, but our contract requires us to not
-    # return any heads in that case, so discard that
-    result.discard(nullrev)
+    # The presence of nullrev would confuse heads(), so filter it out.
+    result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
     elapsed = util.timer() - start
     progress.complete()
     ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
@@ -268,4 +309,5 @@
         return ({nullid}, True, srvheadhashes,)
 
     anyincoming = (srvheadhashes != [nullid])
-    return dag.externalizeall(result), anyincoming, srvheadhashes
+    result = {clnode(r) for r in result}
+    return result, anyincoming, srvheadhashes
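
The H~1, H~2, H~4, H~8 shape described in _updatesample's docstring is
easiest to see on a linear chain. The toy sampler below sketches only that
sampling shape under simplified assumptions (single parent, no target size);
the real code walks a DAG with a distance dict and a deque:

    def exponentialsample(head, parentof, nullrev=-1):
        """Collect ancestors of `head` at distances 1, 2, 4, 8, ... (toy)."""
        sample = []
        dist, target = 0, 1
        curr = head
        while curr != nullrev:
            if dist == target:
                sample.append(curr)
                target *= 2
            curr = parentof(curr)
            dist += 1
        return sample

    # on a linear chain 0 <- 1 <- ... <- 10, sampling from head 10:
    assert exponentialsample(10, lambda r: r - 1) == [9, 8, 6, 2]
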
--- a/mercurial/simplemerge.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/simplemerge.py	Mon Oct 22 14:46:06 2018 -0400
@@ -58,7 +58,8 @@
     """
     if (aend - astart) != (bend - bstart):
         return False
-    for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
+    for ia, ib in zip(pycompat.xrange(astart, aend),
+                      pycompat.xrange(bstart, bend)):
         if a[ia] != b[ib]:
             return False
     else:
--- a/mercurial/smartset.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/smartset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -137,7 +137,7 @@
 
         This is part of the mandatory API for smartset."""
-        # builtin cannot be cached. but do not needs to
+        # builtins cannot be cached, but they do not need to be
-        if cache and util.safehasattr(condition, 'func_code'):
+        if cache and util.safehasattr(condition, '__code__'):
             condition = util.cachefunc(condition)
         return filteredset(self, condition, condrepr)
 
@@ -152,11 +152,11 @@
         # but start > stop is allowed, which should be an empty set.
         ys = []
         it = iter(self)
-        for x in xrange(start):
+        for x in pycompat.xrange(start):
             y = next(it, None)
             if y is None:
                 break
-        for x in xrange(stop - start):
+        for x in pycompat.xrange(stop - start):
             y = next(it, None)
             if y is None:
                 break
@@ -1005,13 +1005,13 @@
             return self.fastdesc()
 
     def fastasc(self):
-        iterrange = xrange(self._start, self._end)
+        iterrange = pycompat.xrange(self._start, self._end)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
 
     def fastdesc(self):
-        iterrange = xrange(self._end - 1, self._start - 1, -1)
+        iterrange = pycompat.xrange(self._end - 1, self._start - 1, -1)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
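
The func_code/__code__ change above matters because plain functions only
have a func_code attribute on Python 2; __code__ exists on both majors, so
function-based conditions are again detected (and cached) under Python 3.
A tiny check of the attribute rename:

    def condition(rev):
        return rev > 0

    # '__code__' is present on py2 and py3; 'func_code' is py2-only
    assert hasattr(condition, '__code__')
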
--- a/mercurial/sparse.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/sparse.py	Mon Oct 22 14:46:06 2018 -0400
@@ -31,9 +31,11 @@
 # a per-repo option, possibly a repo requirement.
 enabled = False
 
-def parseconfig(ui, raw):
+def parseconfig(ui, raw, action):
     """Parse sparse config file content.
 
+    action is the command triggering this read; it can be 'narrow' or 'sparse'
+
     Returns a tuple of includes, excludes, and profiles.
     """
     includes = set()
@@ -54,8 +56,8 @@
         elif line == '[include]':
             if havesection and current != includes:
                 # TODO pass filename into this API so we can report it.
-                raise error.Abort(_('sparse config cannot have includes ' +
-                                    'after excludes'))
+                raise error.Abort(_('%(action)s config cannot have includes '
+                                    'after excludes') % {'action': action})
             havesection = True
             current = includes
             continue
@@ -64,14 +66,16 @@
             current = excludes
         elif line:
             if current is None:
-                raise error.Abort(_('sparse config entry outside of '
-                                    'section: %s') % line,
+                raise error.Abort(_('%(action)s config entry outside of '
+                                    'section: %(line)s')
+                                  % {'action': action, 'line': line},
                                   hint=_('add an [include] or [exclude] line '
                                          'to declare the entry type'))
 
             if line.strip().startswith('/'):
-                ui.warn(_('warning: sparse profile cannot use' +
-                          ' paths starting with /, ignoring %s\n') % line)
+                ui.warn(_('warning: %(action)s profile cannot use'
+                          ' paths starting with /, ignoring %(line)s\n')
+                        % {'action': action, 'line': line})
                 continue
             current.add(line)
 
@@ -102,7 +106,7 @@
         raise error.Abort(_('cannot parse sparse patterns from working '
                             'directory'))
 
-    includes, excludes, profiles = parseconfig(repo.ui, raw)
+    includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
     ctx = repo[rev]
 
     if profiles:
@@ -128,7 +132,7 @@
                     repo.ui.debug(msg)
                 continue
 
-            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
+            pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse')
             includes.update(pincludes)
             excludes.update(pexcludes)
             profiles.update(subprofs)
@@ -357,6 +361,11 @@
         elif file in wctx:
             prunedactions[file] = ('r', args, msg)
 
+        if branchmerge and type == mergemod.ACTION_MERGE:
+            f1, f2, fa, move, anc = args
+            if not sparsematch(f1):
+                temporaryfiles.append(f1)
+
     if len(temporaryfiles) > 0:
         repo.ui.status(_('temporarily included %d file(s) in the sparse '
                          'checkout for merging\n') % len(temporaryfiles))
@@ -516,7 +525,7 @@
                                 force=False, removing=False):
     """Update the sparse config and working directory state."""
     raw = repo.vfs.tryread('sparse')
-    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw)
+    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse')
 
     oldstatus = repo.status()
     oldmatch = matcher(repo)
@@ -556,7 +565,7 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
 
         if not includes and not excludes:
             return
@@ -572,7 +581,7 @@
     with repo.wlock():
         # read current configuration
         raw = repo.vfs.tryread('sparse')
-        includes, excludes, profiles = parseconfig(repo.ui, raw)
+        includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse')
         aincludes, aexcludes, aprofiles = activeconfig(repo)
 
         # Import rules on top; only take in rules that are not yet
@@ -582,7 +591,8 @@
             with util.posixfile(util.expandpath(p), mode='rb') as fh:
                 raw = fh.read()
 
-            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw)
+            iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
             oldsize = len(includes) + len(excludes) + len(profiles)
             includes.update(iincludes - aincludes)
             excludes.update(iexcludes - aexcludes)
@@ -615,7 +625,8 @@
     """
     with repo.wlock():
         raw = repo.vfs.tryread('sparse')
-        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw)
+        oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw,
+                                                          'sparse')
 
         if reset:
             newinclude = set()
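
parseconfig() now takes the calling action purely for error wording, letting
the narrow extension reuse the sparse parser. A hedged usage sketch; the
profile text and patterns are made up, and ui.load() is just a convenient
way to obtain a ui object:

    from mercurial import sparse, ui as uimod

    ui = uimod.ui.load()
    raw = b'[include]\npath:src\n[exclude]\npath:src/vendor\n'
    includes, excludes, profiles = sparse.parseconfig(ui, raw, 'narrow')
    # malformed input now aborts with, e.g.,
    #   "narrow config entry outside of section: ..."
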
--- a/mercurial/sshpeer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/sshpeer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -597,7 +597,7 @@
         raise error.RepoError(_('unknown version of SSH protocol: %s') %
                               protoname)
 
-def instance(ui, path, create, intents=None):
+def instance(ui, path, create, intents=None, createopts=None):
     """Create an SSH peer.
 
     The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
@@ -620,6 +620,14 @@
     args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
 
     if create:
+        # We /could/ pass these along, but only if the remote init command
+        # knows how to handle them. We don't yet make any assumptions about
+        # that. And without querying the remote, there's no way of knowing
+        # whether the remote even supports the requested features.
+        if createopts:
+            raise error.RepoError(_('cannot create remote SSH repositories '
+                                    'with extra options'))
+
         cmd = '%s %s %s' % (sshcmd, args,
             procutil.shellquote('%s init %s' %
                 (_serverquote(remotecmd), _serverquote(remotepath))))
--- a/mercurial/state.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/state.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,12 +19,13 @@
 
 from __future__ import absolute_import
 
-from .thirdparty import cbor
-
 from . import (
     error,
     util,
 )
+from .utils import (
+    cborutil,
+)
 
 class cmdstate(object):
     """a wrapper class to store the state of commands like `rebase`, `graft`,
@@ -62,7 +63,8 @@
 
         with self._repo.vfs(self.fname, 'wb', atomictemp=True) as fp:
             fp.write('%d\n' % version)
-            cbor.dump(data, fp, canonical=True)
+            for chunk in cborutil.streamencode(data):
+                fp.write(chunk)
 
     def _read(self):
         """reads the state file and returns a dictionary which contain
@@ -73,7 +75,8 @@
             except ValueError:
                 raise error.CorruptedState("unknown version of state file"
                                            " found")
-            return cbor.load(fp)
+
+            return cborutil.decodeall(fp.read())[0]
 
     def delete(self):
         """drop the state file if exists"""
--- a/mercurial/statichttprepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/statichttprepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,8 +19,6 @@
     manifest,
     namespaces,
     pathutil,
-    scmutil,
-    store,
     url,
     util,
     vfs as vfsmod,
@@ -136,7 +134,8 @@
     def canpush(self):
         return False
 
-class statichttprepository(localrepo.localrepository):
+class statichttprepository(localrepo.localrepository,
+                           localrepo.revlogfilestorage):
     supported = localrepo.localrepository._basesupported
 
     def __init__(self, ui, path):
@@ -156,7 +155,7 @@
         self.filtername = None
 
         try:
-            requirements = scmutil.readrequires(self.vfs, self.supported)
+            requirements = set(self.vfs.read(b'requires').splitlines())
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
@@ -174,15 +173,21 @@
                 msg = _("'%s' does not appear to be an hg repository") % path
                 raise error.RepoError(msg)
 
+        supportedrequirements = localrepo.gathersupportedrequirements(ui)
+        localrepo.ensurerequirementsrecognized(requirements,
+                                               supportedrequirements)
+        localrepo.ensurerequirementscompatible(ui, requirements)
+
         # setup store
-        self.store = store.store(requirements, self.path, vfsclass)
+        self.store = localrepo.makestore(requirements, self.path, vfsclass)
         self.spath = self.store.path
         self.svfs = self.store.opener
         self.sjoin = self.store.join
         self._filecache = {}
         self.requirements = requirements
 
-        self.manifestlog = manifest.manifestlog(self.svfs, self)
+        rootmanifest = manifest.manifestrevlog(self.svfs)
+        self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest)
         self.changelog = changelog.changelog(self.svfs)
         self._tags = None
         self.nodetagscache = None
@@ -215,7 +220,7 @@
     def _writecaches(self):
-        pass # statichttprepository are read only
+        pass # statichttprepository is read only
 
-def instance(ui, path, create, intents=None):
+def instance(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_('cannot create new static-http repository'))
     return statichttprepository(ui, path[7:])
--- a/mercurial/statprof.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/statprof.py	Mon Oct 22 14:46:06 2018 -0400
@@ -125,13 +125,21 @@
 
 __all__ = ['start', 'stop', 'reset', 'display', 'profile']
 
-skips = {"util.py:check", "extensions.py:closure",
-         "color.py:colorcmd", "dispatch.py:checkargs",
-         "dispatch.py:<lambda>", "dispatch.py:_runcatch",
-         "dispatch.py:_dispatch", "dispatch.py:_runcommand",
-         "pager.py:pagecmd", "dispatch.py:run",
-         "dispatch.py:dispatch", "dispatch.py:runcommand",
-         "hg.py:<module>", "evolve.py:warnobserrors",
+skips = {
+    r"util.py:check",
+    r"extensions.py:closure",
+    r"color.py:colorcmd",
+    r"dispatch.py:checkargs",
+    r"dispatch.py:<lambda>",
+    r"dispatch.py:_runcatch",
+    r"dispatch.py:_dispatch",
+    r"dispatch.py:_runcommand",
+    r"pager.py:pagecmd",
+    r"dispatch.py:run",
+    r"dispatch.py:dispatch",
+    r"dispatch.py:runcommand",
+    r"hg.py:<module>",
+    r"evolve.py:warnobserrors",
 }
 
 ###########################################################################
@@ -249,6 +257,9 @@
     def filename(self):
         return os.path.basename(self.path)
 
+    def skipname(self):
+        return r'%s:%s' % (self.filename(), self.function)
+
 class Sample(object):
     __slots__ = (u'stack', u'time')
 
@@ -352,11 +363,11 @@
     with open(path, 'w+') as file:
         file.write("%f %f\n" % state.accumulated_time)
         for sample in state.samples:
-            time = str(sample.time)
+            time = sample.time
             stack = sample.stack
-            sites = ['\1'.join([s.path, str(s.lineno), s.function])
+            sites = ['\1'.join([s.path, b'%d' % s.lineno, s.function])
                      for s in stack]
-            file.write(time + '\0' + '\0'.join(sites) + '\n')
+            file.write("%d\0%s\n" % (time, '\0'.join(sites)))
 
 def load_data(path):
     lines = open(path, 'r').read().splitlines()
@@ -461,7 +472,7 @@
         import sys
         fp = sys.stdout
     if len(data.samples) == 0:
-        print('No samples recorded.', file=fp)
+        fp.write(b'No samples recorded.\n')
         return
 
     if format == DisplayFormats.ByLine:
@@ -482,10 +493,9 @@
         raise Exception("Invalid display format")
 
     if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
-        print('---', file=fp)
-        print('Sample count: %d' % len(data.samples), file=fp)
-        print('Total time: %f seconds (%f wall)' % data.accumulated_time,
-              file=fp)
+        fp.write(b'---\n')
+        fp.write(b'Sample count: %d\n' % len(data.samples))
+        fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time)
 
 def display_by_line(data, fp):
     '''Print the profiler data with each sample line represented
@@ -493,34 +503,34 @@
     stats = SiteStats.buildstats(data.samples)
     stats.sort(reverse=True, key=lambda x: x.selfseconds())
 
-    print('%5.5s %10.10s   %7.7s  %-8.8s' %
-          ('%  ', 'cumulative', 'self', ''), file=fp)
-    print('%5.5s  %9.9s  %8.8s  %-8.8s' %
-          ("time", "seconds", "seconds", "name"), file=fp)
+    fp.write(b'%5.5s %10.10s   %7.7s  %-8.8s\n' % (
+        b'%  ', b'cumulative', b'self', b''))
+    fp.write(b'%5.5s  %9.9s  %8.8s  %-8.8s\n' % (
+        b"time", b"seconds", b"seconds", b"name"))
 
     for stat in stats:
         site = stat.site
-        sitelabel = '%s:%d:%s' % (site.filename(), site.lineno, site.function)
-        print('%6.2f %9.2f %9.2f  %s' % (stat.selfpercent(),
-                                         stat.totalseconds(),
-                                         stat.selfseconds(),
-                                         sitelabel),
-              file=fp)
+        sitelabel = '%s:%d:%s' % (pycompat.fsencode(site.filename()),
+                                  site.lineno,
+                                  pycompat.sysbytes(site.function))
+        fp.write(b'%6.2f %9.2f %9.2f  %s\n' % (
+            stat.selfpercent(), stat.totalseconds(),
+            stat.selfseconds(), sitelabel))
 
 def display_by_method(data, fp):
     '''Print the profiler data with each sample function represented
     as one row in a table.  Important lines within that function are
     output as nested rows.  Sorted by self-time per line.'''
-    print('%5.5s %10.10s   %7.7s  %-8.8s' %
-          ('%  ', 'cumulative', 'self', ''), file=fp)
-    print('%5.5s  %9.9s  %8.8s  %-8.8s' %
-          ("time", "seconds", "seconds", "name"), file=fp)
+    fp.write(b'%5.5s %10.10s   %7.7s  %-8.8s\n' %
+          ('%  ', 'cumulative', 'self', ''))
+    fp.write(b'%5.5s  %9.9s  %8.8s  %-8.8s\n' %
+          ("time", "seconds", "seconds", "name"))
 
     stats = SiteStats.buildstats(data.samples)
 
     grouped = defaultdict(list)
     for stat in stats:
-        grouped[stat.site.filename() + ":" + stat.site.function].append(stat)
+        grouped[stat.site.filename() + r":" + stat.site.function].append(stat)
 
     # compute sums for each function
     functiondata = []
@@ -545,20 +555,24 @@
     for function in functiondata:
         if function[3] < 0.05:
             continue
-        print('%6.2f %9.2f %9.2f  %s' % (function[3], # total percent
-                                         function[1], # total cum sec
-                                         function[2], # total self sec
-                                         function[0]), # file:function
-              file=fp)
+        fp.write(b'%6.2f %9.2f %9.2f  %s\n' % (
+            function[3], # total percent
+            function[1], # total cum sec
+            function[2], # total self sec
+            pycompat.sysbytes(function[0]))) # file:function
+
         function[4].sort(reverse=True, key=lambda i: i.selfseconds())
         for stat in function[4]:
             # only show line numbers for significant locations (>1% time spent)
             if stat.selfpercent() > 1:
                 source = stat.site.getsource(25)
+                if sys.version_info.major >= 3 and not isinstance(source, bytes):
+                    source = pycompat.bytestr(source)
+
                 stattuple = (stat.selfpercent(), stat.selfseconds(),
                              stat.site.lineno, source)
 
-                print('%33.0f%% %6.2f   line %s: %s' % (stattuple), file=fp)
+                fp.write(b'%33.0f%% %6.2f   line %d: %s\n' % stattuple)
 
 def display_about_method(data, fp, function=None, **kwargs):
     if function is None:
@@ -592,9 +606,12 @@
     parents = [(parent, count) for parent, count in parents.iteritems()]
     parents.sort(reverse=True, key=lambda x: x[1])
     for parent, count in parents:
-        print('%6.2f%%   %s:%s   line %s: %s' %
-            (count / relevant_samples * 100, parent.filename(),
-            parent.function, parent.lineno, parent.getsource(50)), file=fp)
+        fp.write(b'%6.2f%%   %s:%s   line %s: %s\n' %
+            (count / relevant_samples * 100,
+             pycompat.fsencode(parent.filename()),
+             pycompat.sysbytes(parent.function),
+             parent.lineno,
+             pycompat.sysbytes(parent.getsource(50))))
 
     stats = SiteStats.buildstats(data.samples)
     stats = [s for s in stats
@@ -611,23 +628,23 @@
         total_self_percent += stat.selfpercent()
         total_cum_percent += stat.totalpercent()
 
-    print(
-        '\n    %s:%s    Total: %0.2fs (%0.2f%%)    Self: %0.2fs (%0.2f%%)\n' %
-        (
-        filename or '___',
-        function,
+    fp.write(
+        b'\n    %s:%s    Total: %0.2fs (%0.2f%%)    Self: %0.2fs (%0.2f%%)\n\n'
+        % (
+        pycompat.sysbytes(filename or '___'),
+        pycompat.sysbytes(function),
         total_cum_sec,
         total_cum_percent,
         total_self_sec,
         total_self_percent
-        ), file=fp)
+        ))
 
     children = [(child, count) for child, count in children.iteritems()]
     children.sort(reverse=True, key=lambda x: x[1])
     for child, count in children:
-        print('        %6.2f%%   line %s: %s' %
+        fp.write(b'        %6.2f%%   line %s: %s\n' %
               (count / relevant_samples * 100, child.lineno,
-               child.getsource(50)), file=fp)
+               pycompat.sysbytes(child.getsource(50))))
 
 def display_hotpath(data, fp, limit=0.05, **kwargs):
     class HotNode(object):
@@ -647,7 +664,7 @@
             if len(stack) > 1:
                 i = 1
                # Skip boilerplate parts of the stack
-                while i < len(stack) and '%s:%s' % (stack[i].filename(), stack[i].function) in skips:
+                while i < len(stack) and stack[i].skipname() in skips:
                     i += 1
                 if i < len(stack):
                     child.add(stack[i:], time)
@@ -688,7 +705,7 @@
             # Make frames that didn't actually perform work dark grey
             elif node.count - childrensamples == 0:
                 finalstring = '\033[90m' + finalstring + '\033[0m'
-            print(finalstring, file=fp)
+            fp.write(finalstring + b'\n')
 
         newdepth = depth
         if len(visiblechildren) > 1 or multiple_siblings:
@@ -705,9 +722,8 @@
     if scriptpath is None:
         scriptpath = encoding.environ['HOME'] + '/flamegraph.pl'
     if not os.path.exists(scriptpath):
-        print("error: missing %s" % scriptpath, file=fp)
-        print("get it here: https://github.com/brendangregg/FlameGraph",
-              file=fp)
+        fp.write(b'error: missing %s\n' % scriptpath)
+        fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n')
         return
 
     fd, path = pycompat.mkstemp()
@@ -725,7 +741,7 @@
             lines[line] = 1
 
     for line, count in lines.iteritems():
-        file.write("%s %s\n" % (line, count))
+        file.write("%s %d\n" % (line, count))
 
     file.close()
 
@@ -733,7 +749,7 @@
         outputfile = '~/flamegraph.svg'
 
     os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
-    print("Written to %s" % outputfile, file=fp)
+    fp.write(b'Written to %s\n' % outputfile)
 
 _pathcache = {}
 def simplifypath(path):
@@ -763,7 +779,11 @@
 
         samples.append((sample.time, stack))
 
-    print(json.dumps(samples), file=fp)
+    data = json.dumps(samples)
+    if not isinstance(data, bytes):
+        data = data.encode('utf-8')
+
+    fp.write(data)
 
 def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
     samples = []
@@ -861,7 +881,7 @@
     fp.write('\n')
 
 def printusage():
-    print("""
+    print(r"""
 The statprof command line allows you to inspect the last profile's results in
 the following forms:
 
@@ -892,17 +912,17 @@
 
     optstart = 2
     displayargs['function'] = None
-    if argv[1] == 'hotpath':
+    if argv[1] == r'hotpath':
         displayargs['format'] = DisplayFormats.Hotpath
-    elif argv[1] == 'lines':
+    elif argv[1] == r'lines':
         displayargs['format'] = DisplayFormats.ByLine
-    elif argv[1] == 'functions':
+    elif argv[1] == r'functions':
         displayargs['format'] = DisplayFormats.ByMethod
-    elif argv[1] == 'function':
+    elif argv[1] == r'function':
         displayargs['format'] = DisplayFormats.AboutMethod
         displayargs['function'] = argv[2]
         optstart = 3
-    elif argv[1] == 'flame':
+    elif argv[1] == r'flame':
         displayargs['format'] = DisplayFormats.FlameGraph
     else:
         printusage()
@@ -920,22 +940,22 @@
     displayargs['limit'] = 0.05
     path = None
     for o, value in opts:
-        if o in ("-l", "--limit"):
+        if o in (r"-l", r"--limit"):
             displayargs['limit'] = float(value)
-        elif o in ("-f", "--file"):
+        elif o in (r"-f", r"--file"):
             path = value
-        elif o in ("-o", "--output-file"):
+        elif o in (r"-o", r"--output-file"):
             displayargs['outputfile'] = value
-        elif o in ("-p", "--script-path"):
+        elif o in (r"-p", r"--script-path"):
             displayargs['scriptpath'] = value
-        elif o in ("-h", "help"):
+        elif o in (r"-h", r"help"):
             printusage()
             return 0
         else:
             assert False, "unhandled option %s" % o
 
     if not path:
-        print('must specify --file to load')
+        print(r'must specify --file to load')
         return 1
 
     load_data(path=path)
@@ -944,5 +964,5 @@
 
     return 0
 
-if __name__ == "__main__":
+if __name__ == r"__main__":
     sys.exit(main())
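
The statprof hunks above replace print(..., file=fp) with explicit bytes
writes so the display helpers keep working when fp is a binary stream on
Python 3. A minimal sketch of that pattern, assuming a binary stream fp
and a hypothetical helper name:

    from mercurial import pycompat

    def writesiteline(fp, filename, lineno, function, selfpct, totalsec,
                      selfsec):
        # Encode native strings up front so the %-formatting below stays
        # bytes-only (PEP 461 bytes interpolation accepts %d and %f).
        sitelabel = b'%s:%d:%s' % (pycompat.fsencode(filename),
                                   lineno,
                                   pycompat.sysbytes(function))
        fp.write(b'%6.2f %9.2f %9.2f  %s\n' % (selfpct, totalsec,
                                               selfsec, sitelabel))
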
--- a/mercurial/store.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/store.py	Mon Oct 22 14:46:06 2018 -0400
@@ -118,7 +118,7 @@
     def decode(s):
         i = 0
         while i < len(s):
-            for l in xrange(1, 4):
+            for l in pycompat.xrange(1, 4):
                 try:
                     yield dmap[s[i:i + l]]
                     i += l
@@ -127,7 +127,8 @@
                     pass
             else:
                 raise KeyError
-    return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]),
+    return (lambda s: ''.join([cmap[s[c:c + 1]]
+                               for c in pycompat.xrange(len(s))]),
             lambda s: ''.join(list(decode(s))))
 
 _encodefname, _decodefname = _buildencodefun()
@@ -159,7 +160,7 @@
     'the~07quick~adshot'
     '''
     xchr = pycompat.bytechr
-    cmap = dict([(xchr(x), xchr(x)) for x in xrange(127)])
+    cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
     for x in _reserved():
         cmap[xchr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z") + 1):
@@ -316,8 +317,8 @@
         mode = None
     return mode
 
-_data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
-         ' phaseroots obsstore')
+_data = ('narrowspec data meta 00manifest.d 00manifest.i'
+         ' 00changelog.d 00changelog.i phaseroots obsstore')
 
 def isrevlog(f, kind, st):
     return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
@@ -358,17 +359,21 @@
         l.sort()
         return l
 
-    def datafiles(self):
+    def datafiles(self, matcher=None):
         return self._walk('data', True) + self._walk('meta', True)
 
     def topfiles(self):
         # yield manifest before changelog
         return reversed(self._walk('', False))
 
-    def walk(self):
-        '''yields (unencoded, encoded, size)'''
+    def walk(self, matcher=None):
+        '''yields (unencoded, encoded, size)
+
+        if a matcher is passed, only storage files of those tracked
+        paths which match the matcher are yielded
+        '''
         # yield data files first
-        for x in self.datafiles():
+        for x in self.datafiles(matcher):
             yield x
         for x in self.topfiles():
             yield x
@@ -406,7 +411,7 @@
         self.vfs = vfsmod.filtervfs(vfs, encodefilename)
         self.opener = self.vfs
 
-    def datafiles(self):
+    def datafiles(self, matcher=None):
         for a, b, size in super(encodedstore, self).datafiles():
             try:
                 a = decodefilename(a)
@@ -535,7 +540,7 @@
     def getsize(self, path):
         return self.rawvfs.stat(path).st_size
 
-    def datafiles(self):
+    def datafiles(self, matcher=None):
         for f in sorted(self.fncache):
             ef = self.encode(f)
             try:
@@ -545,7 +550,7 @@
                     raise
 
     def copylist(self):
-        d = ('data meta dh fncache phaseroots obsstore'
+        d = ('narrowspec data meta dh fncache phaseroots obsstore'
              ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
         return (['requires', '00changelog.i'] +
                 ['store/' + f for f in d.split()])
@@ -584,10 +589,3 @@
             if e.startswith(path) and self._exists(e):
                 return True
         return False
-
-def store(requirements, path, vfstype):
-    if 'store' in requirements:
-        if 'fncache' in requirements:
-            return fncachestore(path, vfstype, 'dotencode' in requirements)
-        return encodedstore(path, vfstype)
-    return basicstore(path, vfstype)
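
store.walk() and datafiles() above now accept an optional matcher so that
stream clones can be limited to tracked paths; the filtering itself is
deferred to the storage layer (the streamclone hunk below still aborts on
narrow requests). A hypothetical sketch, not part of this patch, of how a
store path could be mapped back to a tracked path for such filtering:

    def _matchtrackedpath(path, matcher):
        # Revlog files for a tracked file live under 'data/<path>.i' and
        # 'data/<path>.d', so strip that envelope and consult the matcher.
        if matcher is None:
            return True
        if path.startswith('data/') and path[-2:] in ('.i', '.d'):
            return matcher(path[len('data/'):-2])
        # manifest, changelog, phaseroots etc. are always wanted
        return True
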
--- a/mercurial/streamclone.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/streamclone.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,15 +10,16 @@
 import contextlib
 import os
 import struct
-import warnings
 
 from .i18n import _
 from . import (
     branchmap,
     cacheutil,
     error,
+    narrowspec,
     phases,
     pycompat,
+    repository,
     store,
     util,
 )
@@ -114,6 +115,8 @@
     A legacy stream clone will not be performed if a bundle2 stream clone is
     supported.
     """
+    from . import localrepo
+
     supported, requirements = canperformstreamclone(pullop)
 
     if not supported:
@@ -166,7 +169,8 @@
         # requirements from the streamed-in repository
         repo.requirements = requirements | (
                 repo.requirements - repo.supportedformats)
-        repo._applyopenerreqs()
+        repo.svfs.options = localrepo.resolvestorevfsoptions(
+            repo.ui, repo.requirements, repo.features)
         repo._writerequirements()
 
         if rbranchmap:
@@ -176,6 +180,9 @@
 
 def allowservergeneration(repo):
     """Whether streaming clones are allowed from the server."""
+    if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
+        return False
+
     if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
         return False
 
@@ -188,8 +195,8 @@
     return True
 
 # This is its own function so extensions can override it.
-def _walkstreamfiles(repo):
-    return repo.store.walk()
+def _walkstreamfiles(repo, matcher=None):
+    return repo.store.walk(matcher)
 
 def generatev1(repo):
     """Emit content for version 1 of a streaming clone.
@@ -358,7 +365,7 @@
 
         with repo.transaction('clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
-                for i in xrange(filecount):
+                for i in pycompat.xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
@@ -525,7 +532,7 @@
             finally:
                 fp.close()
 
-def generatev2(repo):
+def generatev2(repo, includes, excludes, includeobsmarkers):
     """Emit content for version 2 of a streaming clone.
 
     the data stream consists of the following entries:
@@ -538,13 +545,21 @@
     Returns a 3-tuple of (file count, file size, data iterator).
     """
 
+    # temporarily raise error until we add storage level logic
+    if includes or excludes:
+        raise error.Abort(_("server does not support narrow stream clones"))
+
     with repo.lock():
 
         entries = []
         totalfilesize = 0
 
+        matcher = None
+        if includes or excludes:
+            matcher = narrowspec.match(repo.root, includes, excludes)
+
         repo.ui.debug('scanning\n')
-        for name, ename, size in _walkstreamfiles(repo):
+        for name, ename, size in _walkstreamfiles(repo, matcher):
             if size:
                 entries.append((_srcstore, name, _fileappend, size))
                 totalfilesize += size
@@ -552,6 +567,9 @@
             if repo.svfs.exists(name):
                 totalfilesize += repo.svfs.lstat(name).st_size
                 entries.append((_srcstore, name, _filefull, None))
+        if includeobsmarkers and repo.svfs.exists('obsstore'):
+            totalfilesize += repo.svfs.lstat('obsstore').st_size
+            entries.append((_srcstore, 'obsstore', _filefull, None))
         for name in cacheutil.cachetocopy(repo):
             if repo.cachevfs.exists(name):
                 totalfilesize += repo.cachevfs.lstat(name).st_size
@@ -565,12 +583,13 @@
 
 @contextlib.contextmanager
 def nested(*ctxs):
-    with warnings.catch_warnings():
-        # For some reason, Python decided 'nested' was deprecated without
-        # replacement. They officially advertised for filtering the deprecation
-        # warning for people who actually need the feature.
-        warnings.filterwarnings("ignore",category=DeprecationWarning)
-        with contextlib.nested(*ctxs):
+    this = ctxs[0]
+    rest = ctxs[1:]
+    with this:
+        if rest:
+            with nested(*rest):
+                yield
+        else:
             yield
 
 def consumev2(repo, fp, filecount, filesize):
@@ -624,6 +643,8 @@
         progress.complete()
 
 def applybundlev2(repo, fp, filecount, filesize, requirements):
+    from . import localrepo
+
     missingreqs = [r for r in requirements if r not in repo.supported]
     if missingreqs:
         raise error.Abort(_('unable to apply stream clone: '
@@ -637,5 +658,6 @@
     # requirements from the streamed-in repository
     repo.requirements = set(requirements) | (
             repo.requirements - repo.supportedformats)
-    repo._applyopenerreqs()
+    repo.svfs.options = localrepo.resolvestorevfsoptions(
+        repo.ui, repo.requirements, repo.features)
     repo._writerequirements()
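
contextlib.nested() no longer exists on Python 3, so the hunk above
replaces it with a recursive context manager that enters the given
managers left to right and unwinds them in reverse on exit. A usage
sketch, assuming `repo` is a localrepository (the helper needs at least
one argument, since ctxs[0] is taken unconditionally):

    with nested(repo.wlock(), repo.lock()):
        # both locks are held here and are released innermost-first,
        # exactly as a literal "with a, b:" statement would release them
        pass
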
--- a/mercurial/subrepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/subrepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -951,9 +951,11 @@
             env['LANG'] = lc_all
             del env['LC_ALL']
         env['LC_MESSAGES'] = 'C'
-        p = subprocess.Popen(cmd, bufsize=-1, close_fds=procutil.closefds,
+        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmd),
+                             bufsize=-1, close_fds=procutil.closefds,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                              universal_newlines=True, env=env, **extrakw)
+                             universal_newlines=True,
+                             env=procutil.tonativeenv(env), **extrakw)
         stdout, stderr = p.communicate()
         stderr = stderr.strip()
         if not failok:
@@ -1268,8 +1270,12 @@
             # insert the argument in the front,
             # the end of git diff arguments is used for paths
             commands.insert(1, '--color')
-        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
-                             cwd=cwd, env=env, close_fds=procutil.closefds,
+        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr,
+                                             [self._gitexecutable] + commands),
+                             bufsize=-1,
+                             cwd=pycompat.rapply(procutil.tonativestr, cwd),
+                             env=procutil.tonativeenv(env),
+                             close_fds=procutil.closefds,
                              stdout=subprocess.PIPE, stderr=errpipe)
         if stream:
             return p.stdout, None
@@ -1577,17 +1583,15 @@
         if self._gitmissing():
             return []
 
-        (modified, added, removed,
-         deleted, unknown, ignored, clean) = self.status(None, unknown=True,
-                                                         clean=True)
+        s = self.status(None, unknown=True, clean=True)
 
         tracked = set()
         # dirstates 'amn' warn, 'r' is added again
-        for l in (modified, added, deleted, clean):
+        for l in (s.modified, s.added, s.deleted, s.clean):
             tracked.update(l)
 
         # Unknown files not of interest will be rejected by the matcher
-        files = unknown
+        files = s.unknown
         files.extend(match.files())
 
         rejected = []
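
The subrepo hunks above convert argv, cwd, and the environment to native
strings at the subprocess boundary: Mercurial keeps bytes internally, but
Python 3's subprocess wants native str for these (and requires it on
Windows). The same pattern in isolation, with illustrative values:

    import subprocess

    from mercurial import pycompat
    from mercurial.utils import procutil

    cmd = [b'git', b'status']
    env = {b'LC_MESSAGES': b'C'}
    # rapply() converts each bytes element of the list; tonativeenv()
    # does the same for the environment mapping.
    p = subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmd),
                         env=procutil.tonativeenv(env),
                         stdout=subprocess.PIPE)
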
--- a/mercurial/templatefilters.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templatefilters.py	Mon Oct 22 14:46:06 2018 -0400
@@ -119,7 +119,7 @@
             b = b[:len(a)]
         if a == b:
             return a
-        for i in xrange(len(a)):
+        for i in pycompat.xrange(len(a)):
             if a[i] != b[i]:
                 return a[:i]
         return a
@@ -200,7 +200,7 @@
             if not m:
                 uctext = encoding.unifromlocal(text[start:])
                 w = len(uctext)
-                while 0 < w and uctext[w - 1].isspace():
+                while w > 0 and uctext[w - 1].isspace():
                     w -= 1
                 yield (encoding.unitolocal(uctext[:w]),
                        encoding.unitolocal(uctext[w:]))
@@ -266,7 +266,7 @@
     num_lines = len(lines)
     endswithnewline = text[-1:] == '\n'
     def indenter():
-        for i in xrange(num_lines):
+        for i in pycompat.xrange(num_lines):
             l = lines[i]
             if i and l.strip():
                 yield prefix
--- a/mercurial/templatefuncs.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templatefuncs.py	Mon Oct 22 14:46:06 2018 -0400
@@ -140,7 +140,7 @@
     ctx = context.resource(mapping, 'ctx')
     m = ctx.match([raw])
     files = list(ctx.matches(m))
-    return templateutil.compatlist(context, mapping, "file", files)
+    return templateutil.compatfileslist(context, mapping, "file", files)
 
 @templatefunc('fill(text[, width[, initialindent[, hangindent]]])')
 def fill(context, mapping, args):
@@ -216,8 +216,9 @@
 
     return stringutil.mapname(cache['mailmap'], author)
 
-@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
-              argspec='text width fillchar left')
+@templatefunc(
+    'pad(text, width[, fillchar=\' \'[, left=False[, truncate=False]]])',
+    argspec='text width fillchar left truncate')
 def pad(context, mapping, args):
     """Pad text with a
     fill character."""
@@ -231,6 +232,7 @@
 
     text = evalstring(context, mapping, args['text'])
 
+    truncate = False
     left = False
     fillchar = ' '
     if 'fillchar' in args:
@@ -240,8 +242,12 @@
             raise error.ParseError(_("pad() expects a single fill character"))
     if 'left' in args:
         left = evalboolean(context, mapping, args['left'])
+    if 'truncate' in args:
+        truncate = evalboolean(context, mapping, args['truncate'])
 
     fillwidth = width - encoding.colwidth(color.stripeffects(text))
+    if fillwidth < 0 and truncate:
+        return encoding.trim(color.stripeffects(text), width, leftside=left)
     if fillwidth <= 0:
         return text
     if left:
@@ -575,7 +581,7 @@
     text = evalstring(context, mapping, args[0])
     style = evalstring(context, mapping, args[1])
 
-    return minirst.format(text, style=style, keep=['verbose'])[0]
+    return minirst.format(text, style=style, keep=['verbose'])
 
 @templatefunc('separate(sep, args...)', argspec='sep *args')
 def separate(context, mapping, args):
@@ -596,7 +602,7 @@
             yield sep
         yield argstr
 
-@templatefunc('shortest(node, minlength=4)', requires={'repo'})
+@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
 def shortest(context, mapping, args):
     """Obtain the shortest representation of
     a node."""
@@ -629,8 +635,9 @@
             return hexnode
         if not node:
             return hexnode
+    cache = context.resource(mapping, 'cache')
     try:
-        return scmutil.shortesthexnodeidprefix(repo, node, minlength)
+        return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
     except error.RepoLookupError:
         return hexnode
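
pad() above gains a truncate parameter: when the text is already wider
than the requested width, it is trimmed with encoding.trim() instead of
being returned unchanged. The core of that behaviour in isolation (a
sketch, not the function body):

    from mercurial import encoding

    text = b'a description longer than the column'
    width = 20
    if encoding.colwidth(text) > width:
        # leftside=True would keep the tail of the text instead
        text = encoding.trim(text, width, leftside=False)

From a template the new flag is the fifth argument, e.g.
{pad(desc, 20, ' ', False, True)}.
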
 
--- a/mercurial/templatekw.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templatekw.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,8 @@
 from .node import (
     hex,
     nullid,
+    wdirid,
+    wdirrev,
 )
 
 from . import (
@@ -168,9 +170,8 @@
 
 @templatekeyword('author', requires={'ctx'})
 def showauthor(context, mapping):
-    """String. The unmodified author of the changeset."""
-    ctx = context.resource(mapping, 'ctx')
-    return ctx.user()
+    """Alias for ``{user}``"""
+    return showuser(context, mapping)
 
 @templatekeyword('bisect', requires={'repo', 'ctx'})
 def showbisect(context, mapping):
@@ -292,16 +293,31 @@
     return _hybrid(f, extras, makemap,
                    lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k])))
 
-def _showfilesbystat(context, mapping, name, index):
-    repo = context.resource(mapping, 'repo')
+def _getfilestatus(context, mapping, listall=False):
     ctx = context.resource(mapping, 'ctx')
     revcache = context.resource(mapping, 'revcache')
-    if 'files' not in revcache:
-        revcache['files'] = repo.status(ctx.p1(), ctx)[:3]
-    files = revcache['files'][index]
-    return compatlist(context, mapping, name, files, element='file')
+    if 'filestatus' not in revcache or revcache['filestatusall'] < listall:
+        stat = ctx.p1().status(ctx, listignored=listall, listclean=listall,
+                               listunknown=listall)
+        revcache['filestatus'] = stat
+        revcache['filestatusall'] = listall
+    return revcache['filestatus']
 
-@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'})
+def _getfilestatusmap(context, mapping, listall=False):
+    revcache = context.resource(mapping, 'revcache')
+    if 'filestatusmap' not in revcache or revcache['filestatusall'] < listall:
+        stat = _getfilestatus(context, mapping, listall=listall)
+        revcache['filestatusmap'] = statmap = {}
+        for char, files in zip(pycompat.iterbytestr('MAR!?IC'), stat):
+            statmap.update((f, char) for f in files)
+    return revcache['filestatusmap']  # {path: statchar}
+
+def _showfilesbystat(context, mapping, name, index):
+    stat = _getfilestatus(context, mapping)
+    files = stat[index]
+    return templateutil.compatfileslist(context, mapping, name, files)
+
+@templatekeyword('file_adds', requires={'ctx', 'revcache'})
 def showfileadds(context, mapping):
     """List of strings. Files added by this changeset."""
     return _showfilesbystat(context, mapping, 'file_add', 1)
@@ -325,11 +341,8 @@
             rename = getrenamed(fn, ctx.rev())
             if rename:
                 copies.append((fn, rename))
-
-    copies = util.sortdict(copies)
-    return compatdict(context, mapping, 'file_copy', copies,
-                      key='name', value='source', fmt='%s (%s)',
-                      plural='file_copies')
+    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
+                                             copies)
 
 # showfilecopiesswitch() displays file copies only if copy records are
 # provided before calling the templater, usually with a --copies
@@ -340,17 +353,15 @@
     only if the --copied switch is set.
     """
     copies = context.resource(mapping, 'revcache').get('copies') or []
-    copies = util.sortdict(copies)
-    return compatdict(context, mapping, 'file_copy', copies,
-                      key='name', value='source', fmt='%s (%s)',
-                      plural='file_copies')
+    return templateutil.compatfilecopiesdict(context, mapping, 'file_copy',
+                                             copies)
 
-@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_dels', requires={'ctx', 'revcache'})
 def showfiledels(context, mapping):
     """List of strings. Files removed by this changeset."""
     return _showfilesbystat(context, mapping, 'file_del', 2)
 
-@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'})
+@templatekeyword('file_mods', requires={'ctx', 'revcache'})
 def showfilemods(context, mapping):
     """List of strings. Files modified by this changeset."""
     return _showfilesbystat(context, mapping, 'file_mod', 0)
@@ -361,7 +372,7 @@
     changeset.
     """
     ctx = context.resource(mapping, 'ctx')
-    return compatlist(context, mapping, 'file', ctx.files())
+    return templateutil.compatfileslist(context, mapping, 'file', ctx.files())
 
 @templatekeyword('graphnode', requires={'repo', 'ctx'})
 def showgraphnode(context, mapping):
@@ -466,14 +477,13 @@
     ctx = context.resource(mapping, 'ctx')
     mnode = ctx.manifestnode()
     if mnode is None:
-        # just avoid crash, we might want to use the 'ff...' hash in future
-        return
-    mrev = repo.manifestlog.rev(mnode)
+        mnode = wdirid
+        mrev = wdirrev
+    else:
+        mrev = repo.manifestlog.rev(mnode)
     mhex = hex(mnode)
     mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex})
     f = context.process('manifest', mapping)
-    # TODO: perhaps 'ctx' should be dropped from mapping because manifest
-    # rev and node are completely different from changeset's.
     return templateutil.hybriditem(f, None, f,
                                    lambda x: {'rev': mrev, 'node': mhex})
 
@@ -550,6 +560,12 @@
         return 'obsolete'
     return ''
 
+@templatekeyword('path', requires={'fctx'})
+def showpath(context, mapping):
+    """String. Repository-absolute path of the current file. (EXPERIMENTAL)"""
+    fctx = context.resource(mapping, 'fctx')
+    return fctx.path()
+
 @templatekeyword('peerurls', requires={'repo'})
 def showpeerurls(context, mapping):
     """A dictionary of repository locations defined in the [paths] section
@@ -583,6 +599,25 @@
     repo = context.resource(mapping, 'repo')
     return repo.root
 
+@templatekeyword('size', requires={'fctx'})
+def showsize(context, mapping):
+    """Integer. Size of the current file in bytes. (EXPERIMENTAL)"""
+    fctx = context.resource(mapping, 'fctx')
+    return fctx.size()
+
+# 'fctx' is required to denote that {status} depends on the (ctx, path) pair
+@templatekeyword('status', requires={'ctx', 'fctx', 'revcache'})
+def showstatus(context, mapping):
+    """String. Status code of the current file. (EXPERIMENTAL)"""
+    path = templateutil.runsymbol(context, mapping, 'path')
+    path = templateutil.stringify(context, mapping, path)
+    if not path:
+        return
+    statmap = _getfilestatusmap(context, mapping)
+    if path not in statmap:
+        statmap = _getfilestatusmap(context, mapping, listall=True)
+    return statmap.get(path)
+
 @templatekeyword("successorssets", requires={'repo', 'ctx'})
 def showsuccessorssets(context, mapping):
     """Returns a string of sets of successors for a changectx. Format used
@@ -758,6 +793,12 @@
     ui = context.resource(mapping, 'ui')
     return ui.termwidth()
 
+@templatekeyword('user', requires={'ctx'})
+def showuser(context, mapping):
+    """String. The unmodified author of the changeset."""
+    ctx = context.resource(mapping, 'ctx')
+    return ctx.user()
+
 @templatekeyword('instabilities', requires={'ctx'})
 def showinstabilities(context, mapping):
     """List of strings. Evolution instabilities affecting the changeset.
--- a/mercurial/templater.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templater.py	Mon Oct 22 14:46:06 2018 -0400
@@ -548,7 +548,7 @@
     __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
-    def availablekeys(self, context, mapping):
+    def availablekeys(self, mapping):
         """Return a set of available resource keys based on the given mapping"""
 
     @abc.abstractmethod
@@ -556,7 +556,7 @@
         """Return a set of supported resource keys"""
 
     @abc.abstractmethod
-    def lookup(self, context, mapping, key):
+    def lookup(self, mapping, key):
         """Return a resource for the key if available; otherwise None"""
 
     @abc.abstractmethod
@@ -565,13 +565,13 @@
         with the given new mapping"""
 
 class nullresourcemapper(resourcemapper):
-    def availablekeys(self, context, mapping):
+    def availablekeys(self, mapping):
         return set()
 
     def knownkeys(self):
         return set()
 
-    def lookup(self, context, mapping, key):
+    def lookup(self, mapping, key):
         return None
 
     def populatemap(self, context, origmapping, newmapping):
@@ -618,7 +618,7 @@
         # do not copy symbols which overrides the defaults depending on
         # new resources, so the defaults will be re-evaluated (issue5612)
         knownres = self._resources.knownkeys()
-        newres = self._resources.availablekeys(self, newmapping)
+        newres = self._resources.availablekeys(newmapping)
         mapping = {k: v for k, v in origmapping.iteritems()
                    if (k in knownres  # not a symbol per self.symbol()
                        or newres.isdisjoint(self._defaultrequires(k)))}
@@ -645,7 +645,7 @@
 
     def availableresourcekeys(self, mapping):
         """Return a set of available resource keys based on the given mapping"""
-        return self._resources.availablekeys(self, mapping)
+        return self._resources.availablekeys(mapping)
 
     def knownresourcekeys(self):
         """Return a set of supported resource keys"""
@@ -654,7 +654,7 @@
     def resource(self, mapping, key):
         """Return internal data (e.g. cache) used for keyword/function
         evaluation"""
-        v = self._resources.lookup(self, mapping, key)
+        v = self._resources.lookup(mapping, key)
         if v is None:
             raise templateutil.ResourceUnavailable(
                 _('template resource not available: %s') % key)
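
The resourcemapper methods above drop their redundant context argument;
the templater now passes only the mapping. A minimal sketch of a
conforming implementation (hypothetical class, assuming resourcemapper
is imported from mercurial.templater):

    class staticresourcemapper(resourcemapper):
        """Serve template resources out of a fixed table."""
        def __init__(self, table):
            self._table = table
        def availablekeys(self, mapping):
            return {k for k in self._table
                    if self._table[k] is not None}
        def knownkeys(self):
            return set(self._table)
        def lookup(self, mapping, key):
            return self._table.get(key)
        def populatemap(self, context, origmapping, newmapping):
            return {}  # nothing extra to inject into new mappings
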
--- a/mercurial/templates/json/map	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templates/json/map	Mon Oct 22 14:46:06 2018 -0400
@@ -1,4 +1,6 @@
+default = 'shortlog'
 mimetype = 'application/json'
+
 filerevision = '\{
   "node": {node|json},
   "path": {file|json},
@@ -239,3 +241,6 @@
   "lastchange": {lastchange|json},
   "labels": {labels|json}
   }'
+error = '\{
+  "error": {error|utf8|json}
+  }'
--- a/mercurial/templates/map-cmdline.bisect	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templates/map-cmdline.bisect	Mon Oct 22 14:46:06 2018 -0400
@@ -1,10 +1,10 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}\n'
+changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}\n'
 changeset_quiet = '{lshortbisect} {rev}:{node|short}\n'
-changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # We take the zeroth word in order to omit "(implicit)" in the label
 bisectlabel = ' bisect.{word('0', bisect)}'
--- a/mercurial/templates/map-cmdline.default	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templates/map-cmdline.default	Mon Oct 22 14:46:06 2018 -0400
@@ -2,10 +2,10 @@
 # to replace some keywords with 'lkeyword', for 'labelled keyword'
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{summary}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{summary}\n'
 changeset_quiet = '{lnode}'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n'
 
 # File templates
 lfiles = '{if(files,
@@ -54,8 +54,8 @@
 bookmark = '{label("log.bookmark",
                    "bookmark:    {bookmark}")}\n'
 
-user = '{label("log.user",
-               "user:        {author}")}\n'
+luser = '{label("log.user",
+                "user:        {author}")}\n'
 
 summary = '{if(desc|strip, "{label('log.summary',
                                    'summary:     {desc|firstline}')}\n")}'
@@ -74,7 +74,7 @@
                                 {label('ui.note log.description',
                                        '{desc|strip}')}\n\n")}'
 
-status = '{status} {path}\n{if(copy, "  {copy}\n")}'
+status = '{status} {path|relpath}\n{if(source, "  {source|relpath}\n")}'
 
 # Obsfate templates; these will be removed once we introduce the obsfate
 # template fragment
--- a/mercurial/templates/map-cmdline.phases	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templates/map-cmdline.phases	Mon Oct 22 14:46:06 2018 -0400
@@ -1,5 +1,5 @@
 %include map-cmdline.default
 
 [templates]
-changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{summary}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{summary}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
--- a/mercurial/templates/map-cmdline.status	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templates/map-cmdline.status	Mon Oct 22 14:46:06 2018 -0400
@@ -2,9 +2,9 @@
 
 [templates]
 # Override base templates
-changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}{lfiles}\n'
-changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{description}{lfiles}\n'
-changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{extras}{description}{lfiles}\n'
+changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}{lfiles}\n'
+changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{description}{lfiles}\n'
+changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{extras}{description}{lfiles}\n'
 
 # Override the file templates
 lfiles = '{if(files,
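
The map-cmdline changes rename the internal user template to luser
('labelled user', matching ldate and lphase) because {user} now resolves
to the keyword added in the templatekw hunk above; without the rename the
style definition would shadow it. A custom style file needs the same
treatment, e.g.:

    luser = '{label("log.user",
                    "user:        {author}")}\n'
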
--- a/mercurial/templateutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/templateutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -570,6 +570,32 @@
     f = _showcompatlist(context, mapping, name, data, plural, separator)
     return hybridlist(data, name=element or name, fmt=fmt, gen=f)
 
+def compatfilecopiesdict(context, mapping, name, copies):
+    """Wrap list of (dest, source) file names to support old-style list
+    template and field names
+
+    This exists for backward compatibility. Use hybriddict for new template
+    keywords.
+    """
+    # no need to provide {path} to old-style list template
+    c = [{'name': k, 'source': v} for k, v in copies]
+    f = _showcompatlist(context, mapping, name, c, plural='file_copies')
+    copies = util.sortdict(copies)
+    return hybrid(f, copies,
+                  lambda k: {'name': k, 'path': k, 'source': copies[k]},
+                  lambda k: '%s (%s)' % (k, copies[k]))
+
+def compatfileslist(context, mapping, name, files):
+    """Wrap list of file names to support old-style list template and field
+    names
+
+    This exists for backward compatibility. Use hybridlist for new template
+    keywords.
+    """
+    f = _showcompatlist(context, mapping, name, files)
+    return hybrid(f, files, lambda x: {'file': x, 'path': x},
+                  pycompat.identity)
+
 def _showcompatlist(context, mapping, name, values, plural=None, separator=' '):
     """Return a generator that renders old-style list template
 
@@ -810,8 +836,9 @@
     return data
 
 def _recursivesymbolblocker(key):
-    def showrecursion(**args):
+    def showrecursion(context, mapping):
         raise error.Abort(_("recursive reference '%s' in template") % key)
+    showrecursion._requires = ()  # mark as new-style templatekw
     return showrecursion
 
 def runsymbol(context, mapping, key, default=''):
@@ -827,12 +854,16 @@
             v = default
     if callable(v) and getattr(v, '_requires', None) is None:
         # old templatekw: expand all keywords and resources
-        # (TODO: deprecate this after porting web template keywords to new API)
-        props = {k: context._resources.lookup(context, mapping, k)
+        # (TODO: drop support for old-style functions. 'f._requires = ()'
+        #  can be removed.)
+        props = {k: context._resources.lookup(mapping, k)
                  for k in context._resources.knownkeys()}
         # pass context to _showcompatlist() through templatekw._showlist()
         props['templ'] = context
         props.update(mapping)
+        ui = props.get('ui')
+        if ui:
+            ui.deprecwarn("old-style template keyword '%s'" % key, '4.8')
         return v(**pycompat.strkwargs(props))
     if callable(v):
         # new templatekw
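
compatfileslist() above wraps a plain list of file names so the legacy
per-item field {file} keeps working while the new-style {path} is exposed
as well. Illustratively, both of these map expressions render the same
output for a changeset's files:

    # {files % "{file}\n"}  -> legacy field name, one line per file
    # {files % "{path}\n"}  -> new-style name, identical output
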
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/testing/storage.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1345 @@
+# storage.py - Testing of storage primitives.
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import unittest
+
+from ..node import (
+    hex,
+    nullid,
+    nullrev,
+)
+from .. import (
+    error,
+    mdiff,
+    repository,
+)
+from ..utils import (
+    storageutil,
+)
+
+class basetestcase(unittest.TestCase):
+    if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
+        assertRaisesRegex = (# camelcase-required
+            unittest.TestCase.assertRaisesRegexp)
+
+class ifileindextests(basetestcase):
+    """Generic tests for the ifileindex interface.
+
+    All file storage backends for index data should conform to the tests in this
+    class.
+
+    Use ``makeifileindextests()`` to create an instance of this type.
+    """
+    def testempty(self):
+        f = self._makefilefn()
+        self.assertEqual(len(f), 0, 'new file store has 0 length by default')
+        self.assertEqual(list(f), [], 'iter yields nothing by default')
+
+        gen = iter(f)
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        self.assertFalse(f.hasnode(None))
+        self.assertFalse(f.hasnode(0))
+        self.assertFalse(f.hasnode(nullrev))
+        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(b'0'))
+        self.assertFalse(f.hasnode(b'a' * 20))
+
+        # revs() should evaluate to an empty list.
+        self.assertEqual(list(f.revs()), [])
+
+        revs = iter(f.revs())
+        with self.assertRaises(StopIteration):
+            next(revs)
+
+        self.assertEqual(list(f.revs(start=20)), [])
+
+        # parents() and parentrevs() work with nullid/nullrev.
+        self.assertEqual(f.parents(nullid), (nullid, nullid))
+        self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
+
+        with self.assertRaises(error.LookupError):
+            f.parents(b'\x01' * 20)
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(IndexError):
+                f.parentrevs(i)
+
+        # nullid/nullrev lookup always works.
+        self.assertEqual(f.rev(nullid), nullrev)
+        self.assertEqual(f.node(nullrev), nullid)
+
+        with self.assertRaises(error.LookupError):
+            f.rev(b'\x01' * 20)
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(IndexError):
+                f.node(i)
+
+        self.assertEqual(f.lookup(nullid), nullid)
+        self.assertEqual(f.lookup(nullrev), nullid)
+        self.assertEqual(f.lookup(hex(nullid)), nullid)
+        self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'badvalue')
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(hex(nullid)[0:12])
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'-2')
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'0')
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'1')
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'11111111111111111111111111111111111111')
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(LookupError):
+                f.lookup(i)
+
+        self.assertEqual(f.linkrev(nullrev), nullrev)
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(IndexError):
+                f.linkrev(i)
+
+        self.assertFalse(f.iscensored(nullrev))
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(IndexError):
+                f.iscensored(i)
+
+        self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+
+        with self.assertRaises(ValueError):
+            self.assertEqual(list(f.descendants([])), [])
+
+        self.assertEqual(list(f.descendants([nullrev])), [])
+
+        self.assertEqual(f.heads(), [nullid])
+        self.assertEqual(f.heads(nullid), [nullid])
+        self.assertEqual(f.heads(None, [nullid]), [nullid])
+        self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+
+        self.assertEqual(f.children(nullid), [])
+
+        with self.assertRaises(error.LookupError):
+            f.children(b'\x01' * 20)
+
+    def testsinglerevision(self):
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node = f.add(b'initial', None, tr, 0, nullid, nullid)
+
+        self.assertEqual(len(f), 1)
+        self.assertEqual(list(f), [0])
+
+        gen = iter(f)
+        self.assertEqual(next(gen), 0)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        self.assertTrue(f.hasnode(node))
+        self.assertFalse(f.hasnode(hex(node)))
+        self.assertFalse(f.hasnode(nullrev))
+        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(node[0:12]))
+        self.assertFalse(f.hasnode(hex(node)[0:20]))
+
+        self.assertEqual(list(f.revs()), [0])
+        self.assertEqual(list(f.revs(start=1)), [])
+        self.assertEqual(list(f.revs(start=0)), [0])
+        self.assertEqual(list(f.revs(stop=0)), [0])
+        self.assertEqual(list(f.revs(stop=1)), [0])
+        self.assertEqual(list(f.revs(1, 1)), [])
+        # TODO buggy
+        self.assertEqual(list(f.revs(1, 0)), [1, 0])
+        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
+
+        self.assertEqual(f.parents(node), (nullid, nullid))
+        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
+
+        with self.assertRaises(error.LookupError):
+            f.parents(b'\x01' * 20)
+
+        with self.assertRaises(IndexError):
+            f.parentrevs(1)
+
+        self.assertEqual(f.rev(node), 0)
+
+        with self.assertRaises(error.LookupError):
+            f.rev(b'\x01' * 20)
+
+        self.assertEqual(f.node(0), node)
+
+        with self.assertRaises(IndexError):
+            f.node(1)
+
+        self.assertEqual(f.lookup(node), node)
+        self.assertEqual(f.lookup(0), node)
+        self.assertEqual(f.lookup(-1), nullid)
+        self.assertEqual(f.lookup(b'0'), node)
+        self.assertEqual(f.lookup(hex(node)), node)
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(hex(node)[0:12])
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(-2)
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'-2')
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(1)
+
+        with self.assertRaises(error.LookupError):
+            f.lookup(b'1')
+
+        self.assertEqual(f.linkrev(0), 0)
+
+        with self.assertRaises(IndexError):
+            f.linkrev(1)
+
+        self.assertFalse(f.iscensored(0))
+
+        with self.assertRaises(IndexError):
+            f.iscensored(1)
+
+        self.assertEqual(list(f.descendants([0])), [])
+
+        self.assertEqual(f.heads(), [node])
+        self.assertEqual(f.heads(node), [node])
+        self.assertEqual(f.heads(stop=[node]), [node])
+
+        with self.assertRaises(error.LookupError):
+            f.heads(stop=[b'\x01' * 20])
+
+        self.assertEqual(f.children(node), [])
+
+    def testmultiplerevisions(self):
+        fulltext0 = b'x' * 1024
+        fulltext1 = fulltext0 + b'y'
+        fulltext2 = b'y' + fulltext0 + b'z'
+
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+
+        self.assertEqual(len(f), 3)
+        self.assertEqual(list(f), [0, 1, 2])
+
+        gen = iter(f)
+        self.assertEqual(next(gen), 0)
+        self.assertEqual(next(gen), 1)
+        self.assertEqual(next(gen), 2)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        self.assertEqual(list(f.revs()), [0, 1, 2])
+        self.assertEqual(list(f.revs(0)), [0, 1, 2])
+        self.assertEqual(list(f.revs(1)), [1, 2])
+        self.assertEqual(list(f.revs(2)), [2])
+        self.assertEqual(list(f.revs(3)), [])
+        self.assertEqual(list(f.revs(stop=1)), [0, 1])
+        self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
+        self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
+        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
+        self.assertEqual(list(f.revs(2, 1)), [2, 1])
+        # TODO this is wrong
+        self.assertEqual(list(f.revs(3, 2)), [3, 2])
+
+        self.assertEqual(f.parents(node0), (nullid, nullid))
+        self.assertEqual(f.parents(node1), (node0, nullid))
+        self.assertEqual(f.parents(node2), (node1, nullid))
+
+        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
+        self.assertEqual(f.parentrevs(1), (0, nullrev))
+        self.assertEqual(f.parentrevs(2), (1, nullrev))
+
+        self.assertEqual(f.rev(node0), 0)
+        self.assertEqual(f.rev(node1), 1)
+        self.assertEqual(f.rev(node2), 2)
+
+        with self.assertRaises(error.LookupError):
+            f.rev(b'\x01' * 20)
+
+        self.assertEqual(f.node(0), node0)
+        self.assertEqual(f.node(1), node1)
+        self.assertEqual(f.node(2), node2)
+
+        with self.assertRaises(IndexError):
+            f.node(3)
+
+        self.assertEqual(f.lookup(node0), node0)
+        self.assertEqual(f.lookup(0), node0)
+        self.assertEqual(f.lookup(b'0'), node0)
+        self.assertEqual(f.lookup(hex(node0)), node0)
+
+        self.assertEqual(f.lookup(node1), node1)
+        self.assertEqual(f.lookup(1), node1)
+        self.assertEqual(f.lookup(b'1'), node1)
+        self.assertEqual(f.lookup(hex(node1)), node1)
+
+        self.assertEqual(f.linkrev(0), 0)
+        self.assertEqual(f.linkrev(1), 1)
+        self.assertEqual(f.linkrev(2), 3)
+
+        with self.assertRaises(IndexError):
+            f.linkrev(3)
+
+        self.assertFalse(f.iscensored(0))
+        self.assertFalse(f.iscensored(1))
+        self.assertFalse(f.iscensored(2))
+
+        with self.assertRaises(IndexError):
+            f.iscensored(3)
+
+        self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+        self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
+        self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
+        self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
+        self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
+        self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
+
+        self.assertEqual(list(f.descendants([0])), [1, 2])
+        self.assertEqual(list(f.descendants([1])), [2])
+        self.assertEqual(list(f.descendants([0, 1])), [1, 2])
+
+        self.assertEqual(f.heads(), [node2])
+        self.assertEqual(f.heads(node0), [node2])
+        self.assertEqual(f.heads(node1), [node2])
+        self.assertEqual(f.heads(node2), [node2])
+
+        # TODO this behavior seems wonky. Is it correct? If so, the
+        # docstring for heads() should be updated to reflect desired
+        # behavior.
+        self.assertEqual(f.heads(stop=[node1]), [node1, node2])
+        self.assertEqual(f.heads(stop=[node0]), [node0, node2])
+        self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
+
+        with self.assertRaises(error.LookupError):
+            f.heads(stop=[b'\x01' * 20])
+
+        self.assertEqual(f.children(node0), [node1])
+        self.assertEqual(f.children(node1), [node2])
+        self.assertEqual(f.children(node2), [])
+
+    def testmultipleheads(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, nullid)
+            node2 = f.add(b'2', None, tr, 2, node1, nullid)
+            node3 = f.add(b'3', None, tr, 3, node0, nullid)
+            node4 = f.add(b'4', None, tr, 4, node3, nullid)
+            node5 = f.add(b'5', None, tr, 5, node0, nullid)
+
+        self.assertEqual(len(f), 6)
+
+        self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
+        self.assertEqual(list(f.descendants([1])), [2])
+        self.assertEqual(list(f.descendants([2])), [])
+        self.assertEqual(list(f.descendants([3])), [4])
+        self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
+        self.assertEqual(list(f.descendants([1, 3])), [2, 4])
+
+        self.assertEqual(f.heads(), [node2, node4, node5])
+        self.assertEqual(f.heads(node0), [node2, node4, node5])
+        self.assertEqual(f.heads(node1), [node2])
+        self.assertEqual(f.heads(node2), [node2])
+        self.assertEqual(f.heads(node3), [node4])
+        self.assertEqual(f.heads(node4), [node4])
+        self.assertEqual(f.heads(node5), [node5])
+
+        # TODO this seems wrong.
+        self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
+        self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
+
+        self.assertEqual(f.children(node0), [node1, node3, node5])
+        self.assertEqual(f.children(node1), [node2])
+        self.assertEqual(f.children(node2), [])
+        self.assertEqual(f.children(node3), [node4])
+        self.assertEqual(f.children(node4), [])
+        self.assertEqual(f.children(node5), [])
+
+class ifiledatatests(basetestcase):
+    """Generic tests for the ifiledata interface.
+
+    All file storage backends for data should conform to the tests in this
+    class.
+
+    Use ``makeifiledatatests()`` to create an instance of this type.
+    """
+    def testempty(self):
+        f = self._makefilefn()
+
+        self.assertEqual(f.storageinfo(), {})
+        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
+                         {'revisionscount': 0, 'trackedsize': 0})
+
+        self.assertEqual(f.size(nullrev), 0)
+
+        for i in range(-5, 5):
+            if i == nullrev:
+                continue
+
+            with self.assertRaises(IndexError):
+                f.size(i)
+
+        self.assertEqual(f.revision(nullid), b'')
+        self.assertEqual(f.revision(nullid, raw=True), b'')
+
+        with self.assertRaises(error.LookupError):
+            f.revision(b'\x01' * 20)
+
+        self.assertEqual(f.read(nullid), b'')
+
+        with self.assertRaises(error.LookupError):
+            f.read(b'\x01' * 20)
+
+        self.assertFalse(f.renamed(nullid))
+
+        with self.assertRaises(error.LookupError):
+            f.read(b'\x01' * 20)
+
+        self.assertTrue(f.cmp(nullid, b''))
+        self.assertTrue(f.cmp(nullid, b'foo'))
+
+        with self.assertRaises(error.LookupError):
+            f.cmp(b'\x01' * 20, b'irrelevant')
+
+        # Emitting empty list is an empty generator.
+        gen = f.emitrevisions([])
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Emitting null node yields nothing.
+        gen = f.emitrevisions([nullid])
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Requesting unknown node fails.
+        with self.assertRaises(error.LookupError):
+            list(f.emitrevisions([b'\x01' * 20]))
+
+    def testsinglerevision(self):
+        fulltext = b'initial'
+
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node = f.add(fulltext, None, tr, 0, nullid, nullid)
+
+        self.assertEqual(f.storageinfo(), {})
+        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
+                         {'revisionscount': 1, 'trackedsize': len(fulltext)})
+
+        self.assertEqual(f.size(0), len(fulltext))
+
+        with self.assertRaises(IndexError):
+            f.size(1)
+
+        self.assertEqual(f.revision(node), fulltext)
+        self.assertEqual(f.revision(node, raw=True), fulltext)
+
+        self.assertEqual(f.read(node), fulltext)
+
+        self.assertFalse(f.renamed(node))
+
+        self.assertFalse(f.cmp(node, fulltext))
+        self.assertTrue(f.cmp(node, fulltext + b'extra'))
+
+        # Emitting a single revision works.
+        gen = f.emitrevisions([node])
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertIsNone(rev.delta)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Requesting revision data works.
+        gen = f.emitrevisions([node], revisiondata=True)
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertEqual(rev.revision, fulltext)
+        self.assertIsNone(rev.delta)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Emitting an unknown node after a known revision results in error.
+        with self.assertRaises(error.LookupError):
+            list(f.emitrevisions([node, b'\x01' * 20]))
+
+    def testmultiplerevisions(self):
+        fulltext0 = b'x' * 1024
+        fulltext1 = fulltext0 + b'y'
+        fulltext2 = b'y' + fulltext0 + b'z'
+
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+
+        self.assertEqual(f.storageinfo(), {})
+        self.assertEqual(
+            f.storageinfo(revisionscount=True, trackedsize=True),
+            {
+                'revisionscount': 3,
+                'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
+            })
+
+        self.assertEqual(f.size(0), len(fulltext0))
+        self.assertEqual(f.size(1), len(fulltext1))
+        self.assertEqual(f.size(2), len(fulltext2))
+
+        with self.assertRaises(IndexError):
+            f.size(3)
+
+        self.assertEqual(f.revision(node0), fulltext0)
+        self.assertEqual(f.revision(node0, raw=True), fulltext0)
+        self.assertEqual(f.revision(node1), fulltext1)
+        self.assertEqual(f.revision(node1, raw=True), fulltext1)
+        self.assertEqual(f.revision(node2), fulltext2)
+        self.assertEqual(f.revision(node2, raw=True), fulltext2)
+
+        with self.assertRaises(error.LookupError):
+            f.revision(b'\x01' * 20)
+
+        self.assertEqual(f.read(node0), fulltext0)
+        self.assertEqual(f.read(node1), fulltext1)
+        self.assertEqual(f.read(node2), fulltext2)
+
+        with self.assertRaises(error.LookupError):
+            f.read(b'\x01' * 20)
+
+        self.assertFalse(f.renamed(node0))
+        self.assertFalse(f.renamed(node1))
+        self.assertFalse(f.renamed(node2))
+
+        with self.assertRaises(error.LookupError):
+            f.renamed(b'\x01' * 20)
+
+        self.assertFalse(f.cmp(node0, fulltext0))
+        self.assertFalse(f.cmp(node1, fulltext1))
+        self.assertFalse(f.cmp(node2, fulltext2))
+
+        self.assertTrue(f.cmp(node1, fulltext0))
+        self.assertTrue(f.cmp(node2, fulltext1))
+
+        with self.assertRaises(error.LookupError):
+            f.cmp(b'\x01' * 20, b'irrelevant')
+
+        # Nodes should be emitted in order.
+        gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node0)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertEqual(rev.revision, fulltext0)
+        self.assertIsNone(rev.delta)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node1)
+        self.assertEqual(rev.p1node, node0)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, node0)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
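+        # The delta is a raw bdiff hunk: three 32-bit big-endian integers
+        # (start, end, newlength) followed by the replacement bytes. 0x400
+        # is len(fulltext0) and 0x401 is len(fulltext1): a full-text replace.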
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
+                         fulltext1)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node2)
+        self.assertEqual(rev.p1node, node1)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, node1)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
+                         fulltext2)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Requests not in DAG order are reordered to be in DAG order.
+        gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node0)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertEqual(rev.revision, fulltext0)
+        self.assertIsNone(rev.delta)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node1)
+        self.assertEqual(rev.p1node, node0)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, node0)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
+                         fulltext1)
+
+        rev = next(gen)
+
+        self.assertEqual(rev.node, node2)
+        self.assertEqual(rev.p1node, node1)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertIsNone(rev.linknode)
+        self.assertEqual(rev.basenode, node1)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
+                         fulltext2)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # Unrecognized nodesorder value raises ProgrammingError.
+        with self.assertRaises(error.ProgrammingError):
+            list(f.emitrevisions([], nodesorder='bad'))
+
+        # nodesorder=storage is recognized, but we can't test it thoroughly
+        # because behavior is storage-dependent.
+        res = list(f.emitrevisions([node2, node1, node0],
+                                   nodesorder='storage'))
+        self.assertEqual(len(res), 3)
+        self.assertEqual({o.node for o in res}, {node0, node1, node2})
+
+        # nodesorder=nodes forces the order.
+        gen = f.emitrevisions([node2, node0], nodesorder='nodes',
+                              revisiondata=True)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node2)
+        self.assertEqual(rev.p1node, node1)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertEqual(rev.revision, fulltext2)
+        self.assertIsNone(rev.delta)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node0)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        # Delta behavior is storage dependent, so we can't easily test it.
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # assumehaveparentrevisions=False (the default) won't send a delta for
+        # the first revision.
+        gen = f.emitrevisions({node2, node1}, revisiondata=True)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node1)
+        self.assertEqual(rev.p1node, node0)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertEqual(rev.revision, fulltext1)
+        self.assertIsNone(rev.delta)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node2)
+        self.assertEqual(rev.p1node, node1)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, node1)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
+                         fulltext2)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        # assumehaveparentrevisions=True allows delta against initial revision.
+        gen = f.emitrevisions([node2, node1],
+                              revisiondata=True, assumehaveparentrevisions=True)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node1)
+        self.assertEqual(rev.p1node, node0)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, node0)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
+                         fulltext1)
+
+        # deltaprevious=True forces a delta against the previous revision.
+        # Special case for the initial revision.
+        gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node0)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
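+        # Against the null base the bdiff header is (0, 0, len(fulltext0)):
+        # insert the whole fulltext at offset zero.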
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
+                         fulltext0)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+        gen = f.emitrevisions([node0, node2], revisiondata=True,
+                              deltaprevious=True)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node0)
+        self.assertEqual(rev.p1node, nullid)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, nullid)
+        self.assertIsNone(rev.baserevisionsize)
+        self.assertIsNone(rev.revision)
+        self.assertEqual(rev.delta,
+                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
+                         fulltext0)
+
+        rev = next(gen)
+        self.assertEqual(rev.node, node2)
+        self.assertEqual(rev.p1node, node1)
+        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.basenode, node0)
+
+        with self.assertRaises(StopIteration):
+            next(gen)
+
+    def testrenamed(self):
+        fulltext0 = b'foo'
+        fulltext1 = b'bar'
+        fulltext2 = b'baz'
+
+        meta1 = {
+            b'copy': b'source0',
+            b'copyrev': b'a' * 40,
+        }
+
+        meta2 = {
+            b'copy': b'source1',
+            b'copyrev': b'b' * 40,
+        }
+
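+        # In storage, copy metadata is a "\x01\n"-delimited block of
+        # "key: value\n" lines prepended to the fulltext.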
+        stored1 = b''.join([
+            b'\x01\ncopy: source0\n',
+            b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
+            fulltext1,
+        ])
+
+        stored2 = b''.join([
+            b'\x01\ncopy: source1\n',
+            b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
+            fulltext2,
+        ])
+
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
+            node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+
+        # Metadata header isn't recognized when parent isn't nullid.
+        self.assertEqual(f.size(1), len(stored1))
+        self.assertEqual(f.size(2), len(fulltext2))
+
+        self.assertEqual(f.revision(node1), stored1)
+        self.assertEqual(f.revision(node1, raw=True), stored1)
+        self.assertEqual(f.revision(node2), stored2)
+        self.assertEqual(f.revision(node2, raw=True), stored2)
+
+        self.assertEqual(f.read(node1), fulltext1)
+        self.assertEqual(f.read(node2), fulltext2)
+
+        # Returns False when first parent is set.
+        self.assertFalse(f.renamed(node1))
+        self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
+
+        self.assertTrue(f.cmp(node1, fulltext1))
+        self.assertTrue(f.cmp(node1, stored1))
+        self.assertFalse(f.cmp(node2, fulltext2))
+        self.assertTrue(f.cmp(node2, stored2))
+
+    def testmetadataprefix(self):
+        # Content with metadata prefix has extra prefix inserted in storage.
+        fulltext0 = b'\x01\nfoo'
+        stored0 = b'\x01\n\x01\n\x01\nfoo'
+
+        fulltext1 = b'\x01\nbar'
+        meta1 = {
+            b'copy': b'source0',
+            b'copyrev': b'b' * 40,
+        }
+        stored1 = b''.join([
+            b'\x01\ncopy: source0\n',
+            b'copyrev: %s\n' % (b'b' * 40),
+            b'\x01\n\x01\nbar',
+        ])
+
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+
+        # TODO this is buggy.
+        self.assertEqual(f.size(0), len(fulltext0) + 4)
+
+        self.assertEqual(f.size(1), len(fulltext1))
+
+        self.assertEqual(f.revision(node0), stored0)
+        self.assertEqual(f.revision(node0, raw=True), stored0)
+
+        self.assertEqual(f.revision(node1), stored1)
+        self.assertEqual(f.revision(node1, raw=True), stored1)
+
+        self.assertEqual(f.read(node0), fulltext0)
+        self.assertEqual(f.read(node1), fulltext1)
+
+        self.assertFalse(f.cmp(node0, fulltext0))
+        self.assertTrue(f.cmp(node0, stored0))
+
+        self.assertFalse(f.cmp(node1, fulltext1))
+        self.assertTrue(f.cmp(node1, stored1))
+
+    def testbadnoderead(self):
+        f = self._makefilefn()
+
+        fulltext0 = b'foo\n' * 30
+        fulltext1 = fulltext0 + b'bar\n'
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = b'\xaa' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
+                                   rawtext=fulltext1)
+
+        self.assertEqual(len(f), 2)
+        self.assertEqual(f.parents(node1), (node0, nullid))
+
+        # revision() raises since it performs hash verification.
+        with self.assertRaises(error.StorageError):
+            f.revision(node1)
+
+        # raw=True still verifies because there are no special storage
+        # settings.
+        with self.assertRaises(error.StorageError):
+            f.revision(node1, raw=True)
+
+        # read() behaves like revision().
+        with self.assertRaises(error.StorageError):
+            f.read(node1)
+
+        # We can't test renamed() here because some backends may not require
+        # reading/validating the fulltext to return rename metadata.
+
+    def testbadnoderevisionraw(self):
+        # Like above except we test revision(raw=True) first to isolate
+        # revision caching behavior.
+        f = self._makefilefn()
+
+        fulltext0 = b'foo\n' * 30
+        fulltext1 = fulltext0 + b'bar\n'
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = b'\xaa' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
+                                   rawtext=fulltext1)
+
+        with self.assertRaises(error.StorageError):
+            f.revision(node1, raw=True)
+
+        with self.assertRaises(error.StorageError):
+            f.revision(node1, raw=True)
+
+    def testbadnodereadfirst(self):
+        # Like above except we test read() first to isolate revision caching
+        # behavior.
+        f = self._makefilefn()
+
+        fulltext0 = b'foo\n' * 30
+        fulltext1 = fulltext0 + b'bar\n'
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = b'\xaa' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
+                                   rawtext=fulltext1)
+
+        with self.assertRaises(error.StorageError):
+            f.read(node1)
+
+        with self.assertRaises(error.StorageError):
+            f.read(node1)
+
+    def testbadnodedelta(self):
+        f = self._makefilefn()
+
+        fulltext0 = b'foo\n' * 31
+        fulltext1 = fulltext0 + b'bar\n'
+        fulltext2 = fulltext1 + b'baz\n'
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node1 = b'\xaa' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
+                                   rawtext=fulltext1)
+
+        with self.assertRaises(error.StorageError):
+            f.read(node1)
+
+        node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+
+        with self._maketransactionfn() as tr:
+            delta = mdiff.textdiff(fulltext1, fulltext2)
+            self._addrawrevisionfn(f, tr, node2, node1, nullid,
+                                   2, delta=(1, delta))
+
+        self.assertEqual(len(f), 3)
+
+        # Assuming a delta is stored, we shouldn't need to validate node1 in
+        # order to retrieve node2.
+        self.assertEqual(f.read(node2), fulltext2)
+
+    def testcensored(self):
+        f = self._makefilefn()
+
+        stored1 = storageutil.packmeta({
+            b'censored': b'tombstone',
+        }, b'')
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+
+            # The node value doesn't matter since we can't verify it.
+            node1 = b'\xbb' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
+                                   censored=True)
+
+        self.assertTrue(f.iscensored(1))
+
+        with self.assertRaises(error.CensoredNodeError):
+            f.revision(1)
+
+        with self.assertRaises(error.CensoredNodeError):
+            f.revision(1, raw=True)
+
+        with self.assertRaises(error.CensoredNodeError):
+            f.read(1)
+
+    def testcensoredrawrevision(self):
+        # Like above, except we do the revision(raw=True) request first to
+        # isolate revision caching behavior.
+
+        f = self._makefilefn()
+
+        stored1 = storageutil.packmeta({
+            b'censored': b'tombstone',
+        }, b'')
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+
+            # The node value doesn't matter since we can't verify it.
+            node1 = b'\xbb' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
+                                   censored=True)
+
+        with self.assertRaises(error.CensoredNodeError):
+            f.revision(1, raw=True)
+
+class ifilemutationtests(basetestcase):
+    """Generic tests for the ifilemutation interface.
+
+    All file storage backends that support writing should conform to this
+    interface.
+
+    Use ``makeifilemutationtests()`` to create an instance of this type.
+    """
+    def testaddnoop(self):
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            # Varying by linkrev shouldn't impact hash.
+            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+
+        self.assertEqual(node1, node0)
+        self.assertEqual(node2, node0)
+        self.assertEqual(len(f), 1)
+
+    def testaddrevisionbadnode(self):
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
+            # Adding a revision with bad node value fails.
+            with self.assertRaises(error.StorageError):
+                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+
+    def testaddrevisionunknownflag(self):
+        f = self._makefilefn()
+        with self._maketransactionfn() as tr:
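+            # Scan downward for a flag bit no known revision flag claims,
+            # so the add is guaranteed to be rejected.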
+            for i in range(15, 0, -1):
+                if (1 << i) & ~repository.REVISION_FLAGS_KNOWN:
+                    flags = 1 << i
+                    break
+
+            with self.assertRaises(error.StorageError):
+                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+
+    def testaddgroupsimple(self):
+        f = self._makefilefn()
+
+        callbackargs = []
+        def cb(*args, **kwargs):
+            callbackargs.append((args, kwargs))
+
+        def linkmapper(node):
+            return 0
+
+        with self._maketransactionfn() as tr:
+            nodes = f.addgroup([], None, tr, addrevisioncb=cb)
+
+        self.assertEqual(nodes, [])
+        self.assertEqual(callbackargs, [])
+        self.assertEqual(len(f), 0)
+
+        fulltext0 = b'foo'
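+        # trivialdiffheader(n) is the bdiff header (0, 0, n): the "delta"
+        # is just the fulltext inserted against an empty base.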
+        delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+
+        f = self._makefilefn()
+
+        deltas = [
+            (node0, nullid, nullid, nullid, nullid, delta0, 0),
+        ]
+
+        with self._maketransactionfn() as tr:
+            nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
+
+        self.assertEqual(nodes, [
+            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
+            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
+
+        self.assertEqual(len(callbackargs), 1)
+        self.assertEqual(callbackargs[0][0][1], nodes[0])
+
+        self.assertEqual(list(f.revs()), [0])
+        self.assertEqual(f.rev(nodes[0]), 0)
+        self.assertEqual(f.node(0), nodes[0])
+
+    def testaddgroupmultiple(self):
+        f = self._makefilefn()
+
+        fulltexts = [
+            b'foo',
+            b'bar',
+            b'x' * 1024,
+        ]
+
+        nodes = []
+        with self._maketransactionfn() as tr:
+            for fulltext in fulltexts:
+                nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+
+        f = self._makefilefn()
+        deltas = []
+        for i, fulltext in enumerate(fulltexts):
+            delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
+
+            deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
+
+        with self._maketransactionfn() as tr:
+            self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
+
+        self.assertEqual(len(f), len(deltas))
+        self.assertEqual(list(f.revs()), [0, 1, 2])
+        self.assertEqual(f.rev(nodes[0]), 0)
+        self.assertEqual(f.rev(nodes[1]), 1)
+        self.assertEqual(f.rev(nodes[2]), 2)
+        self.assertEqual(f.node(0), nodes[0])
+        self.assertEqual(f.node(1), nodes[1])
+        self.assertEqual(f.node(2), nodes[2])
+
+    def testdeltaagainstcensored(self):
+        # Attempt to apply a delta made against a censored revision.
+        f = self._makefilefn()
+
+        stored1 = storageutil.packmeta({
+            b'censored': b'tombstone',
+        }, b'')
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+
+            # The node value doesn't matter since we can't verify it.
+            node1 = b'\xbb' * 20
+
+            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
+                                   censored=True)
+
+        delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
+        deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]
+
+        with self._maketransactionfn() as tr:
+            with self.assertRaises(error.CensoredBaseError):
+                f.addgroup(deltas, lambda x: 0, tr)
+
+    def testcensorrevisionbasic(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
+            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+
+        with self._maketransactionfn() as tr:
+            f.censorrevision(tr, node1)
+
+        self.assertEqual(len(f), 3)
+        self.assertEqual(list(f.revs()), [0, 1, 2])
+
+        self.assertEqual(f.read(node0), b'foo\n' * 30)
+        self.assertEqual(f.read(node2), b'foo\n' * 32)
+
+        with self.assertRaises(error.CensoredNodeError):
+            f.read(node1)
+
+    def testgetstrippointnoparents(self):
+        # N revisions where none have parents.
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            for rev in range(10):
+                f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+
+        for rev in range(10):
+            self.assertEqual(f.getstrippoint(rev), (rev, set()))
+
+    def testgetstrippointlinear(self):
+        # N revisions in a linear chain.
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            p1 = nullid
+
+            for rev in range(10):
+                f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+
+        for rev in range(10):
+            self.assertEqual(f.getstrippoint(rev), (rev, set()))
+
+    def testgetstrippointmultipleheads(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, nullid)
+            f.add(b'2', None, tr, 2, node1, nullid)
+            f.add(b'3', None, tr, 3, node0, nullid)
+            f.add(b'4', None, tr, 4, node0, nullid)
+
+        for rev in range(5):
+            self.assertEqual(f.getstrippoint(rev), (rev, set()))
+
+    def testgetstrippointearlierlinkrevs(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
+            f.add(b'1', None, tr, 10, node0, nullid)
+            f.add(b'2', None, tr, 5, node0, nullid)
+
+        self.assertEqual(f.getstrippoint(0), (0, set()))
+        self.assertEqual(f.getstrippoint(1), (1, set()))
+        self.assertEqual(f.getstrippoint(2), (1, set()))
+        self.assertEqual(f.getstrippoint(3), (1, set()))
+        self.assertEqual(f.getstrippoint(4), (1, set()))
+        self.assertEqual(f.getstrippoint(5), (1, set()))
+        self.assertEqual(f.getstrippoint(6), (1, {2}))
+        self.assertEqual(f.getstrippoint(7), (1, {2}))
+        self.assertEqual(f.getstrippoint(8), (1, {2}))
+        self.assertEqual(f.getstrippoint(9), (1, {2}))
+        self.assertEqual(f.getstrippoint(10), (1, {2}))
+        self.assertEqual(f.getstrippoint(11), (3, set()))
+
+    def teststripempty(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            f.strip(0, tr)
+
+        self.assertEqual(len(f), 0)
+
+    def teststripall(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            p1 = nullid
+            for rev in range(10):
+                p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+
+        self.assertEqual(len(f), 10)
+
+        with self._maketransactionfn() as tr:
+            f.strip(0, tr)
+
+        self.assertEqual(len(f), 0)
+
+    def teststrippartial(self):
+        f = self._makefilefn()
+
+        with self._maketransactionfn() as tr:
+            f.add(b'0', None, tr, 0, nullid, nullid)
+            node1 = f.add(b'1', None, tr, 5, nullid, nullid)
+            node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+
+        self.assertEqual(len(f), 3)
+
+        with self._maketransactionfn() as tr:
+            f.strip(11, tr)
+
+        self.assertEqual(len(f), 3)
+
+        with self._maketransactionfn() as tr:
+            f.strip(10, tr)
+
+        self.assertEqual(len(f), 2)
+
+        with self.assertRaises(error.LookupError):
+            f.rev(node2)
+
+        with self._maketransactionfn() as tr:
+            f.strip(6, tr)
+
+        self.assertEqual(len(f), 2)
+
+        with self._maketransactionfn() as tr:
+            f.strip(3, tr)
+
+        self.assertEqual(len(f), 1)
+
+        with self.assertRaises(error.LookupError):
+            f.rev(node1)
+
+def makeifileindextests(makefilefn, maketransactionfn, addrawrevisionfn):
+    """Create a unittest.TestCase class suitable for testing file storage.
+
+    ``makefilefn`` is a callable which receives the test case as an
+    argument and returns an object implementing the ``ifilestorage`` interface.
+
+    ``maketransactionfn`` is a callable which receives the test case as an
+    argument and returns a transaction object.
+
+    ``addrawrevisionfn`` is a callable which receives arguments describing a
+    low-level revision to add. This callable allows the insertion of
+    potentially bad data into the store in order to facilitate testing.
+
+    Returns a ``unittest.TestCase`` subclass that exercises an object
+    implementing the file storage interface. Simply assign the returned
+    value to a module-level attribute and a test loader should find and
+    run it automatically.
+    """
+    d = {
+        r'_makefilefn': makefilefn,
+        r'_maketransactionfn': maketransactionfn,
+        r'_addrawrevisionfn': addrawrevisionfn,
+    }
+    return type(r'ifileindextests', (ifileindextests,), d)
+
+def makeifiledatatests(makefilefn, maketransactionfn, addrawrevisionfn):
+    """Create a unittest.TestCase class suitable for testing file data.
+
+    The arguments are the same as for ``makeifileindextests()``.
+    """
+    d = {
+        r'_makefilefn': makefilefn,
+        r'_maketransactionfn': maketransactionfn,
+        r'_addrawrevisionfn': addrawrevisionfn,
+    }
+    return type(r'ifiledatatests', (ifiledatatests,), d)
+
+def makeifilemutationtests(makefilefn, maketransactionfn, addrawrevisionfn):
+    """Create a unittest.TestCase class suitable for testing file mutation.
+
+    The arguments are the same as for ``makeifileindextests()``.
+    """
+    d = {
+        r'_makefilefn': makefilefn,
+        r'_maketransactionfn': maketransactionfn,
+        r'_addrawrevisionfn': addrawrevisionfn,
+    }
+    return type(r'ifilemutationtests', (ifilemutationtests,), d)
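The factory trio above is how a storage backend gets conformance coverage: supply the three callables, assign the generated classes at module level, and let the test loader pick them up. A minimal sketch for a revlog-backed filelog, assuming a scratch vfs, a 'plain' vfsmap entry, and the transaction constructor signature seen in the transaction.py hunks below; the addrawrevision helper is backend-specific and elided here:

    import unittest

    from mercurial import (
        filelog,
        transaction,
        ui as uimod,
        vfs as vfsmod,
    )
    from mercurial.testing import storage as storagetesting

    ui = uimod.ui.load()
    vfs = vfsmod.vfs(b'.', realpath=True)
    counter = [0]

    def makefilefn(self):
        # A fresh filelog per test; the name only needs to be unique.
        counter[0] += 1
        return filelog.filelog(vfs, b'testfile-%d' % counter[0])

    def maketransactionfn(self):
        vfsmap = {'plain': vfs}
        return transaction.transaction(ui.warn, vfs, vfsmap,
                                       b'journal', b'undo')

    # Only the index tests are generated here; the data and mutation
    # classes additionally need an addrawrevisionfn.
    ifileindextests = storagetesting.makeifileindextests(
        makefilefn, maketransactionfn, None)

    if __name__ == '__main__':
        unittest.main()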
--- a/mercurial/transaction.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/transaction.py	Mon Oct 22 14:46:06 2018 -0400
@@ -38,7 +38,7 @@
 
 def active(func):
     def _active(self, *args, **kwds):
-        if self.count == 0:
+        if self._count == 0:
             raise error.Abort(_(
                 'cannot use transaction when it is already committed/aborted'))
         return func(self, *args, **kwds)
@@ -119,37 +119,37 @@
         which determine whether file stat ambiguity should be avoided
         for corresponded files.
         """
-        self.count = 1
-        self.usages = 1
-        self.report = report
+        self._count = 1
+        self._usages = 1
+        self._report = report
         # a vfs to the store content
-        self.opener = opener
+        self._opener = opener
         # a map to access file in various {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[''] = opener  # set default value
         self._vfsmap = vfsmap
-        self.after = after
-        self.entries = []
-        self.map = {}
-        self.journal = journalname
-        self.undoname = undoname
+        self._after = after
+        self._entries = []
+        self._map = {}
+        self._journal = journalname
+        self._undoname = undoname
         self._queue = []
         # A callback to validate transaction content before closing it.
         # It should raise an exception if anything is wrong.
         # The target users are repository hooks.
         if validator is None:
             validator = lambda tr: None
-        self.validator = validator
+        self._validator = validator
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
-        self.releasefn = releasefn
+        self._releasefn = releasefn
 
-        self.checkambigfiles = set()
+        self._checkambigfiles = set()
         if checkambigfiles:
-            self.checkambigfiles.update(checkambigfiles)
+            self._checkambigfiles.update(checkambigfiles)
 
-        self.names = [name]
+        self._names = [name]
 
         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
@@ -157,7 +157,7 @@
 
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
-        self.file = opener.open(self.journal, "w")
+        self._file = opener.open(self._journal, "w")
 
         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
@@ -167,12 +167,12 @@
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
-        self._backupjournal = "%s.backupfiles" % self.journal
+        self._backupjournal = "%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, 'w')
         self._backupsfile.write('%d\n' % version)
 
         if createmode is not None:
-            opener.chmod(self.journal, createmode & 0o666)
+            opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)
 
         # hold file generations to be performed on commit
@@ -189,12 +189,12 @@
         self._abortcallback = {}
 
     def __repr__(self):
-        name = r'/'.join(self.names)
+        name = r'/'.join(self._names)
         return (r'<transaction name=%s, count=%d, usages=%d>' %
-                (name, self.count, self.usages))
+                (name, self._count, self._usages))
 
     def __del__(self):
-        if self.journal:
+        if self._journal:
             self._abort()
 
     @active
@@ -218,7 +218,7 @@
     @active
     def add(self, file, offset, data=None):
         """record the state of an append-only file before update"""
-        if file in self.map or file in self._backupmap:
+        if file in self._map or file in self._backupmap:
             return
         if self._queue:
             self._queue[-1].append((file, offset, data))
@@ -228,13 +228,13 @@
 
     def _addentry(self, file, offset, data):
         """add a append-only entry to memory and on-disk state"""
-        if file in self.map or file in self._backupmap:
+        if file in self._map or file in self._backupmap:
             return
-        self.entries.append((file, offset, data))
-        self.map[file] = len(self.entries) - 1
+        self._entries.append((file, offset, data))
+        self._map[file] = len(self._entries) - 1
         # add enough data to the journal to do the truncate
-        self.file.write("%s\0%d\n" % (file, offset))
-        self.file.flush()
+        self._file.write("%s\0%d\n" % (file, offset))
+        self._file.flush()
 
     @active
     def addbackup(self, file, hardlink=True, location=''):
@@ -251,11 +251,11 @@
             msg = 'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
-        if file in self.map or file in self._backupmap:
+        if file in self._map or file in self._backupmap:
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
-        backupfilename = "%s.backup.%s" % (self.journal, filename)
+        backupfilename = "%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
@@ -340,7 +340,7 @@
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
-                        checkambig = (name, location) in self.checkambigfiles
+                        checkambig = (name, location) in self._checkambigfiles
                     files.append(vfs(name, 'w', atomictemp=True,
                                      checkambig=checkambig))
                 genfunc(*files)
@@ -351,8 +351,8 @@
 
     @active
     def find(self, file):
-        if file in self.map:
-            return self.entries[self.map[file]]
+        if file in self._map:
+            return self._entries[self._map[file]]
         if file in self._backupmap:
             return self._backupentries[self._backupmap[file]]
         return None
@@ -364,31 +364,31 @@
         that are not pending in the queue
         '''
 
-        if file not in self.map:
+        if file not in self._map:
             raise KeyError(file)
-        index = self.map[file]
-        self.entries[index] = (file, offset, data)
-        self.file.write("%s\0%d\n" % (file, offset))
-        self.file.flush()
+        index = self._map[file]
+        self._entries[index] = (file, offset, data)
+        self._file.write("%s\0%d\n" % (file, offset))
+        self._file.flush()
 
     @active
     def nest(self, name=r'<unnamed>'):
-        self.count += 1
-        self.usages += 1
-        self.names.append(name)
+        self._count += 1
+        self._usages += 1
+        self._names.append(name)
         return self
 
     def release(self):
-        if self.count > 0:
-            self.usages -= 1
-        if self.names:
-            self.names.pop()
+        if self._count > 0:
+            self._usages -= 1
+        if self._names:
+            self._names.pop()
         # if the transaction scopes are left without being closed, fail
-        if self.count > 0 and self.usages == 0:
+        if self._count > 0 and self._usages == 0:
             self._abort()
 
     def running(self):
-        return self.count > 0
+        return self._count > 0
 
     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending
@@ -454,9 +454,9 @@
     @active
     def close(self):
         '''commit the transaction'''
-        if self.count == 1:
-            self.validator(self)  # will raise exception if needed
-            self.validator = None # Help prevent cycles.
+        if self._count == 1:
+            self._validator(self)  # will raise exception if needed
+            self._validator = None # Help prevent cycles.
             self._generatefiles(group=gengroupprefinalize)
             categories = sorted(self._finalizecallback)
             for cat in categories:
@@ -465,16 +465,16 @@
             self._finalizecallback = None
             self._generatefiles(group=gengrouppostfinalize)
 
-        self.count -= 1
-        if self.count != 0:
+        self._count -= 1
+        if self._count != 0:
             return
-        self.file.close()
+        self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
-                self.report("couldn't remove %s: unknown cache location %s\n"
-                            % (b, l))
+                self._report("couldn't remove %s: unknown cache location %s\n"
+                             % (b, l))
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
@@ -484,21 +484,21 @@
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
-                    self.report("couldn't remove %s: %s\n"
-                                % (vfs.join(b), inst))
-        self.entries = []
+                    self._report("couldn't remove %s: %s\n"
+                                 % (vfs.join(b), inst))
+        self._entries = []
         self._writeundo()
-        if self.after:
-            self.after()
-            self.after = None # Help prevent cycles.
-        if self.opener.isfile(self._backupjournal):
-            self.opener.unlink(self._backupjournal)
-        if self.opener.isfile(self.journal):
-            self.opener.unlink(self.journal)
+        if self._after:
+            self._after()
+            self._after = None # Help prevent cycles.
+        if self._opener.isfile(self._backupjournal):
+            self._opener.unlink(self._backupjournal)
+        if self._opener.isfile(self._journal):
+            self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
-                self.report("couldn't remove %s: unknown cache location"
-                            "%s\n" % (b, l))
+                self._report("couldn't remove %s: unknown cache location"
+                             "%s\n" % (b, l))
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
@@ -508,13 +508,13 @@
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
-                    self.report("couldn't remove %s: %s\n"
-                                % (vfs.join(b), inst))
+                    self._report("couldn't remove %s: %s\n"
+                                 % (vfs.join(b), inst))
         self._backupentries = []
-        self.journal = None
+        self._journal = None
 
-        self.releasefn(self, True) # notify success of closing transaction
-        self.releasefn = None # Help prevent cycles.
+        self._releasefn(self, True) # notify success of closing transaction
+        self._releasefn = None # Help prevent cycles.
 
         # run post close action
         categories = sorted(self._postclosecallback)
@@ -532,9 +532,10 @@
 
     def _writeundo(self):
         """write transaction data for possible future undo call"""
-        if self.undoname is None:
+        if self._undoname is None:
             return
-        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
+        undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
+                                           'w')
         undobackupfile.write('%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
@@ -543,13 +544,13 @@
                 u = ''
             else:
                 if l not in self._vfsmap and c:
-                    self.report("couldn't remove %s: unknown cache location"
-                                "%s\n" % (b, l))
+                    self._report("couldn't remove %s: unknown cache location"
+                                 "%s\n" % (b, l))
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
-                assert name.startswith(self.journal), name
-                uname = name.replace(self.journal, self.undoname, 1)
+                assert name.startswith(self._journal), name
+                uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
@@ -557,36 +558,36 @@
 
 
     def _abort(self):
-        self.count = 0
-        self.usages = 0
-        self.file.close()
+        self._count = 0
+        self._usages = 0
+        self._file.close()
         self._backupsfile.close()
 
         try:
-            if not self.entries and not self._backupentries:
+            if not self._entries and not self._backupentries:
                 if self._backupjournal:
-                    self.opener.unlink(self._backupjournal)
-                if self.journal:
-                    self.opener.unlink(self.journal)
+                    self._opener.unlink(self._backupjournal)
+                if self._journal:
+                    self._opener.unlink(self._journal)
                 return
 
-            self.report(_("transaction abort!\n"))
+            self._report(_("transaction abort!\n"))
 
             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
-                _playback(self.journal, self.report, self.opener, self._vfsmap,
-                          self.entries, self._backupentries, False,
-                          checkambigfiles=self.checkambigfiles)
-                self.report(_("rollback completed\n"))
+                _playback(self._journal, self._report, self._opener,
+                          self._vfsmap, self._entries, self._backupentries,
+                          False, checkambigfiles=self._checkambigfiles)
+                self._report(_("rollback completed\n"))
             except BaseException:
-                self.report(_("rollback failed - please run hg recover\n"))
+                self._report(_("rollback failed - please run hg recover\n"))
         finally:
-            self.journal = None
-            self.releasefn(self, False) # notify failure of transaction
-            self.releasefn = None # Help prevent cycles.
+            self._journal = None
+            self._releasefn(self, False) # notify failure of transaction
+            self._releasefn = None # Help prevent cycles.
 
 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file
--- a/mercurial/treediscovery.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/treediscovery.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,6 +16,7 @@
 )
 from . import (
     error,
+    pycompat,
 )
 
 def findcommonincoming(repo, remote, heads=None, force=False):
@@ -111,7 +112,7 @@
             progress.increment()
             repo.ui.debug("request %d: %s\n" %
                         (reqcnt, " ".join(map(short, r))))
-            for p in xrange(0, len(r), 10):
+            for p in pycompat.xrange(0, len(r), 10):
                 with remote.commandexecutor() as e:
                     branches = e.callcommand('branches', {
                         'nodes': r[p:p + 10],
--- a/mercurial/ui.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/ui.py	Mon Oct 22 14:46:06 2018 -0400
@@ -67,6 +67,9 @@
 update.check = noconflict
 # Show conflicts information in `hg status`
 status.verbose = True
+# Refuse to perform `hg resolve --mark` on files that still have conflict
+# markers
+resolve.mark-check = abort
 
 [diff]
 git = 1
@@ -394,7 +397,7 @@
     def readconfig(self, filename, root=None, trust=False,
                    sections=None, remap=None):
         try:
-            fp = open(filename, u'rb')
+            fp = open(filename, r'rb')
         except IOError:
             if not sections: # ignore unless we were looking for something
                 return
@@ -446,7 +449,7 @@
         if section in (None, 'paths'):
             # expand vars and ~
             # translate paths relative to root (or home) into absolute paths
-            root = root or pycompat.getcwd()
+            root = root or encoding.getcwd()
             for c in self._tcfg, self._ucfg, self._ocfg:
                 for n, p in c.items('paths'):
                     # Ignore sub-options.
@@ -1051,6 +1054,7 @@
             command in self.configlist('pager', 'ignore')
             or not self.configbool('ui', 'paginate')
             or not self.configbool('pager', 'attend-' + command, True)
+            or encoding.environ.get('TERM') == 'dumb'
             # TODO: if we want to allow HGPLAINEXCEPT=pager,
             # formatted() will need some adjustment.
             or not self.formatted()
@@ -1072,7 +1076,8 @@
             if name not in encoding.environ:
                 pagerenv[name] = value
 
-        self.debug('starting pager for command %r\n' % command)
+        self.debug('starting pager for command %s\n' %
+                   stringutil.pprint(command))
         self.flush()
 
         wasformatted = self.formatted()
@@ -1128,10 +1133,10 @@
 
         try:
             pager = subprocess.Popen(
-                command, shell=shell, bufsize=-1,
+                procutil.tonativestr(command), shell=shell, bufsize=-1,
                 close_fds=procutil.closefds, stdin=subprocess.PIPE,
                 stdout=procutil.stdout, stderr=procutil.stderr,
-                env=procutil.shellenviron(env))
+                env=procutil.tonativeenv(procutil.shellenviron(env)))
         except OSError as e:
             if e.errno == errno.ENOENT and not shell:
                 self.warn(_("missing pager command '%s', skipping pager\n")
@@ -1422,6 +1427,7 @@
                     return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
+
     def status(self, *msg, **opts):
         '''write status message to output (if ui.quiet is False)
 
@@ -1430,6 +1436,7 @@
         if not self.quiet:
             opts[r'label'] = opts.get(r'label', '') + ' ui.status'
             self.write(*msg, **opts)
+
     def warn(self, *msg, **opts):
         '''write warning message to output (stderr)
 
@@ -1437,6 +1444,15 @@
         '''
         opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
         self.write_err(*msg, **opts)
+
+    def error(self, *msg, **opts):
+        '''write error message to output (stderr)
+
+        This adds an output label of "ui.error".
+        '''
+        opts[r'label'] = opts.get(r'label', '') + ' ui.error'
+        self.write_err(*msg, **opts)
+
     def note(self, *msg, **opts):
         '''write note to output (if ui.verbose is True)
 
@@ -1445,6 +1461,7 @@
         if self.verbose:
             opts[r'label'] = opts.get(r'label', '') + ' ui.note'
             self.write(*msg, **opts)
+
     def debug(self, *msg, **opts):
         '''write debug message to output (if ui.debugflag is True)
 
--- a/mercurial/unionrepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/unionrepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,13 +19,13 @@
 from . import (
     changelog,
     cmdutil,
+    encoding,
     error,
     filelog,
     localrepo,
     manifest,
     mdiff,
     pathutil,
-    pycompat,
     revlog,
     util,
     vfs as vfsmod,
@@ -73,7 +73,7 @@
             # I have no idea if csize is valid in the base revlog context.
             e = (flags, None, rsize, base,
                  link, self.rev(p1node), self.rev(p2node), node)
-            self.index.insert(-1, e)
+            self.index.append(e)
             self.nodemap[node] = n
             self.bundlerevs.add(n)
             n += 1
@@ -110,7 +110,7 @@
 
         if rev > self.repotiprev:
             text = self.revlog2.revision(node)
-            self._cache = (node, rev, text)
+            self._revisioncache = (node, rev, text)
         else:
             text = self.baserevision(rev)
             # already cached
@@ -192,28 +192,33 @@
     def canpush(self):
         return False
 
-class unionrepository(localrepo.localrepository):
-    def __init__(self, ui, path, path2):
-        localrepo.localrepository.__init__(self, ui, path)
+class unionrepository(object):
+    """Represents the union of data in 2 repositories.
+
+    Instances are not usable if constructed directly. Use ``instance()``
+    or ``makeunionrepository()`` to create a usable instance.
+    """
+    def __init__(self, repo2, url):
+        self.repo2 = repo2
+        self._url = url
+
         self.ui.setconfig('phases', 'publish', False, 'unionrepo')
 
-        self._url = 'union:%s+%s' % (util.expandpath(path),
-                                     util.expandpath(path2))
-        self.repo2 = localrepo.localrepository(ui, path2)
-
     @localrepo.unfilteredpropertycache
     def changelog(self):
         return unionchangelog(self.svfs, self.repo2.svfs)
 
+    @localrepo.unfilteredpropertycache
+    def manifestlog(self):
+        rootstore = unionmanifest(self.svfs, self.repo2.svfs,
+                                  self.unfiltered()._clrev)
+        return manifest.manifestlog(self.svfs, self, rootstore)
+
     def _clrev(self, rev2):
         """map from repo2 changelog rev to temporary rev in self.changelog"""
         node = self.repo2.changelog.node(rev2)
         return self.changelog.rev(node)
 
-    def _constructmanifest(self):
-        return unionmanifest(self.svfs, self.repo2.svfs,
-                             self.unfiltered()._clrev)
-
     def url(self):
         return self._url
 
@@ -231,21 +236,21 @@
         return unionpeer(self)
 
     def getcwd(self):
-        return pycompat.getcwd() # always outside the repo
+        return encoding.getcwd() # always outside the repo
 
-def instance(ui, path, create, intents=None):
+def instance(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_('cannot create new union repository'))
     parentpath = ui.config("bundle", "mainreporoot")
     if not parentpath:
         # try to find the correct path to the working directory repo
-        parentpath = cmdutil.findrepo(pycompat.getcwd())
+        parentpath = cmdutil.findrepo(encoding.getcwd())
         if parentpath is None:
             parentpath = ''
     if parentpath:
         # Try to make the full path relative so we get a nice, short URL.
         # In particular, we don't want temp dir names in test outputs.
-        cwd = pycompat.getcwd()
+        cwd = encoding.getcwd()
         if parentpath == cwd:
             parentpath = ''
         else:
@@ -260,4 +265,22 @@
             repopath, repopath2 = s
     else:
         repopath, repopath2 = parentpath, path
-    return unionrepository(ui, repopath, repopath2)
+
+    return makeunionrepository(ui, repopath, repopath2)
+
+def makeunionrepository(ui, repopath1, repopath2):
+    """Make a union repository object from 2 local repo paths."""
+    repo1 = localrepo.instance(ui, repopath1, create=False)
+    repo2 = localrepo.instance(ui, repopath2, create=False)
+
+    url = 'union:%s+%s' % (util.expandpath(repopath1),
+                           util.expandpath(repopath2))
+
+    class derivedunionrepository(unionrepository, repo1.__class__):
+        pass
+
+    repo = repo1
+    repo.__class__ = derivedunionrepository
+    unionrepository.__init__(repo1, repo2, url)
+
+    return repo
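The tail of makeunionrepository() is the dynamic-subclass idiom: derive from the instance's concrete class so the union behavior layers over whatever localrepository subclass repo1 actually is, extensions included. The same idiom in isolation, with illustrative names only:

    class plainrepo(object):
        def url(self):
            return b'file:/repo1'

    class unionmixin(object):
        def url(self):
            return self._unionurl

    def makeunion(repo, url):
        # Subclass the instance's own class, then swap it in place.
        class derived(unionmixin, repo.__class__):
            pass
        repo.__class__ = derived
        repo._unionurl = url
        return repo

    repo = makeunion(plainrepo(), b'union:/repo1+/repo2')
    assert repo.url() == b'union:/repo1+/repo2'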
--- a/mercurial/upgrade.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/upgrade.py	Mon Oct 22 14:46:06 2018 -0400
@@ -198,8 +198,9 @@
     _requirement = None
 
     @staticmethod
-    def _newreporequirements(repo):
-        return localrepo.newreporequirements(repo)
+    def _newreporequirements(ui):
+        return localrepo.newreporequirements(
+            ui, localrepo.defaultcreateopts(ui))
 
     @classmethod
     def fromrepo(cls, repo):
@@ -209,7 +210,7 @@
     @classmethod
     def fromconfig(cls, repo):
         assert cls._requirement is not None
-        return cls._requirement in cls._newreporequirements(repo)
+        return cls._requirement in cls._newreporequirements(repo.ui)
 
 @registerformatvariant
 class fncache(requirementformatvariant):
@@ -450,7 +451,7 @@
         return changelog.changelog(repo.svfs)
     elif path.endswith('00manifest.i'):
         mandir = path[:-len('00manifest.i')]
-        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+        return manifest.manifestrevlog(repo.svfs, tree=mandir)
     else:
         # reverse of "/".join(("data", path + ".i"))
         return filelog.filelog(repo.svfs, path[5:-2])
@@ -483,15 +484,13 @@
             continue
 
         rl = _revlogfrompath(srcrepo, unencoded)
-        revcount += len(rl)
+
+        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
+                              trackedsize=True, storedsize=True)
 
-        datasize = 0
-        rawsize = 0
-        idx = rl.index
-        for rev in rl:
-            e = idx[rev]
-            datasize += e[1]
-            rawsize += e[2]
+        revcount += info['revisionscount'] or 0
+        datasize = info['storedsize'] or 0
+        rawsize = info['trackedsize'] or 0
 
         srcsize += datasize
         srcrawsize += rawsize
@@ -581,10 +580,8 @@
                     deltareuse=deltareuse,
                     deltabothparents=deltabothparents)
 
-        datasize = 0
-        idx = newrl.index
-        for rev in newrl:
-            datasize += idx[rev][1]
+        info = newrl.storageinfo(storedsize=True)
+        datasize = info['storedsize'] or 0
 
         dstsize += datasize
 
@@ -751,7 +748,8 @@
 
     # FUTURE there is potentially a need to control the wanted requirements via
     # command arguments or via an extension hook point.
-    newreqs = localrepo.newreporequirements(repo)
+    newreqs = localrepo.newreporequirements(
+        repo.ui, localrepo.defaultcreateopts(repo.ui))
     newreqs.update(preservedrequirements(repo))
 
     noremovereqs = (repo.requirements - newreqs -
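The sizing code now queries storageinfo() instead of poking at raw index tuples. A requested key should always be present in the result, but a backend may answer None when a metric isn't cheap to compute, hence the `or 0` defaults in the hunks above. The consumer pattern, sketched:

    # 'rl' is any ifilestorage object, e.g. one from _revlogfrompath().
    info = rl.storageinfo(revisionscount=True, trackedsize=True,
                          storedsize=True)
    revisions = info['revisionscount'] or 0  # None when unknown
    tracked = info['trackedsize'] or 0       # fulltext bytes tracked
    stored = info['storedsize'] or 0         # bytes consumed on disk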
--- a/mercurial/url.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/url.py	Mon Oct 22 14:46:06 2018 -0400
@@ -317,8 +317,8 @@
 
 class logginghttphandler(httphandler):
     """HTTP handler that logs socket I/O."""
-    def __init__(self, logfh, name, observeropts):
-        super(logginghttphandler, self).__init__()
+    def __init__(self, logfh, name, observeropts, timeout=None):
+        super(logginghttphandler, self).__init__(timeout=timeout)
 
         self._logfh = logfh
         self._logname = name
@@ -339,7 +339,7 @@
         return logginghttpconnection(createconnection, *args, **kwargs)
 
 if has_https:
-    class httpsconnection(httplib.HTTPConnection):
+    class httpsconnection(keepalive.HTTPConnection):
         response_class = keepalive.HTTPResponse
         default_port = httplib.HTTPS_PORT
         # must be able to send big bundle as stream.
@@ -348,7 +348,7 @@
 
         def __init__(self, host, port=None, key_file=None, cert_file=None,
                      *args, **kwargs):
-            httplib.HTTPConnection.__init__(self, host, port, *args, **kwargs)
+            keepalive.HTTPConnection.__init__(self, host, port, *args, **kwargs)
             self.key_file = key_file
             self.cert_file = cert_file
 
@@ -365,8 +365,8 @@
             sslutil.validatesocket(self.sock)
 
     class httpshandler(keepalive.KeepAliveHandler, urlreq.httpshandler):
-        def __init__(self, ui):
-            keepalive.KeepAliveHandler.__init__(self)
+        def __init__(self, ui, timeout=None):
+            keepalive.KeepAliveHandler.__init__(self, timeout=timeout)
             urlreq.httpshandler.__init__(self)
             self.ui = ui
             self.pwmgr = passwordmgr(self.ui,
@@ -525,18 +525,19 @@
     ``sendaccept`` allows controlling whether the ``Accept`` request header
     is sent. The header is sent by default.
     '''
+    timeout = ui.configwith(float, 'http', 'timeout')
     handlers = []
 
     if loggingfh:
         handlers.append(logginghttphandler(loggingfh, loggingname,
-                                           loggingopts or {}))
+                                           loggingopts or {}, timeout=timeout))
         # We don't yet support HTTPS when logging I/O. If we attempt to open
         # an HTTPS URL, we'll likely fail due to unknown protocol.
 
     else:
-        handlers.append(httphandler())
+        handlers.append(httphandler(timeout=timeout))
         if has_https:
-            handlers.append(httpshandler(ui))
+            handlers.append(httpshandler(ui, timeout=timeout))
 
     handlers.append(proxyhandler(ui))
 
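
The timeout is read with ``ui.configwith(float, 'http', 'timeout')`` and
threaded through to every handler above, so it should be settable from any
hgrc like other options (the value is a socket timeout in seconds):

    [http]
    timeout = 30
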
@@ -555,6 +556,11 @@
     handlers.append(cookiehandler(ui))
     opener = urlreq.buildopener(*handlers)
 
+    # keepalive.py's handlers will populate these attributes if they exist.
+    opener.requestscount = 0
+    opener.sentbytescount = 0
+    opener.receivedbytescount = 0
+
     # The user agent should *NOT* be used by servers for e.g.
     # protocol detection or feature negotiation: there are other
     # facilities for that.
@@ -595,3 +601,39 @@
         url_ = 'file://' + pycompat.bytesurl(urlreq.pathname2url(path))
         authinfo = None
     return opener(ui, authinfo).open(pycompat.strurl(url_), data)
+
+def wrapresponse(resp):
+    """Wrap a response object with common error handlers.
+
+    This ensures that any I/O from any consumer raises the appropriate
+    error and messaging.
+    """
+    origread = resp.read
+
+    class readerproxy(resp.__class__):
+        def read(self, size=None):
+            try:
+                return origread(size)
+            except httplib.IncompleteRead as e:
+                # e.expected is an integer if length known or None otherwise.
+                if e.expected:
+                    got = len(e.partial)
+                    total = e.expected + got
+                    msg = _('HTTP request error (incomplete response; '
+                            'expected %d bytes got %d)') % (total, got)
+                else:
+                    msg = _('HTTP request error (incomplete response)')
+
+                raise error.PeerTransportError(
+                    msg,
+                    hint=_('this may be an intermittent network failure; '
+                           'if the error persists, consider contacting the '
+                           'network or server operator'))
+            except httplib.HTTPException as e:
+                raise error.PeerTransportError(
+                    _('HTTP request error (%s)') % e,
+                    hint=_('this may be an intermittent network failure; '
+                           'if the error persists, consider contacting the '
+                           'network or server operator'))
+
+    resp.__class__ = readerproxy
--- a/mercurial/urllibcompat.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/urllibcompat.py	Mon Oct 22 14:46:06 2018 -0400
@@ -92,6 +92,10 @@
     # (if necessary), and returns str. This is wonky. We provide a custom
     # implementation that only accepts bytes and emits bytes.
     def quote(s, safe=r'/'):
+        # bytestr has an __iter__ that emits characters. quote_from_bytes()
+        # does an iteration and expects ints. We coerce to bytes to appease it.
+        if isinstance(s, pycompat.bytestr):
+            s = bytes(s)
         s = urllib.parse.quote_from_bytes(s, safe=safe)
         return s.encode('ascii', 'strict')
 
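
A minimal illustration of the wrinkle being worked around (standard library
only): iterating ``bytes`` yields integers, which is what
``quote_from_bytes()`` consumes, whereas a ``bytes`` subclass whose
``__iter__`` yields 1-byte strings (like ``pycompat.bytestr``) would feed it
the wrong element type, hence the ``bytes(s)`` coercion above:

    from urllib.parse import quote_from_bytes
    quote_from_bytes(b'a b/c')    # -> 'a%20b/c'; iterates ints 97, 32, ...
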
--- a/mercurial/util.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/util.py	Mon Oct 22 14:46:06 2018 -0400
@@ -36,6 +36,10 @@
 import warnings
 import zlib
 
+from .thirdparty import (
+    attr,
+)
+from hgdemandimport import tracing
 from . import (
     encoding,
     error,
@@ -108,6 +112,7 @@
 pconvert = platform.pconvert
 poll = platform.poll
 posixfile = platform.posixfile
+readlink = platform.readlink
 rename = platform.rename
 removedirs = platform.removedirs
 samedevice = platform.samedevice
@@ -328,7 +333,7 @@
         return self._frombuffer(min(self._lenbuf, size))
 
     def readline(self, *args, **kwargs):
-        if 1 < len(self._buffer):
+        if len(self._buffer) > 1:
             # this should not happen because both read and readline end with a
             # _frombuffer call that collapses it.
             self._buffer = [''.join(self._buffer)]
@@ -343,7 +348,7 @@
         size = lfi + 1
         if lfi < 0: # end of file
             size = self._lenbuf
-        elif 1 < len(self._buffer):
+        elif len(self._buffer) > 1:
             # we need to take previous chunks into account
             size += self._lenbuf - len(self._buffer[-1])
         return self._frombuffer(size)
@@ -355,7 +360,7 @@
         if size == 0 or not self._buffer:
             return ''
         buf = self._buffer[0]
-        if 1 < len(self._buffer):
+        if len(self._buffer) > 1:
             buf = ''.join(self._buffer)
 
         data = buf[:size]
@@ -945,12 +950,12 @@
 
         self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
 
-    def setsockopt(self, level, optname, value):
+    def setsockopt(self, res, level, optname, value):
         if not self.states:
             return
 
         self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
-            self.name, level, optname, value))
+            self.name, level, optname, value, res))
 
 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                       logdata=False, logdataapis=True):
@@ -1205,7 +1210,7 @@
     Holds a reference to nodes on either side as well as a key-value
     pair for the dictionary entry.
     """
-    __slots__ = (u'next', u'prev', u'key', u'value')
+    __slots__ = (u'next', u'prev', u'key', u'value', u'cost')
 
     def __init__(self):
         self.next = None
@@ -1213,10 +1218,13 @@
 
         self.key = _notset
         self.value = None
+        self.cost = 0
 
     def markempty(self):
         """Mark the node as emptied."""
         self.key = _notset
+        self.value = None
+        self.cost = 0
 
 class lrucachedict(object):
     """Dict that caches most recent accesses and sets.
@@ -1229,15 +1237,27 @@
     we recycle head.prev and make it the new head. Cache accesses result in
     the node being moved to before the existing head and being marked as the
     new head node.
+
+    Items in the cache can be inserted with an optional "cost" value. This is
+    simply an integer that is specified by the caller. The cache can be queried
+    for the total cost of all items presently in the cache.
+
+    The cache can also define a maximum cost. If a cache insertion would
+    cause the total cost of the cache to go beyond the maximum cost limit,
+    nodes will be evicted to make room for the new item. This can be used
+    to e.g. set a max memory limit and associate an estimated byte size
+    cost to each item in the cache. By default, no maximum cost is enforced.
     """
-    def __init__(self, max):
+    def __init__(self, max, maxcost=0):
         self._cache = {}
 
         self._head = head = _lrucachenode()
         head.prev = head
         head.next = head
         self._size = 1
-        self._capacity = max
+        self.capacity = max
+        self.totalcost = 0
+        self.maxcost = maxcost
 
     def __len__(self):
         return len(self._cache)
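
A usage sketch of the cost-aware API, following the semantics in this diff
(eviction runs until ``totalcost`` falls to 75% of ``maxcost``, per
``_enforcecostlimit`` below):

    d = lrucachedict(4, maxcost=100)
    d.insert(b'a', b'value-a', cost=60)
    d.insert(b'b', b'value-b', cost=60)
    # totalcost reached 120 > 100, so the oldest node (b'a') was evicted
    assert d.get(b'a') is None
    assert d.totalcost == 60
    d.popoldest()    # -> (b'b', b'value-b')
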
@@ -1257,15 +1277,23 @@
         self._movetohead(node)
         return node.value
 
-    def __setitem__(self, k, v):
+    def insert(self, k, v, cost=0):
+        """Insert a new item in the cache with optional cost value."""
         node = self._cache.get(k)
         # Replace existing value and mark as newest.
         if node is not None:
+            self.totalcost -= node.cost
             node.value = v
+            node.cost = cost
+            self.totalcost += cost
             self._movetohead(node)
+
+            if self.maxcost:
+                self._enforcecostlimit()
+
             return
 
-        if self._size < self._capacity:
+        if self._size < self.capacity:
             node = self._addcapacity()
         else:
             # Grab the last/oldest item.
@@ -1273,17 +1301,27 @@
 
         # At capacity. Kill the old entry.
         if node.key is not _notset:
+            self.totalcost -= node.cost
             del self._cache[node.key]
 
         node.key = k
         node.value = v
+        node.cost = cost
+        self.totalcost += cost
         self._cache[k] = node
         # And mark it as newest entry. No need to adjust order since it
         # is already self._head.prev.
         self._head = node
 
+        if self.maxcost:
+            self._enforcecostlimit()
+
+    def __setitem__(self, k, v):
+        self.insert(k, v)
+
     def __delitem__(self, k):
         node = self._cache.pop(k)
+        self.totalcost -= node.cost
         node.markempty()
 
         # Temporarily mark as newest item before re-adjusting head to make
@@ -1295,27 +1333,73 @@
 
     def get(self, k, default=None):
         try:
-            return self._cache[k].value
+            return self.__getitem__(k)
         except KeyError:
             return default
 
     def clear(self):
         n = self._head
         while n.key is not _notset:
+            self.totalcost -= n.cost
             n.markempty()
             n = n.next
 
         self._cache.clear()
 
-    def copy(self):
-        result = lrucachedict(self._capacity)
+    def copy(self, capacity=None, maxcost=0):
+        """Create a new cache as a copy of the current one.
+
+        By default, the new cache has the same capacity as the existing one.
+        But, the cache capacity can be changed as part of performing the
+        copy.
+
+        Items in the copy have an insertion/access order matching this
+        instance.
+        """
+
+        capacity = capacity or self.capacity
+        maxcost = maxcost or self.maxcost
+        result = lrucachedict(capacity, maxcost=maxcost)
+
+        # We copy entries by iterating in oldest-to-newest order so the copy
+        # has the correct ordering.
+
+        # Find the first non-empty entry.
         n = self._head.prev
-        # Iterate in oldest-to-newest order, so the copy has the right ordering
+        while n.key is _notset and n is not self._head:
+            n = n.prev
+
+        # We could potentially skip the first N items when decreasing capacity.
+        # But let's keep it simple unless it is a performance problem.
         for i in range(len(self._cache)):
-            result[n.key] = n.value
+            result.insert(n.key, n.value, cost=n.cost)
             n = n.prev
+
         return result
 
+    def popoldest(self):
+        """Remove the oldest item from the cache.
+
+        Returns the (key, value) describing the removed cache entry.
+        """
+        if not self._cache:
+            return
+
+        # Walk the linked list backwards starting at tail node until we hit
+        # a non-empty node.
+        n = self._head.prev
+        while n.key is _notset:
+            n = n.prev
+
+        key, value = n.key, n.value
+
+        # And remove it from the cache and mark it as empty.
+        del self._cache[n.key]
+        self.totalcost -= n.cost
+        n.markempty()
+
+        return key, value
+
     def _movetohead(self, node):
         """Mark a node as the newest, making it the new head.
 
@@ -1377,6 +1461,38 @@
         self._size += 1
         return node
 
+    def _enforcecostlimit(self):
+        # This should run after an insertion. It should only be called if total
+        # cost limits are being enforced.
+        # The most recently inserted node is never evicted.
+        if len(self) <= 1 or self.totalcost <= self.maxcost:
+            return
+
+        # This is logically equivalent to calling popoldest() until we
+        # free up enough cost. We don't do that since popoldest() needs
+        # to walk the linked list and doing this in a loop would be
+        # quadratic. So we find the first non-empty node and then
+        # walk nodes until we free up enough capacity.
+        #
+        # If we only removed the minimum number of nodes to free enough
+        # cost at insert time, chances are high that the next insert would
+        # also require pruning. This would effectively constitute quadratic
+        # behavior for insert-heavy workloads. To mitigate this, we set a
+        # target cost that is a percentage of the max cost. This will tend
+        # to free more nodes when the high water mark is reached, which
+        # lowers the chances of needing to prune on the subsequent insert.
+        targetcost = int(self.maxcost * 0.75)
+
+        n = self._head.prev
+        while n.key is _notset:
+            n = n.prev
+
+        while len(self) > 1 and self.totalcost > targetcost:
+            del self._cache[n.key]
+            self.totalcost -= n.cost
+            n.markempty()
+            n = n.prev
+
 def lrucachefunc(func):
     '''cache most recent results of function calls'''
     cache = {}
@@ -1726,16 +1842,14 @@
 
 def readlock(pathname):
     try:
-        return os.readlink(pathname)
+        return readlink(pathname)
     except OSError as why:
         if why.errno not in (errno.EINVAL, errno.ENOSYS):
             raise
     except AttributeError: # no symlink in os
         pass
-    fp = posixfile(pathname, 'rb')
-    r = fp.read()
-    fp.close()
-    return r
+    with posixfile(pathname, 'rb') as fp:
+        return fp.read()
 
 def fstat(fp):
     '''stat file object that may not have fileno method.'''
@@ -2874,7 +2988,44 @@
     (1, 0.000000001, _('%.3f ns')),
     )
 
-_timenesting = [0]
+@attr.s
+class timedcmstats(object):
+    """Stats information produced by the timedcm context manager on entering."""
+
+    # the starting value of the timer as a float (meaning and resolution are
+    # platform dependent, see util.timer)
+    start = attr.ib(default=attr.Factory(lambda: timer()))
+    # the number of seconds as a floating point value; starts at 0, updated when
+    # the context is exited.
+    elapsed = attr.ib(default=0)
+    # the number of nested timedcm context managers.
+    level = attr.ib(default=1)
+
+    def __bytes__(self):
+        return timecount(self.elapsed) if self.elapsed else '<unknown>'
+
+    __str__ = encoding.strmethod(__bytes__)
+
+@contextlib.contextmanager
+def timedcm(whencefmt, *whenceargs):
+    """A context manager that produces timing information for a given context.
+
+    On entering, a timedcmstats instance is produced.
+
+    This context manager is reentrant.
+
+    """
+    # track nested context managers
+    timedcm._nested += 1
+    timing_stats = timedcmstats(level=timedcm._nested)
+    try:
+        with tracing.log(whencefmt, *whenceargs):
+            yield timing_stats
+    finally:
+        timing_stats.elapsed = timer() - timing_stats.start
+        timedcm._nested -= 1
+
+timedcm._nested = 0
 
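
A usage sketch (``whencefmt``/``whenceargs`` feed the demandimport tracing
log; ``do_expensive_work`` is a hypothetical stand-in for the timed code):

    with timedcm(b'loading %s', b'manifest') as stats:
        do_expensive_work()    # hypothetical workload
    # stats.elapsed is filled in on exit; stats.level reflects nesting depth
    print(stats.elapsed, stats.level)
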
 def timed(func):
     '''Report the execution time of a function call to stderr.
@@ -2888,18 +3039,13 @@
     '''
 
     def wrapper(*args, **kwargs):
-        start = timer()
-        indent = 2
-        _timenesting[0] += indent
-        try:
-            return func(*args, **kwargs)
-        finally:
-            elapsed = timer() - start
-            _timenesting[0] -= indent
-            stderr = procutil.stderr
-            stderr.write('%s%s: %s\n' %
-                         (' ' * _timenesting[0], func.__name__,
-                          timecount(elapsed)))
+        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
+            result = func(*args, **kwargs)
+        stderr = procutil.stderr
+        stderr.write('%s%s: %s\n' % (
+            ' ' * time_stats.level * 2, pycompat.bytestr(func.__name__),
+            time_stats))
+        return result
     return wrapper
 
 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
@@ -3301,7 +3447,7 @@
         The object has a ``decompress(data)`` method that decompresses
         data. The method will only be called if ``data`` begins with
         ``revlogheader()``. The method should return the raw, uncompressed
-        data or raise a ``RevlogError``.
+        data or raise a ``StorageError``.
 
         The object is reusable but is not thread safe.
         """
@@ -3343,6 +3489,9 @@
                 return ''.join(buf)
             chunk = self._reader(65536)
             self._decompress(chunk)
+            if not chunk and not self._pending and not self._eof:
+                # No progress and no new data, bail out
+                return ''.join(buf)
 
 class _GzipCompressedStreamReader(_CompressedStreamReader):
     def __init__(self, fh):
@@ -3476,8 +3625,8 @@
             try:
                 return zlib.decompress(data)
             except zlib.error as e:
-                raise error.RevlogError(_('revlog decompress error: %s') %
-                                        stringutil.forcebytestr(e))
+                raise error.StorageError(_('revlog decompress error: %s') %
+                                         stringutil.forcebytestr(e))
 
     def revlogcompressor(self, opts=None):
         return self.zlibrevlogcompressor()
@@ -3688,8 +3837,8 @@
 
                 return ''.join(chunks)
             except Exception as e:
-                raise error.RevlogError(_('revlog decompress error: %s') %
-                                        stringutil.forcebytestr(e))
+                raise error.StorageError(_('revlog decompress error: %s') %
+                                         stringutil.forcebytestr(e))
 
     def revlogcompressor(self, opts=None):
         opts = opts or {}
@@ -3718,11 +3867,10 @@
         if not bt or not bt[0]:
             continue
 
-        doc = pycompat.sysstr('``%s``\n    %s') % (
-            bt[0], engine.bundletype.__doc__)
+        doc = b'``%s``\n    %s' % (bt[0], pycompat.getdoc(engine.bundletype))
 
         value = docobject()
-        value.__doc__ = doc
+        value.__doc__ = pycompat.sysstr(doc)
         value._origdoc = engine.bundletype.__doc__
         value._origfunc = engine.bundletype
 
--- a/mercurial/utils/cborutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/utils/cborutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,10 +8,9 @@
 from __future__ import absolute_import
 
 import struct
+import sys
 
-from ..thirdparty.cbor.cbor2 import (
-    decoder as decodermod,
-)
+from .. import pycompat
 
 # Very short version of RFC 7049...
 #
@@ -35,11 +34,16 @@
 
 SUBTYPE_MASK = 0b00011111
 
+SUBTYPE_FALSE = 20
+SUBTYPE_TRUE = 21
+SUBTYPE_NULL = 22
 SUBTYPE_HALF_FLOAT = 25
 SUBTYPE_SINGLE_FLOAT = 26
 SUBTYPE_DOUBLE_FLOAT = 27
 SUBTYPE_INDEFINITE = 31
 
+SEMANTIC_TAG_FINITE_SET = 258
+
 # Indefinite types begin with their major type ORd with information value 31.
 BEGIN_INDEFINITE_BYTESTRING = struct.pack(
     r'>B', MAJOR_TYPE_BYTESTRING << 5 | SUBTYPE_INDEFINITE)
@@ -146,7 +150,7 @@
 def streamencodeset(s):
     # https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml defines
     # semantic tag 258 for finite sets.
-    yield encodelength(MAJOR_TYPE_SEMANTIC, 258)
+    yield encodelength(MAJOR_TYPE_SEMANTIC, SEMANTIC_TAG_FINITE_SET)
 
     for chunk in streamencodearray(sorted(s, key=_mixedtypesortkey)):
         yield chunk
@@ -188,6 +192,7 @@
 STREAM_ENCODERS = {
     bytes: streamencodebytestring,
     int: streamencodeint,
+    pycompat.long: streamencodeint,
     list: streamencodearray,
     tuple: streamencodearray,
     dict: streamencodemap,
@@ -213,50 +218,778 @@
 
     return fn(v)
 
-def readindefinitebytestringtoiter(fh, expectheader=True):
-    """Read an indefinite bytestring to a generator.
+class CBORDecodeError(Exception):
+    """Represents an error decoding CBOR."""
+
+if sys.version_info.major >= 3:
+    def _elementtointeger(b, i):
+        return b[i]
+else:
+    def _elementtointeger(b, i):
+        return ord(b[i])
+
+STRUCT_BIG_UBYTE = struct.Struct(r'>B')
+STRUCT_BIG_USHORT = struct.Struct('>H')
+STRUCT_BIG_ULONG = struct.Struct('>L')
+STRUCT_BIG_ULONGLONG = struct.Struct('>Q')
+
+SPECIAL_NONE = 0
+SPECIAL_START_INDEFINITE_BYTESTRING = 1
+SPECIAL_START_ARRAY = 2
+SPECIAL_START_MAP = 3
+SPECIAL_START_SET = 4
+SPECIAL_INDEFINITE_BREAK = 5
+
+def decodeitem(b, offset=0):
+    """Decode a new CBOR value from a buffer at offset.
+
+    This function attempts to decode up to one complete CBOR value
+    from ``b`` starting at offset ``offset``.
+
+    The beginning of a collection (such as an array, map, set, or
+    indefinite length bytestring) counts as a single value. For these
+    special cases, a state flag will indicate that a special value was seen.
+
+    When called, the function either returns a decoded value or gives
+    a hint as to how many more bytes are needed to do so. By calling
+    the function repeatedly given a stream of bytes, the caller can
+    build up the original values.
+
+    Returns a tuple with the following elements:
+
+    * Bool indicating whether a complete value was decoded.
+    * The decoded value if the first element is True; otherwise None.
+    * Integer number of bytes. If positive, the number of bytes
+      read. If negative, the number of bytes we need to read to
+      decode this value or the next chunk in this value.
+    * One of the ``SPECIAL_*`` constants indicating special treatment
+      for this value. ``SPECIAL_NONE`` means this is a fully decoded
+      simple value (such as an integer or bool).
+    """
+
+    initial = _elementtointeger(b, offset)
+    offset += 1
+
+    majortype = initial >> 5
+    subtype = initial & SUBTYPE_MASK
+
+    if majortype == MAJOR_TYPE_UINT:
+        complete, value, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, value, readcount + 1, SPECIAL_NONE
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_NEGINT:
+        # Negative integers are the same as UINT except inverted minus 1.
+        complete, value, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, -value - 1, readcount + 1, SPECIAL_NONE
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_BYTESTRING:
+        # Beginning of bytestrings are treated as uints in order to
+        # decode their length, which may be indefinite.
+        complete, size, readcount = decodeuint(subtype, b, offset,
+                                               allowindefinite=True)
+
+        # We don't know the size of the bytestring yet. It must be a definite
+        # length, since the indefinite subtype would be encoded in the initial
+        # byte.
+        if not complete:
+            return False, None, readcount, SPECIAL_NONE
+
+        # We know the length of the bytestring.
+        if size is not None:
+            # And the data is available in the buffer.
+            if offset + readcount + size <= len(b):
+                value = b[offset + readcount:offset + readcount + size]
+                return True, value, readcount + size + 1, SPECIAL_NONE
 
-    Receives an object with a ``read(X)`` method to read N bytes.
+            # And we need more data in order to return the bytestring.
+            else:
+                wanted = len(b) - offset - readcount - size
+                return False, None, wanted, SPECIAL_NONE
+
+        # It is an indefinite length bytestring.
+        else:
+            return True, None, 1, SPECIAL_START_INDEFINITE_BYTESTRING
+
+    elif majortype == MAJOR_TYPE_STRING:
+        raise CBORDecodeError('string major type not supported')
+
+    elif majortype == MAJOR_TYPE_ARRAY:
+        # Beginning of arrays are treated as uints in order to decode their
+        # length. We don't allow indefinite length arrays.
+        complete, size, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, size, readcount + 1, SPECIAL_START_ARRAY
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_MAP:
+        # Beginning of maps are treated as uints in order to decode their
+        # number of elements. We don't allow indefinite length maps.
+        complete, size, readcount = decodeuint(subtype, b, offset)
+
+        if complete:
+            return True, size, readcount + 1, SPECIAL_START_MAP
+        else:
+            return False, None, readcount, SPECIAL_NONE
+
+    elif majortype == MAJOR_TYPE_SEMANTIC:
+        # Semantic tag value is read the same as a uint.
+        complete, tagvalue, readcount = decodeuint(subtype, b, offset)
+
+        if not complete:
+            return False, None, readcount, SPECIAL_NONE
+
+        # This behavior here is a little wonky. The main type being "decorated"
+        # by this semantic tag follows. A more robust parser would probably emit
+        # a special flag indicating this as a semantic tag and let the caller
+        # deal with the types that follow. But since we don't support many
+        # semantic tags, it is easier to deal with the special cases here and
+        # hide complexity from the caller. If we add support for more semantic
+        # tags, we should probably move semantic tag handling into the caller.
+        if tagvalue == SEMANTIC_TAG_FINITE_SET:
+            if offset + readcount >= len(b):
+                return False, None, -1, SPECIAL_NONE
 
-    If ``expectheader`` is True, it is expected that the first byte read
-    will represent an indefinite length bytestring. Otherwise, we
-    expect the first byte to be part of the first bytestring chunk.
+            complete, size, readcount2, special = decodeitem(b,
+                                                             offset + readcount)
+
+            if not complete:
+                return False, None, readcount2, SPECIAL_NONE
+
+            if special != SPECIAL_START_ARRAY:
+                raise CBORDecodeError('expected array after finite set '
+                                      'semantic tag')
+
+            return True, size, readcount + readcount2 + 1, SPECIAL_START_SET
+
+        else:
+            raise CBORDecodeError('semantic tag %d not allowed' % tagvalue)
+
+    elif majortype == MAJOR_TYPE_SPECIAL:
+        # Only specific values for the information field are allowed.
+        if subtype == SUBTYPE_FALSE:
+            return True, False, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_TRUE:
+            return True, True, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_NULL:
+            return True, None, 1, SPECIAL_NONE
+        elif subtype == SUBTYPE_INDEFINITE:
+            return True, None, 1, SPECIAL_INDEFINITE_BREAK
+        # A subtype of 24 would put the value in the next byte; not supported.
+        else:
+            raise CBORDecodeError('special type %d not allowed' % subtype)
+    else:
+        assert False
+
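
A sketch of the ``decodeitem()`` contract, using the 2-element CBOR array
``[1, 2]`` (``0x82`` is an array header with length 2):

    b = b'\x82\x01\x02'
    decodeitem(b, 0)    # -> (True, 2, 1, SPECIAL_START_ARRAY)
    decodeitem(b, 1)    # -> (True, 1, 1, SPECIAL_NONE)
    decodeitem(b, 2)    # -> (True, 2, 1, SPECIAL_NONE)
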
+def decodeuint(subtype, b, offset=0, allowindefinite=False):
+    """Decode an unsigned integer.
+
+    ``subtype`` is the lower 5 bits from the initial byte CBOR item
+    "header." ``b`` is a buffer containing bytes. ``offset`` points to
+    the index of the first byte after the byte that ``subtype`` was
+    derived from.
+
+    ``allowindefinite`` allows the special indefinite length value
+    indicator.
+
+    Returns a 3-tuple of (successful, value, count).
+
+    The first element is a bool indicating if decoding completed. The 2nd
+    is the decoded integer value or None if not fully decoded or the subtype
+    is 31 and ``allowindefinite`` is True. The 3rd value is the count of bytes.
+    If positive, it is the number of additional bytes decoded. If negative,
+    it is the number of additional bytes needed to decode this value.
     """
-    read = fh.read
-    decodeuint = decodermod.decode_uint
-    byteasinteger = decodermod.byte_as_integer
+
+    # Small values are inline.
+    if subtype < 24:
+        return True, subtype, 0
+    # Indefinite length specifier.
+    elif subtype == 31:
+        if allowindefinite:
+            return True, None, 0
+        else:
+            raise CBORDecodeError('indefinite length uint not allowed here')
+    elif subtype >= 28:
+        raise CBORDecodeError('unsupported subtype on integer type: %d' %
+                              subtype)
+
+    if subtype == 24:
+        s = STRUCT_BIG_UBYTE
+    elif subtype == 25:
+        s = STRUCT_BIG_USHORT
+    elif subtype == 26:
+        s = STRUCT_BIG_ULONG
+    elif subtype == 27:
+        s = STRUCT_BIG_ULONGLONG
+    else:
+        raise CBORDecodeError('bounds condition checking violation')
+
+    if len(b) - offset >= s.size:
+        return True, s.unpack_from(b, offset)[0], s.size
+    else:
+        return False, None, len(b) - offset - s.size
+
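
And ``decodeuint()`` directly, including the negative byte-count hint
returned for truncated input:

    decodeuint(5, b'')         # -> (True, 5, 0): small values are inline
    decodeuint(24, b'\x2a')    # -> (True, 42, 1): value in the next byte
    decodeuint(27, b'')        # -> (False, None, -8): 8 more bytes needed
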
+class bytestringchunk(bytes):
+    """Represents a chunk/segment in an indefinite length bytestring.
+
+    This behaves like a ``bytes`` but in addition has the ``isfirst``
+    and ``islast`` attributes indicating whether this chunk is the first
+    or last in an indefinite length bytestring.
+    """
+
+    def __new__(cls, v, first=False, last=False):
+        self = bytes.__new__(cls, v)
+        self.isfirst = first
+        self.islast = last
+
+        return self
+
+class sansiodecoder(object):
+    """A CBOR decoder that doesn't perform its own I/O.
 
-    if expectheader:
-        initial = decodermod.byte_as_integer(read(1))
+    To use, construct an instance and feed it segments containing
+    CBOR-encoded bytes via ``decode()``. The return value from ``decode()``
+    indicates whether a fully-decoded value is available, how many bytes
+    were consumed, and offers a hint as to how many bytes should be fed
+    in next time to decode the next value.
+
+    The decoder assumes it will decode N discrete CBOR values, not just
+    a single value. i.e. if the bytestream contains uints packed one after
+    the other, the decoder will decode them all, rather than just the initial
+    one.
+
+    When ``decode()`` indicates a value is available, call ``getavailable()``
+    to return all fully decoded values.
+
+    ``decode()`` can partially decode input. It is up to the caller to keep
+    track of what data was consumed and to pass unconsumed data in on the
+    next invocation.
+
+    The decoder decodes atomically at the *item* level. See ``decodeitem()``.
+    If an *item* cannot be fully decoded, the decoder won't record it as
+    partially consumed. Instead, the caller will be instructed to pass in
+    the initial bytes of this item on the next invocation. This does result
+    in some redundant parsing. But the overhead should be minimal.
+
+    This decoder only supports a subset of CBOR as required by Mercurial.
+    It lacks support for:
+
+    * Indefinite length arrays
+    * Indefinite length maps
+    * Use of indefinite length bytestrings as keys or values within
+      arrays, maps, or sets.
+    * Nested arrays, maps, or sets within sets
+    * Any semantic tag that isn't a mathematical finite set
+    * Floating point numbers
+    * Undefined special value
+
+    CBOR types are decoded to Python types as follows:
+
+    uint -> int
+    negint -> int
+    bytestring -> bytes
+    map -> dict
+    array -> list
+    True -> bool
+    False -> bool
+    null -> None
+    indefinite length bytestring chunk -> [bytestringchunk]
 
-        majortype = initial >> 5
-        subtype = initial & SUBTYPE_MASK
+    The only non-obvious mapping here is an indefinite length bytestring
+    to the ``bytestringchunk`` type. This is to facilitate streaming
+    indefinite length bytestrings out of the decoder and to differentiate
+    a regular bytestring from an indefinite length bytestring.
+    """
+
+    _STATE_NONE = 0
+    _STATE_WANT_MAP_KEY = 1
+    _STATE_WANT_MAP_VALUE = 2
+    _STATE_WANT_ARRAY_VALUE = 3
+    _STATE_WANT_SET_VALUE = 4
+    _STATE_WANT_BYTESTRING_CHUNK_FIRST = 5
+    _STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT = 6
+
+    def __init__(self):
+        # TODO add support for limiting size of bytestrings
+        # TODO add support for limiting number of keys / values in collections
+        # TODO add support for limiting size of buffered partial values
+
+        self.decodedbytecount = 0
+
+        self._state = self._STATE_NONE
+
+        # Stack of active nested collections. Each entry is a dict describing
+        # the collection.
+        self._collectionstack = []
+
+        # Fully decoded key to use for the current map.
+        self._currentmapkey = None
+
+        # Fully decoded values available for retrieval.
+        self._decodedvalues = []
+
+    @property
+    def inprogress(self):
+        """Whether the decoder has partially decoded a value."""
+        return self._state != self._STATE_NONE
+
+    def decode(self, b, offset=0):
+        """Attempt to decode bytes from an input buffer.
+
+        ``b`` is a collection of bytes and ``offset`` is the byte
+        offset within that buffer from which to begin reading data.
+
+        ``b`` must support ``len()`` and accessing byte slices via
+        ``__getitem__``. Typically ``bytes`` instances are used.
+
+        Returns a tuple with the following fields:
 
-        if majortype != MAJOR_TYPE_BYTESTRING:
-            raise decodermod.CBORDecodeError(
-                'expected major type %d; got %d' % (MAJOR_TYPE_BYTESTRING,
-                                                    majortype))
+        * Bool indicating whether values are available for retrieval.
+        * Integer indicating the number of bytes that were fully consumed,
+          starting from ``offset``.
+        * Integer indicating the number of bytes that are desired for the
+          next call in order to decode an item.
+        """
+        if not b:
+            return bool(self._decodedvalues), 0, 0
+
+        initialoffset = offset
+
+        # We could easily split the body of this loop into a function. But
+        # Python performance is sensitive to function calls and collections
+        # are composed of many items. So leaving this as a while loop could
+        # help with performance. One thing that may not help is the use of
+        # if..elif versus a lookup/dispatch table. There may be value
+        # in switching that.
+        while offset < len(b):
+            # Attempt to decode an item. This could be a whole value or a
+            # special value indicating an event, such as start or end of a
+            # collection or indefinite length type.
+            complete, value, readcount, special = decodeitem(b, offset)
+
+            if readcount > 0:
+                self.decodedbytecount += readcount
+
+            if not complete:
+                assert readcount < 0
+                return (
+                    bool(self._decodedvalues),
+                    offset - initialoffset,
+                    -readcount,
+                )
+
+            offset += readcount
+
+            # No nested state. We either have a full value or beginning of a
+            # complex value to deal with.
+            if self._state == self._STATE_NONE:
+                # A normal value.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(value)
+
+                elif special == SPECIAL_START_ARRAY:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': [],
+                    })
+                    self._state = self._STATE_WANT_ARRAY_VALUE
 
-        if subtype != SUBTYPE_INDEFINITE:
-            raise decodermod.CBORDecodeError(
-                'expected indefinite subtype; got %d' % subtype)
+                elif special == SPECIAL_START_MAP:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': {},
+                    })
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                elif special == SPECIAL_START_SET:
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': set(),
+                    })
+                    self._state = self._STATE_WANT_SET_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    self._state = self._STATE_WANT_BYTESTRING_CHUNK_FIRST
+
+                else:
+                    raise CBORDecodeError('unhandled special state: %d' %
+                                          special)
+
+            # This value becomes an element of the current array.
+            elif self._state == self._STATE_WANT_ARRAY_VALUE:
+                # Simple values get appended.
+                if special == SPECIAL_NONE:
+                    c = self._collectionstack[-1]
+                    c['v'].append(value)
+                    c['remaining'] -= 1
+
+                    # self._state doesn't need to change.
+
+                # An array nested within an array.
+                elif special == SPECIAL_START_ARRAY:
+                    lastc = self._collectionstack[-1]
+                    newvalue = []
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    # self._state doesn't need to change.
+
+                # A map nested within an array.
+                elif special == SPECIAL_START_MAP:
+                    lastc = self._collectionstack[-1]
+                    newvalue = {}
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue
+                    })
+
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                elif special == SPECIAL_START_SET:
+                    lastc = self._collectionstack[-1]
+                    newvalue = set()
+
+                    lastc['v'].append(newvalue)
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_SET_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings '
+                                          'not allowed as array values')
+
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting array value: %d' % special)
+
+            # This value becomes the key of the current map instance.
+            elif self._state == self._STATE_WANT_MAP_KEY:
+                if special == SPECIAL_NONE:
+                    self._currentmapkey = value
+                    self._state = self._STATE_WANT_MAP_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings '
+                                          'not allowed as map keys')
 
-    # The indefinite bytestring is composed of chunks of normal bytestrings.
-    # Read chunks until we hit a BREAK byte.
+                elif special in (SPECIAL_START_ARRAY, SPECIAL_START_MAP,
+                                 SPECIAL_START_SET):
+                    raise CBORDecodeError('collections not supported as map '
+                                          'keys')
+
+                # We do not allow special values to be used as map keys.
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting map key: %d' % special)
+
+            # This value becomes the value of the current map key.
+            elif self._state == self._STATE_WANT_MAP_VALUE:
+                # Simple values simply get inserted into the map.
+                if special == SPECIAL_NONE:
+                    lastc = self._collectionstack[-1]
+                    lastc['v'][self._currentmapkey] = value
+                    lastc['remaining'] -= 1
+
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                # A new array is used as the map value.
+                elif special == SPECIAL_START_ARRAY:
+                    lastc = self._collectionstack[-1]
+                    newvalue = []
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_ARRAY_VALUE
+
+                # A new map is used as the map value.
+                elif special == SPECIAL_START_MAP:
+                    lastc = self._collectionstack[-1]
+                    newvalue = {}
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
 
-    while True:
-        # We need to sniff for the BREAK byte.
-        initial = byteasinteger(read(1))
+                    self._state = self._STATE_WANT_MAP_KEY
+
+                # A new set is used as the map value.
+                elif special == SPECIAL_START_SET:
+                    lastc = self._collectionstack[-1]
+                    newvalue = set()
+
+                    lastc['v'][self._currentmapkey] = newvalue
+                    lastc['remaining'] -= 1
+
+                    self._collectionstack.append({
+                        'remaining': value,
+                        'v': newvalue,
+                    })
+
+                    self._state = self._STATE_WANT_SET_VALUE
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings not '
+                                          'allowed as map values')
+
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting map value: %d' % special)
+
+                self._currentmapkey = None
+
+            # This value is added to the current set.
+            elif self._state == self._STATE_WANT_SET_VALUE:
+                if special == SPECIAL_NONE:
+                    lastc = self._collectionstack[-1]
+                    lastc['v'].add(value)
+                    lastc['remaining'] -= 1
+
+                elif special == SPECIAL_START_INDEFINITE_BYTESTRING:
+                    raise CBORDecodeError('indefinite length bytestrings not '
+                                          'allowed as set values')
+
+                elif special in (SPECIAL_START_ARRAY,
+                                 SPECIAL_START_MAP,
+                                 SPECIAL_START_SET):
+                    raise CBORDecodeError('collections not allowed as set '
+                                          'values')
+
+                # We don't allow non-trivial types to exist as set values.
+                else:
+                    raise CBORDecodeError('unhandled special item when '
+                                          'expecting set value: %d' % special)
 
-        if initial == BREAK_INT:
-            break
+            # This value represents the first chunk in an indefinite length
+            # bytestring.
+            elif self._state == self._STATE_WANT_BYTESTRING_CHUNK_FIRST:
+                # We received a full chunk.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(bytestringchunk(value,
+                                                               first=True))
+
+                    self._state = self._STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT
+
+                # The end of stream marker. This means it is an empty
+                # indefinite length bytestring.
+                elif special == SPECIAL_INDEFINITE_BREAK:
+                    # We /could/ convert this to a b''. But we want to preserve
+                    # the nature of the underlying data so consumers expecting
+                    # an indefinite length bytestring get one.
+                    self._decodedvalues.append(bytestringchunk(b'',
+                                                               first=True,
+                                                               last=True))
+
+                    # Since indefinite length bytestrings can't be used in
+                    # collections, we must be at the root level.
+                    assert not self._collectionstack
+                    self._state = self._STATE_NONE
+
+                else:
+                    raise CBORDecodeError('unexpected special value when '
+                                          'expecting bytestring chunk: %d' %
+                                          special)
+
+            # This value represents the non-initial chunk in an indefinite
+            # length bytestring.
+            elif self._state == self._STATE_WANT_BYTESTRING_CHUNK_SUBSEQUENT:
+                # We received a full chunk.
+                if special == SPECIAL_NONE:
+                    self._decodedvalues.append(bytestringchunk(value))
+
+                # The end of stream marker.
+                elif special == SPECIAL_INDEFINITE_BREAK:
+                    self._decodedvalues.append(bytestringchunk(b'', last=True))
+
+                    # Since indefinite length bytestrings can't be used in
+                    # collections, we must be at the root level.
+                    assert not self._collectionstack
+                    self._state = self._STATE_NONE
 
-        length = decodeuint(fh, initial & SUBTYPE_MASK)
-        chunk = read(length)
+                else:
+                    raise CBORDecodeError('unexpected special value when '
+                                          'expecting bytestring chunk: %d' %
+                                          special)
+
+            else:
+                raise CBORDecodeError('unhandled decoder state: %d' %
+                                      self._state)
+
+            # We could have just added the final value in a collection. End
+            # all complete collections at the top of the stack.
+            while True:
+                # Bail if we're not waiting on a new collection item.
+                if self._state not in (self._STATE_WANT_ARRAY_VALUE,
+                                       self._STATE_WANT_MAP_KEY,
+                                       self._STATE_WANT_SET_VALUE):
+                    break
+
+                # Or we are expecting more items for this collection.
+                lastc = self._collectionstack[-1]
+
+                if lastc['remaining']:
+                    break
+
+                # The collection at the top of the stack is complete.
+
+                # Discard it, as it isn't needed for future items.
+                self._collectionstack.pop()
+
+                # If this is a nested collection, we don't emit it, since it
+                # will be emitted by its parent collection. But we do need to
+                # update state to reflect what the new top-most collection
+                # on the stack is.
+                if self._collectionstack:
+                    self._state = {
+                        list: self._STATE_WANT_ARRAY_VALUE,
+                        dict: self._STATE_WANT_MAP_KEY,
+                        set: self._STATE_WANT_SET_VALUE,
+                    }[type(self._collectionstack[-1]['v'])]
+
+                # If this is the root collection, emit it.
+                else:
+                    self._decodedvalues.append(lastc['v'])
+                    self._state = self._STATE_NONE
+
+        return (
+            bool(self._decodedvalues),
+            offset - initialoffset,
+            0,
+        )
+
+    def getavailable(self):
+        """Returns an iterator over fully decoded values.
 
-        if len(chunk) != length:
-            raise decodermod.CBORDecodeError(
-                'failed to read bytestring chunk: got %d bytes; expected %d' % (
-                    len(chunk), length))
+        Once values are retrieved, they won't be available on the next call.
+        """
+
+        l = list(self._decodedvalues)
+        self._decodedvalues = []
+        return l
+
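
A usage sketch of the feed/drain cycle described in the class docstring,
decoding the array ``[1, 2]`` from two chunks:

    d = sansiodecoder()
    d.decode(b'\x82\x01')    # -> (False, 2, 0): array started, 1 element in
    d.decode(b'\x02')        # -> (True, 1, 0): array completed
    d.getavailable()         # -> [[1, 2]]
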
+class bufferingdecoder(object):
+    """A CBOR decoder that buffers undecoded input.
+
+    This is a glorified wrapper around ``sansiodecoder`` that adds a buffering
+    layer. All input that isn't consumed by ``sansiodecoder`` will be buffered
+    and concatenated with any new input that arrives later.
+
+    TODO consider adding limits as to the maximum amount of data that can
+    be buffered.
+    """
+    def __init__(self):
+        self._decoder = sansiodecoder()
+        self._chunks = []
+        self._wanted = 0
+
+    def decode(self, b):
+        """Attempt to decode bytes to CBOR values.
+
+        Returns a tuple with the following fields:
+
+        * Bool indicating whether new values are available for retrieval.
+        * Integer number of bytes decoded from the new input.
+        * Integer number of bytes wanted to decode the next value.
+        """
+        # We /might/ be able to support passing a bytearray all the
+        # way through. For now, let's cheat.
+        if isinstance(b, bytearray):
+            b = bytes(b)
+
+        # Our strategy for buffering is to aggregate the incoming chunks in a
+        # list until we've received enough data to decode the next item.
+        # This is slightly more complicated than using an ``io.BytesIO``
+        # or continuously concatenating incoming data. However, because it
+        # isn't constantly reallocating backing memory for a growing buffer,
+        # it prevents excessive memory thrashing and is significantly faster,
+        # especially in cases where the percentage of input chunks that don't
+        # decode into a full item is high.
 
-        yield chunk
+        if self._chunks:
+            # A previous call said we needed N bytes to decode the next item.
+            # But this call doesn't provide enough data. We buffer the incoming
+            # chunk without attempting to decode.
+            if len(b) < self._wanted:
+                self._chunks.append(b)
+                self._wanted -= len(b)
+                return False, 0, self._wanted
+
+            # Else we may have enough data to decode the next item. Aggregate
+            # old data with new and reset the buffer.
+            newlen = len(b)
+            self._chunks.append(b)
+            b = b''.join(self._chunks)
+            self._chunks = []
+            oldlen = len(b) - newlen
+
+        else:
+            oldlen = 0
+
+        available, readcount, wanted = self._decoder.decode(b)
+        self._wanted = wanted
+
+        if readcount < len(b):
+            self._chunks.append(b[readcount:])
+
+        return available, readcount - oldlen, wanted
+
+    def getavailable(self):
+        return self._decoder.getavailable()
+
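
A sketch of the buffering behavior, with a 4-byte bytestring split across
two calls:

    d = bufferingdecoder()
    d.decode(b'\x44\xde\xad')    # -> (False, 0, 2): buffered, 2 bytes wanted
    d.decode(b'\xbe\xef')        # -> (True, 2, 0): old + new bytes decode
    d.getavailable()             # -> [b'\xde\xad\xbe\xef']
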
+def decodeall(b):
+    """Decode all CBOR items present in an iterable of bytes.
+
+    In addition to regular decode errors, raises CBORDecodeError if the
+    entirety of the passed buffer does not fully decode to complete CBOR
+    values. This includes failure to decode any value, incomplete collection
+    types, incomplete indefinite length items, and extra data at the end of
+    the buffer.
+    """
+    if not b:
+        return []
+
+    decoder = sansiodecoder()
+
+    havevalues, readcount, wantbytes = decoder.decode(b)
+
+    if readcount != len(b):
+        raise CBORDecodeError('input data not fully consumed')
+
+    if decoder.inprogress:
+        raise CBORDecodeError('input data not complete')
+
+    return decoder.getavailable()
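
For example (a sketch; values follow from the decoder semantics above):

    decodeall(b'\x01\x43foo')    # -> [1, b'foo']
    decodeall(b'\x82\x01')       # raises CBORDecodeError: the 2-element
                                 # array is missing its second element
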
--- a/mercurial/utils/dateutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/utils/dateutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -303,17 +303,17 @@
 
     if not date:
         raise error.Abort(_("dates cannot consist entirely of whitespace"))
-    elif date[0] == "<":
+    elif date[0:1] == b"<":
         if not date[1:]:
             raise error.Abort(_("invalid day spec, use '<DATE'"))
         when = upper(date[1:])
         return lambda x: x <= when
-    elif date[0] == ">":
+    elif date[0:1] == b">":
         if not date[1:]:
             raise error.Abort(_("invalid day spec, use '>DATE'"))
         when = lower(date[1:])
         return lambda x: x >= when
-    elif date[0] == "-":
+    elif date[0:1] == b"-":
         try:
             days = int(date[1:])
         except ValueError:
@@ -323,8 +323,8 @@
                 % date[1:])
         when = makedate()[0] - days * 3600 * 24
         return lambda x: x >= when
-    elif " to " in date:
-        a, b = date.split(" to ")
+    elif b" to " in date:
+        a, b = date.split(b" to ")
         start, stop = lower(a), upper(b)
         return lambda x: x >= start and x <= stop
     else:
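
The switch to slicing matters because indexing ``bytes`` on Python 3 yields
an ``int``, so the old comparisons were always false there; slicing yields
``bytes`` on both major versions:

    d = b'<2018-10-22'
    d[0] == b'<'     # False on Python 3: 60 != b'<'
    d[0:1] == b'<'   # True on Python 2 and Python 3
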
--- a/mercurial/utils/procutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/utils/procutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -120,13 +120,15 @@
     raise error.ProgrammingError('unsupported mode: %r' % mode)
 
 def _popenreader(cmd, bufsize):
-    p = subprocess.Popen(quotecommand(cmd), shell=True, bufsize=bufsize,
+    p = subprocess.Popen(tonativestr(quotecommand(cmd)),
+                         shell=True, bufsize=bufsize,
                          close_fds=closefds,
                          stdout=subprocess.PIPE)
     return _pfile(p, p.stdout)
 
 def _popenwriter(cmd, bufsize):
-    p = subprocess.Popen(quotecommand(cmd), shell=True, bufsize=bufsize,
+    p = subprocess.Popen(tonativestr(quotecommand(cmd)),
+                         shell=True, bufsize=bufsize,
                          close_fds=closefds,
                          stdin=subprocess.PIPE)
     return _pfile(p, p.stdin)
@@ -135,10 +137,11 @@
     # Setting bufsize to -1 lets the system decide the buffer size.
     # The default for bufsize is 0, meaning unbuffered. This leads to
     # poor performance on Mac OS X: http://bugs.python.org/issue4194
-    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
+    p = subprocess.Popen(tonativestr(cmd),
+                         shell=True, bufsize=-1,
                          close_fds=closefds,
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-                         env=env)
+                         env=tonativeenv(env))
     return p.stdin, p.stdout
 
 def popen3(cmd, env=None):
@@ -146,16 +149,18 @@
     return stdin, stdout, stderr
 
 def popen4(cmd, env=None, bufsize=-1):
-    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
+    p = subprocess.Popen(tonativestr(cmd),
+                         shell=True, bufsize=bufsize,
                          close_fds=closefds,
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
-                         env=env)
+                         env=tonativeenv(env))
     return p.stdin, p.stdout, p.stderr, p
 
 def pipefilter(s, cmd):
     '''filter string S through command CMD, returning its output'''
-    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
+    p = subprocess.Popen(tonativestr(cmd),
+                         shell=True, close_fds=closefds,
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE)
     pout, perr = p.communicate(s)
     return pout
@@ -320,10 +325,19 @@
 if pycompat.iswindows:
     def shelltonative(cmd, env):
         return platform.shelltocmdexe(cmd, shellenviron(env))
+
+    tonativestr = encoding.strfromlocal
 else:
     def shelltonative(cmd, env):
         return cmd
 
+    tonativestr = pycompat.identity
+
+def tonativeenv(env):
+    '''convert the environment from bytes to strings suitable for Popen(), etc.
+    '''
+    return pycompat.rapply(tonativestr, env)
+
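
On POSIX ``tonativestr`` is the identity (``subprocess`` accepts bytes
there), while Windows decodes via ``encoding.strfromlocal``. Conceptually
``tonativeenv()`` just maps that conversion over every key and value; a
standalone sketch (hypothetical helper, not Mercurial's ``pycompat.rapply``):

    def tonativeenv_sketch(env, tostr=lambda b: b.decode('utf-8')):
        # apply the bytes -> str conversion to each key and value
        return {tostr(k): tostr(v) for k, v in env.items()}

    tonativeenv_sketch({b'PATH': b'/usr/bin'})    # -> {'PATH': '/usr/bin'}
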
 def system(cmd, environ=None, cwd=None, out=None):
     '''enhanced shell command execution.
     run with environment maybe modified, maybe in different dir.
@@ -337,11 +351,16 @@
     cmd = quotecommand(cmd)
     env = shellenviron(environ)
     if out is None or isstdout(out):
-        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
-                             env=env, cwd=cwd)
+        rc = subprocess.call(tonativestr(cmd),
+                             shell=True, close_fds=closefds,
+                             env=tonativeenv(env),
+                             cwd=pycompat.rapply(tonativestr, cwd))
     else:
-        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
-                                env=env, cwd=cwd, stdout=subprocess.PIPE,
+        proc = subprocess.Popen(tonativestr(cmd),
+                                shell=True, close_fds=closefds,
+                                env=tonativeenv(env),
+                                cwd=pycompat.rapply(tonativestr, cwd),
+                                stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
         for line in iter(proc.stdout.readline, ''):
             out.write(line)
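
All of the procutil hunks above follow one pattern: Mercurial keeps commands
and environments as bytes internally and converts them to native strings only
at the Popen() boundary. A self-contained sketch of that conversion (the
utf-8 decode is a stand-in for illustration; the real code routes through
``encoding.strfromlocal`` on Windows and ``pycompat.identity`` elsewhere, and
``tonativeenv`` uses ``pycompat.rapply`` as shown above):

    import subprocess

    def tonativestr(s):
        # Stand-in: bytes -> native str for Popen() on Python 3.
        return s.decode('utf-8') if isinstance(s, bytes) else s

    def tonativeenv(env):
        # Convert a {bytes: bytes} environment into {str: str}.
        return {tonativestr(k): tonativestr(v) for k, v in env.items()}

    p = subprocess.Popen(tonativestr(b'echo hello'), shell=True,
                         env=tonativeenv({b'PATH': b'/usr/bin:/bin'}),
                         stdout=subprocess.PIPE)
    print(p.communicate()[0])
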
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/utils/storageutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,480 @@
+# storageutil.py - Storage functionality agnostic of backend implementation.
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import re
+import struct
+
+from ..i18n import _
+from ..node import (
+    bin,
+    nullid,
+    nullrev,
+)
+from .. import (
+    dagop,
+    error,
+    mdiff,
+    pycompat,
+)
+
+_nullhash = hashlib.sha1(nullid)
+
+def hashrevisionsha1(text, p1, p2):
+    """Compute the SHA-1 for revision data and its parents.
+
+    This hash combines both the current file contents and its history
+    in a manner that makes it easy to distinguish nodes with the same
+    content in the revision graph.
+    """
+    # As of now, if one of the parent nodes is null, it is p2
+    if p2 == nullid:
+        # deep copy of a hash is faster than creating one
+        s = _nullhash.copy()
+        s.update(p1)
+    else:
+        # none of the parent nodes are nullid
+        if p1 < p2:
+            a = p1
+            b = p2
+        else:
+            a = p2
+            b = p1
+        s = hashlib.sha1(a)
+        s.update(b)
+    s.update(text)
+    return s.digest()
+
+METADATA_RE = re.compile(b'\x01\n')
+
+def parsemeta(text):
+    """Parse metadata header from revision data.
+
+    Returns a 2-tuple of (metadata, offset), where both can be None if there
+    is no metadata.
+    """
+    # text can be a buffer, so we can't use .startswith or .index
+    if text[:2] != b'\x01\n':
+        return None, None
+    s = METADATA_RE.search(text, 2).start()
+    mtext = text[2:s]
+    meta = {}
+    for l in mtext.splitlines():
+        k, v = l.split(b': ', 1)
+        meta[k] = v
+    return meta, s + 2
+
+def packmeta(meta, text):
+    """Add metadata to fulltext to produce revision text."""
+    keys = sorted(meta)
+    metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
+    return b'\x01\n%s\x01\n%s' % (metatext, text)
+
+def iscensoredtext(text):
+    meta = parsemeta(text)[0]
+    return meta and b'censored' in meta
+
+def filtermetadata(text):
+    """Extract just the revision data from source text.
+
+    Returns ``text`` unless it has a metadata header, in which case we return
+    a new buffer without the metadata.
+    """
+    if not text.startswith(b'\x01\n'):
+        return text
+
+    offset = text.index(b'\x01\n', 2)
+    return text[offset + 2:]
+
+def filerevisioncopied(store, node):
+    """Resolve file revision copy metadata.
+
+    Returns ``False`` if the file has no copy metadata. Otherwise a
+    2-tuple of the source filename and node.
+    """
+    if store.parents(node)[0] != nullid:
+        return False
+
+    meta = parsemeta(store.revision(node))[0]
+
+    # copy and copyrev occur in pairs. In rare cases due to old bugs,
+    # one can occur without the other. So ensure both are present to flag
+    # as a copy.
+    if meta and b'copy' in meta and b'copyrev' in meta:
+        return meta[b'copy'], bin(meta[b'copyrev'])
+
+    return False
+
+def filedataequivalent(store, node, filedata):
+    """Determines whether file data is equivalent to a stored node.
+
+    Returns True if the passed file data would hash to the same value
+    as a stored revision and False otherwise.
+
+    When a stored revision is censored, filedata must be empty to have
+    equivalence.
+
+    When a stored revision has copy metadata, it is ignored as part
+    of the comparison.
+    """
+
+    if filedata.startswith(b'\x01\n'):
+        revisiontext = b'\x01\n\x01\n' + filedata
+    else:
+        revisiontext = filedata
+
+    p1, p2 = store.parents(node)
+
+    computednode = hashrevisionsha1(revisiontext, p1, p2)
+
+    if computednode == node:
+        return True
+
+    # Censored files compare against the empty file.
+    if store.iscensored(store.rev(node)):
+        return filedata == b''
+
+    # Renaming a file produces a different hash, even if the data
+    # remains unchanged. Check if that's the case.
+    if store.renamed(node):
+        return store.read(node) == filedata
+
+    return False
+
+def iterrevs(storelen, start=0, stop=None):
+    """Iterate over revision numbers in a store."""
+    step = 1
+
+    if stop is not None:
+        if start > stop:
+            step = -1
+        stop += step
+        if stop > storelen:
+            stop = storelen
+    else:
+        stop = storelen
+
+    return pycompat.xrange(start, stop, step)
+
+def fileidlookup(store, fileid, identifier):
+    """Resolve the file node for a value.
+
+    ``store`` is an object implementing the ``ifileindex`` interface.
+
+    ``fileid`` can be:
+
+    * A 20 byte binary node.
+    * An integer revision number.
+    * A 40 byte hex node.
+    * A byte string that can be parsed as an integer revision number.
+
+    ``identifier`` is used to populate ``error.LookupError`` with an identifier
+    for the store.
+
+    Raises ``error.LookupError`` on failure.
+    """
+    if isinstance(fileid, int):
+        try:
+            return store.node(fileid)
+        except IndexError:
+            raise error.LookupError('%d' % fileid, identifier,
+                                    _('no match found'))
+
+    if len(fileid) == 20:
+        try:
+            store.rev(fileid)
+            return fileid
+        except error.LookupError:
+            pass
+
+    if len(fileid) == 40:
+        try:
+            rawnode = bin(fileid)
+            store.rev(rawnode)
+            return rawnode
+        except TypeError:
+            pass
+
+    try:
+        rev = int(fileid)
+
+        if b'%d' % rev != fileid:
+            raise ValueError
+
+        try:
+            return store.node(rev)
+        except (IndexError, TypeError):
+            pass
+    except (ValueError, OverflowError):
+        pass
+
+    raise error.LookupError(fileid, identifier, _('no match found'))
+
+def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
+    """Resolve information needed to strip revisions.
+
+    Finds the minimum revision number that must be stripped in order to
+    strip ``minlinkrev``.
+
+    Returns a 2-tuple of the minimum revision number to do that and a set
+    of all revision numbers that have linkrevs that would be broken
+    by that strip.
+
+    ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
+    ``headrevs`` is an iterable of head revisions.
+    ``linkrevfn`` is a callable that receives a revision number and returns
+    the linkrev for it.
+    ``parentrevsfn`` is a callable that receives a revision number and returns
+    an iterable of its parent revision numbers.
+    """
+    brokenrevs = set()
+    strippoint = tiprev + 1
+
+    heads = {}
+    futurelargelinkrevs = set()
+    for head in headrevs:
+        headlinkrev = linkrevfn(head)
+        heads[head] = headlinkrev
+        if headlinkrev >= minlinkrev:
+            futurelargelinkrevs.add(headlinkrev)
+
+    # This algorithm involves walking down the rev graph, starting at the
+    # heads. Since the revs are topologically sorted according to linkrev,
+    # once all head linkrevs are below the minlink, we know there are
+    # no more revs that could have a linkrev greater than minlink.
+    # So we can stop walking.
+    while futurelargelinkrevs:
+        strippoint -= 1
+        linkrev = heads.pop(strippoint)
+
+        if linkrev < minlinkrev:
+            brokenrevs.add(strippoint)
+        else:
+            futurelargelinkrevs.remove(linkrev)
+
+        for p in parentrevsfn(strippoint):
+            if p != nullrev:
+                plinkrev = linkrevfn(p)
+                heads[p] = plinkrev
+                if plinkrev >= minlinkrev:
+                    futurelargelinkrevs.add(plinkrev)
+
+    return strippoint, brokenrevs
+
+def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
+                  candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
+                  sendfulltext=False,
+                  revisiondata=False, assumehaveparentrevisions=False,
+                  deltaprevious=False):
+    """Generic implementation of ifiledata.emitrevisions().
+
+    Emitting revision data is subtly complex. This function attempts to
+    encapsulate all the logic for doing so in a backend-agnostic way.
+
+    ``store``
+       Object conforming to ``ifilestorage`` interface.
+
+    ``nodes``
+       List of revision nodes whose data to emit.
+
+    ``resultcls``
+       A type implementing the ``irevisiondelta`` interface that will be
+       constructed and returned.
+
+    ``deltaparentfn`` (optional)
+       Callable receiving a revision number and returning the revision number
+       of a revision that the internal delta is stored against. This delta
+       will be preferred over computing a new arbitrary delta.
+
+       If not defined, a delta will always be computed from raw revision
+       data.
+
+    ``candeltafn`` (optional)
+       Callable receiving a pair of revision numbers that returns a bool
+       indicating whether a delta between them can be produced.
+
+       If not defined, it is assumed that any two revisions can delta with
+       each other.
+
+    ``rawsizefn`` (optional)
+       Callable receiving a revision number and returning the length of
+       ``store.revision(rev, raw=True)``.
+
+       If not defined, ``len(store.revision(rev, raw=True))`` will be called.
+
+    ``revdifffn`` (optional)
+       Callable receiving a pair of revision numbers that returns a delta
+       between them.
+
+       If not defined, a delta will be computed by invoking mdiff code
+       on ``store.revision()`` results.
+
+       Defining this function allows a precomputed or stored delta to be
+       used without having to compute one.
+
+    ``flagsfn`` (optional)
+       Callable receiving a revision number and returning the integer flags
+       value for it. If not defined, the flags value will be 0.
+
+    ``sendfulltext``
+       Whether to send fulltext revisions instead of deltas, if allowed.
+
+    ``nodesorder``
+    ``revisiondata``
+    ``assumehaveparentrevisions``
+    ``deltaprevious``
+       See ``ifiledata.emitrevisions()`` interface documentation.
+    """
+
+    fnode = store.node
+    frev = store.rev
+
+    if nodesorder == 'nodes':
+        revs = [frev(n) for n in nodes]
+    elif nodesorder == 'storage':
+        revs = sorted(frev(n) for n in nodes)
+    else:
+        revs = set(frev(n) for n in nodes)
+        revs = dagop.linearize(revs, store.parentrevs)
+
+    prevrev = None
+
+    if deltaprevious or assumehaveparentrevisions:
+        prevrev = store.parentrevs(revs[0])[0]
+
+    # Set of revs available to delta against.
+    available = set()
+
+    for rev in revs:
+        if rev == nullrev:
+            continue
+
+        node = fnode(rev)
+        p1rev, p2rev = store.parentrevs(rev)
+
+        if deltaparentfn:
+            deltaparentrev = deltaparentfn(rev)
+        else:
+            deltaparentrev = nullrev
+
+        # Forced delta against previous mode.
+        if deltaprevious:
+            baserev = prevrev
+
+        # We're instructed to send fulltext. Honor that.
+        elif sendfulltext:
+            baserev = nullrev
+
+        # There is a delta in storage. We try to use that because it
+        # amounts to effectively copying data from storage and is
+        # therefore the fastest.
+        elif deltaparentrev != nullrev:
+            # Base revision was already emitted in this group. We can
+            # always safely use the delta.
+            if deltaparentrev in available:
+                baserev = deltaparentrev
+
+            # Base revision is a parent that hasn't been emitted already.
+            # Use it if we can assume the receiver has the parent revision.
+            elif (assumehaveparentrevisions
+                  and deltaparentrev in (p1rev, p2rev)):
+                baserev = deltaparentrev
+
+            # No guarantee the receiver has the delta parent. Send delta
+            # against the last revision (if possible), which in the common case
+            # should be similar enough to this revision that the delta is
+            # reasonable.
+            elif prevrev is not None:
+                baserev = prevrev
+            else:
+                baserev = nullrev
+
+        # Storage has a fulltext revision.
+
+        # Let's use the previous revision, which is as good a guess as any.
+        # There is definitely room to improve this logic.
+        elif prevrev is not None:
+            baserev = prevrev
+        else:
+            baserev = nullrev
+
+        # But we can't actually use our chosen delta base for whatever
+        # reason. Reset to fulltext.
+        if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
+            baserev = nullrev
+
+        revision = None
+        delta = None
+        baserevisionsize = None
+
+        if revisiondata:
+            if store.iscensored(baserev) or store.iscensored(rev):
+                try:
+                    revision = store.revision(node, raw=True)
+                except error.CensoredNodeError as e:
+                    revision = e.tombstone
+
+                if baserev != nullrev:
+                    if rawsizefn:
+                        baserevisionsize = rawsizefn(baserev)
+                    else:
+                        baserevisionsize = len(store.revision(baserev,
+                                                              raw=True))
+
+            elif baserev == nullrev and not deltaprevious:
+                revision = store.revision(node, raw=True)
+                available.add(rev)
+            else:
+                if revdifffn:
+                    delta = revdifffn(baserev, rev)
+                else:
+                    delta = mdiff.textdiff(store.revision(baserev, raw=True),
+                                           store.revision(rev, raw=True))
+
+                available.add(rev)
+
+        yield resultcls(
+            node=node,
+            p1node=fnode(p1rev),
+            p2node=fnode(p2rev),
+            basenode=fnode(baserev),
+            flags=flagsfn(rev) if flagsfn else 0,
+            baserevisionsize=baserevisionsize,
+            revision=revision,
+            delta=delta)
+
+        prevrev = rev
+
+def deltaiscensored(delta, baserev, baselenfn):
+    """Determine if a delta represents censored revision data.
+
+    ``baserev`` is the base revision this delta is encoded against.
+    ``baselenfn`` is a callable receiving a revision number that resolves the
+    length of the revision fulltext.
+
+    Returns a bool indicating if the result of the delta represents a censored
+    revision.
+    """
+    # Fragile heuristic: unless new file meta keys are added alphabetically
+    # preceding "censored", all censored revisions are prefixed by
+    # "\1\ncensored:". A delta producing such a censored revision must be a
+    # full-replacement delta, so we inspect the first and only patch in the
+    # delta for this prefix.
+    hlen = struct.calcsize(">lll")
+    if len(delta) <= hlen:
+        return False
+
+    oldlen = baselenfn(baserev)
+    newlen = len(delta) - hlen
+    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
+        return False
+
+    add = "\1\ncensored:"
+    addlen = len(add)
+    return newlen >= addlen and delta[hlen:hlen + addlen] == add
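
The parent ordering in ``hashrevisionsha1()`` makes the computed node
independent of which parent is p1 and which is p2. A stripped-down, runnable
restatement of the function above (``nullid`` mirrors the all-zero definition;
the sorted() form is equivalent to the nullid fast path because twenty zero
bytes always sort first):

    import hashlib

    nullid = b'\0' * 20

    def hashrevisionsha1(text, p1, p2):
        # Hash the parents in sorted order, then the revision text.
        a, b = sorted([p1, p2])
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    p1 = hashlib.sha1(b'parent 1').digest()
    p2 = hashlib.sha1(b'parent 2').digest()
    assert (hashrevisionsha1(b'text', p1, p2)
            == hashrevisionsha1(b'text', p2, p1))
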
--- a/mercurial/utils/stringutil.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/utils/stringutil.py	Mon Oct 22 14:46:06 2018 -0400
@@ -13,6 +13,7 @@
 import codecs
 import re as remod
 import textwrap
+import types
 
 from ..i18n import _
 from ..thirdparty import attr
@@ -42,27 +43,202 @@
         return pat
     return pat.encode('latin1')
 
-def pprint(o, bprefix=False):
+def pprint(o, bprefix=False, indent=0, level=0):
     """Pretty print an object."""
+    return b''.join(pprintgen(o, bprefix=bprefix, indent=indent, level=level))
+
+def pprintgen(o, bprefix=False, indent=0, level=0):
+    """Pretty print an object to a generator of atoms.
+
+    ``bprefix`` is a flag controlling whether bytestrings are emitted with
+    a ``b''`` prefix.
+
+    ``indent`` controls whether collections and nested data structures
+    span multiple lines, indented by this many spaces per level. By default,
+    no newlines are emitted.
+
+    ``level`` specifies the initial indent level. Used if ``indent > 0``.
+    """
+
     if isinstance(o, bytes):
         if bprefix:
-            return "b'%s'" % escapestr(o)
-        return "'%s'" % escapestr(o)
+            yield "b'%s'" % escapestr(o)
+        else:
+            yield "'%s'" % escapestr(o)
     elif isinstance(o, bytearray):
         # codecs.escape_encode() can't handle bytearray, so escapestr fails
         # without coercion.
-        return "bytearray['%s']" % escapestr(bytes(o))
+        yield "bytearray['%s']" % escapestr(bytes(o))
     elif isinstance(o, list):
-        return '[%s]' % (b', '.join(pprint(a, bprefix=bprefix) for a in o))
+        if not o:
+            yield '[]'
+            return
+
+        yield '['
+
+        if indent:
+            level += 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        for i, a in enumerate(o):
+            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            level -= 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        yield ']'
     elif isinstance(o, dict):
-        return '{%s}' % (b', '.join(
-            '%s: %s' % (pprint(k, bprefix=bprefix),
-                        pprint(v, bprefix=bprefix))
-            for k, v in sorted(o.items())))
+        if not o:
+            yield '{}'
+            return
+
+        yield '{'
+
+        if indent:
+            level += 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        for i, (k, v) in enumerate(sorted(o.items())):
+            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            yield ': '
+
+            for chunk in pprintgen(v, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            level -= 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        yield '}'
+    elif isinstance(o, set):
+        if not o:
+            yield 'set([])'
+            return
+
+        yield 'set(['
+
+        if indent:
+            level += 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        for i, k in enumerate(sorted(o)):
+            for chunk in pprintgen(k, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            level -= 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        yield '])'
     elif isinstance(o, tuple):
-        return '(%s)' % (b', '.join(pprint(a, bprefix=bprefix) for a in o))
+        if not o:
+            yield '()'
+            return
+
+        yield '('
+
+        if indent:
+            level += 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        for i, a in enumerate(o):
+            for chunk in pprintgen(a, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            if i + 1 < len(o):
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            level -= 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        yield ')'
+    elif isinstance(o, types.GeneratorType):
+        # Special case of empty generator.
+        try:
+            nextitem = next(o)
+        except StopIteration:
+            yield 'gen[]'
+            return
+
+        yield 'gen['
+
+        if indent:
+            level += 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        last = False
+
+        while not last:
+            current = nextitem
+
+            try:
+                nextitem = next(o)
+            except StopIteration:
+                last = True
+
+            for chunk in pprintgen(current, bprefix=bprefix, indent=indent,
+                                   level=level):
+                yield chunk
+
+            if not last:
+                if indent:
+                    yield ',\n'
+                    yield ' ' * (level * indent)
+                else:
+                    yield ', '
+
+        if indent:
+            level -= 1
+            yield '\n'
+            yield ' ' * (level * indent)
+
+        yield ']'
     else:
-        return pycompat.byterepr(o)
+        yield pycompat.byterepr(o)
 
 def prettyrepr(o):
     """Pretty print a representation of a possibly-nested object"""
@@ -111,7 +287,7 @@
     elif callable(r):
         return r()
     else:
-        return pycompat.byterepr(r)
+        return pprint(r)
 
 def binary(s):
     """return true if a string is binary data"""
@@ -424,6 +600,8 @@
     return encoding.trim(text, maxlength, ellipsis='...')
 
 def escapestr(s):
+    if isinstance(s, memoryview):
+        s = bytes(s)
     # call underlying function of s.encode('string_escape') directly for
     # Python 3 compatibility
     return codecs.escape_encode(s)[0]
@@ -464,7 +642,7 @@
         def _cutdown(self, ucstr, space_left):
             l = 0
             colwidth = encoding.ucolwidth
-            for i in xrange(len(ucstr)):
+            for i in pycompat.xrange(len(ucstr)):
                 l += colwidth(ucstr[i])
                 if space_left < l:
                     return (ucstr[:i], ucstr[i:])
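
To make the new ``indent`` knob concrete, here is what the generator above
produces for a small nested structure (a usage sketch; assumes Mercurial is
importable and, like the code it exercises, a Python 2 runtime):

    from mercurial.utils import stringutil

    print(stringutil.pprint({b'key': [1, 2]}, indent=2))
    # {
    #   'key': [
    #     1,
    #     2
    #   ]
    # }
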
--- a/mercurial/verify.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/verify.py	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,6 @@
     error,
     pycompat,
     revlog,
-    scmutil,
     util,
 )
 
@@ -35,17 +34,15 @@
     return f
 
 class verifier(object):
-    # The match argument is always None in hg core, but e.g. the narrowhg
-    # extension will pass in a matcher here.
-    def __init__(self, repo, match=None):
+    def __init__(self, repo):
         self.repo = repo.unfiltered()
         self.ui = repo.ui
-        self.match = match or scmutil.matchall(repo)
+        self.match = repo.narrowmatch()
         self.badrevs = set()
         self.errors = 0
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
-        self.havemf = len(repo.manifestlog._revlog) > 0
+        self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
         self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.__getitem__)
         self.refersmf = False
@@ -153,8 +150,8 @@
 
         totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
 
-        ui.status(_("%d files, %d changesets, %d total revisions\n") %
-                       (totalfiles, len(repo.changelog), filerevisions))
+        ui.status(_("checked %d changesets with %d changes to %d files\n") %
+                       (len(repo.changelog), filerevisions, totalfiles))
         if self.warnings:
             ui.warn(_("%d warnings encountered!\n") % self.warnings)
         if self.fncachewarned:
@@ -205,7 +202,7 @@
         ui = self.ui
         match = self.match
         mfl = self.repo.manifestlog
-        mf = mfl._revlog.dirlog(dir)
+        mf = mfl.getstorage(dir)
 
         if not dir:
             self.ui.status(_("checking manifests\n"))
@@ -341,6 +338,14 @@
             elif (size > 0 or not revlogv1) and f.startswith('data/'):
                 storefiles.add(_normpath(f))
 
+        state = {
+            # TODO this assumes revlog storage for changelog.
+            'expectedversion': self.repo.changelog.version & 0xFFFF,
+            'skipflags': self.skipflags,
+            # experimental config: censor.policy
+            'erroroncensored': ui.config('censor', 'policy') == 'abort',
+        }
+
         files = sorted(set(filenodes) | set(filelinkrevs))
         revisions = 0
         progress = ui.makeprogress(_('checking'), unit=_('files'),
@@ -360,7 +365,7 @@
 
             try:
                 fl = repo.file(f)
-            except error.RevlogError as e:
+            except error.StorageError as e:
                 self.err(lr, _("broken revlog! (%s)") % e, f)
                 continue
 
@@ -373,9 +378,28 @@
                                   ff)
                         self.fncachewarned = True
 
-            self.checklog(fl, f, lr)
+            if not len(fl) and (self.havecl or self.havemf):
+                self.err(lr, _("empty or missing %s") % f)
+            else:
+                # Guard against implementations not setting this.
+                state['skipread'] = set()
+                for problem in fl.verifyintegrity(state):
+                    if problem.node is not None:
+                        linkrev = fl.linkrev(fl.rev(problem.node))
+                    else:
+                        linkrev = None
+
+                    if problem.warning:
+                        self.warn(problem.warning)
+                    elif problem.error:
+                        self.err(linkrev if linkrev is not None else lr,
+                                 problem.error, f)
+                    else:
+                        raise error.ProgrammingError(
+                            'problem instance does not set warning or error '
+                            'attribute: %s' % problem.msg)
+
             seen = {}
-            rp = None
             for i in fl:
                 revisions += 1
                 n = fl.node(i)
@@ -386,75 +410,15 @@
                     else:
                         del filenodes[f][n]
 
-                # Verify contents. 4 cases to care about:
-                #
-                #   common: the most common case
-                #   rename: with a rename
-                #   meta: file content starts with b'\1\n', the metadata
-                #         header defined in filelog.py, but without a rename
-                #   ext: content stored externally
-                #
-                # More formally, their differences are shown below:
-                #
-                #                       | common | rename | meta  | ext
-                #  -------------------------------------------------------
-                #   flags()             | 0      | 0      | 0     | not 0
-                #   renamed()           | False  | True   | False | ?
-                #   rawtext[0:2]=='\1\n'| False  | True   | True  | ?
-                #
-                # "rawtext" means the raw text stored in revlog data, which
-                # could be retrieved by "revision(rev, raw=True)". "text"
-                # mentioned below is "revision(rev, raw=False)".
-                #
-                # There are 3 different lengths stored physically:
-                #  1. L1: rawsize, stored in revlog index
-                #  2. L2: len(rawtext), stored in revlog data
-                #  3. L3: len(text), stored in revlog data if flags==0, or
-                #     possibly somewhere else if flags!=0
-                #
-                # L1 should be equal to L2. L3 could be different from them.
-                # "text" may or may not affect commit hash depending on flag
-                # processors (see revlog.addflagprocessor).
-                #
-                #              | common  | rename | meta  | ext
-                # -------------------------------------------------
-                #    rawsize() | L1      | L1     | L1    | L1
-                #       size() | L1      | L2-LM  | L1(*) | L1 (?)
-                # len(rawtext) | L2      | L2     | L2    | L2
-                #    len(text) | L2      | L2     | L2    | L3
-                #  len(read()) | L2      | L2-LM  | L2-LM | L3 (?)
-                #
-                # LM:  length of metadata, depending on rawtext
-                # (*): not ideal, see comment in filelog.size
-                # (?): could be "- len(meta)" if the resolved content has
-                #      rename metadata
-                #
-                # Checks needed to be done:
-                #  1. length check: L1 == L2, in all cases.
-                #  2. hash check: depending on flag processor, we may need to
-                #     use either "text" (external), or "rawtext" (in revlog).
-                try:
-                    skipflags = self.skipflags
-                    if skipflags:
-                        skipflags &= fl.flags(i)
-                    if not skipflags:
-                        fl.read(n) # side effect: read content and do checkhash
-                        rp = fl.renamed(n)
-                    # the "L1 == L2" check
-                    l1 = fl.rawsize(i)
-                    l2 = len(fl.revision(n, raw=True))
-                    if l1 != l2:
-                        self.err(lr, _("unpacked size is %s, %s expected") %
-                                 (l2, l1), f)
-                except error.CensoredNodeError:
-                    # experimental config: censor.policy
-                    if ui.config("censor", "policy") == "abort":
-                        self.err(lr, _("censored file data"), f)
-                except Exception as inst:
-                    self.exc(lr, _("unpacking %s") % short(n), inst, f)
+                if n in state['skipread']:
+                    continue
 
                 # check renames
                 try:
+                    # This requires resolving fulltext (at least on revlogs). We
+                    # may want ``verifyintegrity()`` to pass a set of nodes with
+                    # rename metadata as an optimization.
+                    rp = fl.renamed(n)
                     if rp:
                         if lr is not None and ui.verbose:
                             ctx = lrugetctx(lr)
--- a/mercurial/vfs.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/vfs.py	Mon Oct 22 14:46:06 2018 -0400
@@ -206,13 +206,17 @@
         return util.rename(srcpath, dstpath)
 
     def readlink(self, path):
-        return os.readlink(self.join(path))
+        return util.readlink(self.join(path))
 
     def removedirs(self, path=None):
         """Remove a leaf directory and all empty intermediate ones
         """
         return util.removedirs(self.join(path))
 
+    def rmdir(self, path=None):
+        """Remove an empty directory."""
+        return os.rmdir(self.join(path))
+
     def rmtree(self, path=None, ignore_errors=False, forcibly=False):
         """Remove a directory tree recursively
 
--- a/mercurial/win32.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/win32.py	Mon Oct 22 14:46:06 2018 -0400
@@ -56,20 +56,20 @@
     _LPARAM = ctypes.c_longlong
 
 class _FILETIME(ctypes.Structure):
-    _fields_ = [('dwLowDateTime', _DWORD),
-                ('dwHighDateTime', _DWORD)]
+    _fields_ = [(r'dwLowDateTime', _DWORD),
+                (r'dwHighDateTime', _DWORD)]
 
 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
-    _fields_ = [('dwFileAttributes', _DWORD),
-                ('ftCreationTime', _FILETIME),
-                ('ftLastAccessTime', _FILETIME),
-                ('ftLastWriteTime', _FILETIME),
-                ('dwVolumeSerialNumber', _DWORD),
-                ('nFileSizeHigh', _DWORD),
-                ('nFileSizeLow', _DWORD),
-                ('nNumberOfLinks', _DWORD),
-                ('nFileIndexHigh', _DWORD),
-                ('nFileIndexLow', _DWORD)]
+    _fields_ = [(r'dwFileAttributes', _DWORD),
+                (r'ftCreationTime', _FILETIME),
+                (r'ftLastAccessTime', _FILETIME),
+                (r'ftLastWriteTime', _FILETIME),
+                (r'dwVolumeSerialNumber', _DWORD),
+                (r'nFileSizeHigh', _DWORD),
+                (r'nFileSizeLow', _DWORD),
+                (r'nNumberOfLinks', _DWORD),
+                (r'nFileIndexHigh', _DWORD),
+                (r'nFileIndexLow', _DWORD)]
 
 # CreateFile
 _FILE_SHARE_READ = 0x00000001
@@ -91,50 +91,50 @@
 _STILL_ACTIVE = 259
 
 class _STARTUPINFO(ctypes.Structure):
-    _fields_ = [('cb', _DWORD),
-                ('lpReserved', _LPSTR),
-                ('lpDesktop', _LPSTR),
-                ('lpTitle', _LPSTR),
-                ('dwX', _DWORD),
-                ('dwY', _DWORD),
-                ('dwXSize', _DWORD),
-                ('dwYSize', _DWORD),
-                ('dwXCountChars', _DWORD),
-                ('dwYCountChars', _DWORD),
-                ('dwFillAttribute', _DWORD),
-                ('dwFlags', _DWORD),
-                ('wShowWindow', _WORD),
-                ('cbReserved2', _WORD),
-                ('lpReserved2', ctypes.c_char_p),
-                ('hStdInput', _HANDLE),
-                ('hStdOutput', _HANDLE),
-                ('hStdError', _HANDLE)]
+    _fields_ = [(r'cb', _DWORD),
+                (r'lpReserved', _LPSTR),
+                (r'lpDesktop', _LPSTR),
+                (r'lpTitle', _LPSTR),
+                (r'dwX', _DWORD),
+                (r'dwY', _DWORD),
+                (r'dwXSize', _DWORD),
+                (r'dwYSize', _DWORD),
+                (r'dwXCountChars', _DWORD),
+                (r'dwYCountChars', _DWORD),
+                (r'dwFillAttribute', _DWORD),
+                (r'dwFlags', _DWORD),
+                (r'wShowWindow', _WORD),
+                (r'cbReserved2', _WORD),
+                (r'lpReserved2', ctypes.c_char_p),
+                (r'hStdInput', _HANDLE),
+                (r'hStdOutput', _HANDLE),
+                (r'hStdError', _HANDLE)]
 
 class _PROCESS_INFORMATION(ctypes.Structure):
-    _fields_ = [('hProcess', _HANDLE),
-                ('hThread', _HANDLE),
-                ('dwProcessId', _DWORD),
-                ('dwThreadId', _DWORD)]
+    _fields_ = [(r'hProcess', _HANDLE),
+                (r'hThread', _HANDLE),
+                (r'dwProcessId', _DWORD),
+                (r'dwThreadId', _DWORD)]
 
 _CREATE_NO_WINDOW = 0x08000000
 _SW_HIDE = 0
 
 class _COORD(ctypes.Structure):
-    _fields_ = [('X', ctypes.c_short),
-                ('Y', ctypes.c_short)]
+    _fields_ = [(r'X', ctypes.c_short),
+                (r'Y', ctypes.c_short)]
 
 class _SMALL_RECT(ctypes.Structure):
-    _fields_ = [('Left', ctypes.c_short),
-                ('Top', ctypes.c_short),
-                ('Right', ctypes.c_short),
-                ('Bottom', ctypes.c_short)]
+    _fields_ = [(r'Left', ctypes.c_short),
+                (r'Top', ctypes.c_short),
+                (r'Right', ctypes.c_short),
+                (r'Bottom', ctypes.c_short)]
 
 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-    _fields_ = [('dwSize', _COORD),
-                ('dwCursorPosition', _COORD),
-                ('wAttributes', _WORD),
-                ('srWindow', _SMALL_RECT),
-                ('dwMaximumWindowSize', _COORD)]
+    _fields_ = [(r'dwSize', _COORD),
+                (r'dwCursorPosition', _COORD),
+                (r'wAttributes', _WORD),
+                (r'srWindow', _SMALL_RECT),
+                (r'dwMaximumWindowSize', _COORD)]
 
 _STD_OUTPUT_HANDLE = _DWORD(-11).value
 _STD_ERROR_HANDLE = _DWORD(-12).value
@@ -149,40 +149,40 @@
 # These structs are only complete enough to achieve what we need.
 class CERT_CHAIN_CONTEXT(ctypes.Structure):
     _fields_ = (
-        ("cbSize", _DWORD),
+        (r"cbSize", _DWORD),
 
         # CERT_TRUST_STATUS struct
-        ("dwErrorStatus", _DWORD),
-        ("dwInfoStatus", _DWORD),
+        (r"dwErrorStatus", _DWORD),
+        (r"dwInfoStatus", _DWORD),
 
-        ("cChain", _DWORD),
-        ("rgpChain", ctypes.c_void_p),
-        ("cLowerQualityChainContext", _DWORD),
-        ("rgpLowerQualityChainContext", ctypes.c_void_p),
-        ("fHasRevocationFreshnessTime", _BOOL),
-        ("dwRevocationFreshnessTime", _DWORD),
+        (r"cChain", _DWORD),
+        (r"rgpChain", ctypes.c_void_p),
+        (r"cLowerQualityChainContext", _DWORD),
+        (r"rgpLowerQualityChainContext", ctypes.c_void_p),
+        (r"fHasRevocationFreshnessTime", _BOOL),
+        (r"dwRevocationFreshnessTime", _DWORD),
     )
 
 class CERT_USAGE_MATCH(ctypes.Structure):
     _fields_ = (
-        ("dwType", _DWORD),
+        (r"dwType", _DWORD),
 
          # CERT_ENHKEY_USAGE struct
-        ("cUsageIdentifier", _DWORD),
-        ("rgpszUsageIdentifier", ctypes.c_void_p), # LPSTR *
+        (r"cUsageIdentifier", _DWORD),
+        (r"rgpszUsageIdentifier", ctypes.c_void_p), # LPSTR *
     )
 
 class CERT_CHAIN_PARA(ctypes.Structure):
     _fields_ = (
-        ("cbSize", _DWORD),
-        ("RequestedUsage", CERT_USAGE_MATCH),
-        ("RequestedIssuancePolicy", CERT_USAGE_MATCH),
-        ("dwUrlRetrievalTimeout", _DWORD),
-        ("fCheckRevocationFreshnessTime", _BOOL),
-        ("dwRevocationFreshnessTime", _DWORD),
-        ("pftCacheResync", ctypes.c_void_p), # LPFILETIME
-        ("pStrongSignPara", ctypes.c_void_p), # PCCERT_STRONG_SIGN_PARA
-        ("dwStrongSignFlags", _DWORD),
+        (r"cbSize", _DWORD),
+        (r"RequestedUsage", CERT_USAGE_MATCH),
+        (r"RequestedIssuancePolicy", CERT_USAGE_MATCH),
+        (r"dwUrlRetrievalTimeout", _DWORD),
+        (r"fCheckRevocationFreshnessTime", _BOOL),
+        (r"dwRevocationFreshnessTime", _DWORD),
+        (r"pftCacheResync", ctypes.c_void_p), # LPFILETIME
+        (r"pStrongSignPara", ctypes.c_void_p), # PCCERT_STRONG_SIGN_PARA
+        (r"dwStrongSignFlags", _DWORD),
     )
 
 # types of parameters of C functions used (required by pypy)
@@ -307,8 +307,8 @@
     if code > 0x7fffffff:
         code -= 2**32
     err = ctypes.WinError(code=code)
-    raise OSError(err.errno, '%s: %s' % (name,
-                                         encoding.strtolocal(err.strerror)))
+    raise OSError(err.errno, r'%s: %s' % (encoding.strfromlocal(name),
+                                          err.strerror))
 
 def _getfileinfo(name):
     fh = _kernel32.CreateFileA(name, 0,
@@ -579,11 +579,12 @@
         env = '\0'
     env += '\0'
 
-    args = subprocess.list2cmdline(args)
+    args = subprocess.list2cmdline(pycompat.rapply(encoding.strfromlocal, args))
 
+    # TODO: CreateProcessW on py3?
     res = _kernel32.CreateProcessA(
-        None, args, None, None, False, _CREATE_NO_WINDOW,
-        env, pycompat.getcwd(), ctypes.byref(si), ctypes.byref(pi))
+        None, encoding.strtolocal(args), None, None, False, _CREATE_NO_WINDOW,
+        env, encoding.getcwd(), ctypes.byref(si), ctypes.byref(pi))
     if not res:
         raise ctypes.WinError()
 
@@ -596,7 +597,8 @@
         # use EPERM because it is POSIX prescribed value, even though
         # unlink(2) on directories returns EISDIR on Linux
         raise IOError(errno.EPERM,
-                      "Unlinking directory not permitted: '%s'" % f)
+                      r"Unlinking directory not permitted: '%s'"
+                      % encoding.strfromlocal(f))
 
     # POSIX allows to unlink and rename open files. Windows has serious
     # problems with doing that:
@@ -615,7 +617,7 @@
     # callers to recreate f immediately while having other readers do their
     # implicit zombie filename blocking on a temporary name.
 
-    for tries in xrange(10):
+    for tries in pycompat.xrange(10):
         temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
         try:
             os.rename(f, temp)  # raises OSError EEXIST if temp exists
@@ -624,7 +626,7 @@
             if e.errno != errno.EEXIST:
                 raise
     else:
-        raise IOError(errno.EEXIST, "No usable temporary filename found")
+        raise IOError(errno.EEXIST, r"No usable temporary filename found")
 
     try:
         os.unlink(temp)
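
The blanket r'' prefixes in these win32.py structs look odd but are
load-bearing: Mercurial's Python 3 source loader rewrites unprefixed string
literals into bytes, and ctypes requires native str for ``_fields_`` names.
A minimal illustration of the failure being avoided (hypothetical struct,
plain ctypes):

    import ctypes

    class POINT(ctypes.Structure):
        # Field names must be native str; a byteified name such as b'x'
        # raises TypeError on Python 3.
        _fields_ = [(r'x', ctypes.c_short),
                    (r'y', ctypes.c_short)]

    pt = POINT(1, 2)
    assert (pt.x, pt.y) == (1, 2)
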
--- a/mercurial/windows.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/windows.py	Mon Oct 22 14:46:06 2018 -0400
@@ -123,11 +123,36 @@
         object.__setattr__(self, r'_lastop', self.OPREAD)
         return self._fp.readlines(*args, **kwargs)
 
+class fdproxy(object):
+    """Wraps osutil.posixfile() to override the name attribute to reflect the
+    underlying file name.
+    """
+    def __init__(self, name, fp):
+        self.name = name
+        self._fp = fp
+
+    def __enter__(self):
+        return self._fp.__enter__()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._fp.__exit__(exc_type, exc_value, traceback)
+
+    def __iter__(self):
+        return iter(self._fp)
+
+    def __getattr__(self, name):
+        return getattr(self._fp, name)
+
 def posixfile(name, mode='r', buffering=-1):
     '''Open a file with even more POSIX-like semantics'''
     try:
         fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError
 
+        # PyFile_FromFd() ignores the name, and seems to report fp.name as the
+        # underlying file descriptor.
+        if pycompat.ispy3:
+            fp = fdproxy(name, fp)
+
         # The position when opening in append mode is implementation defined, so
         # make it consistent with other platforms, which position at EOF.
         if 'a' in mode:
@@ -139,8 +164,8 @@
         return fp
     except WindowsError as err:
         # convert to a friendlier exception
-        raise IOError(err.errno, '%s: %s' % (
-            name, encoding.strtolocal(err.strerror)))
+        raise IOError(err.errno, r'%s: %s' % (
+            encoding.strfromlocal(name), err.strerror))
 
 # may be wrapped by win32mbcs extension
 listdir = osutil.listdir
@@ -176,7 +201,7 @@
             if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst):
                 raise
             self.close()
-            raise IOError(errno.EPIPE, 'Broken pipe')
+            raise IOError(errno.EPIPE, r'Broken pipe')
 
     def flush(self):
         try:
@@ -184,7 +209,7 @@
         except IOError as inst:
             if not win32.lasterrorwaspipeerror(inst):
                 raise
-            raise IOError(errno.EPIPE, 'Broken pipe')
+            raise IOError(errno.EPIPE, r'Broken pipe')
 
 def _is_win_9x():
     '''return true if run on windows 95, 98 or me.'''
@@ -289,7 +314,7 @@
     index = 0
     pathlen = len(path)
     while index < pathlen:
-        c = path[index]
+        c = path[index:index + 1]
         if c == b'\'':   # no expansion within single quotes
             path = path[index + 1:]
             pathlen = len(path)
@@ -319,7 +344,7 @@
                     var = path[:index]
 
                     # See below for why empty variables are handled specially
-                    if env.get(var, '') != '':
+                    if env.get(var, b'') != b'':
                         res += b'%' + var + b'%'
                     else:
                         res += b'${' + var + b'}'
@@ -340,20 +365,20 @@
                 # VAR, and that really confuses things like revset expressions.
                 # OTOH, if it's left in Unix format and the hook runs sh.exe, it
                 # will substitute to an empty string, and everything is happy.
-                if env.get(var, '') != '':
+                if env.get(var, b'') != b'':
                     res += b'%' + var + b'%'
                 else:
                     res += b'$' + var
 
-                if c != '':
+                if c != b'':
                     index -= 1
         elif (c == b'~' and index + 1 < pathlen
-              and path[index + 1] in (b'\\', b'/')):
+              and path[index + 1:index + 2] in (b'\\', b'/')):
             res += "%USERPROFILE%"
         elif (c == b'\\' and index + 1 < pathlen
-              and path[index + 1] in (b'$', b'~')):
+              and path[index + 1:index + 2] in (b'$', b'~')):
             # Skip '\', but only if it is escaping $ or ~
-            res += path[index + 1]
+            res += path[index + 1:index + 2]
             index += 1
         else:
             res += c
@@ -389,7 +414,7 @@
     """
     global _quotere
     if _quotere is None:
-        _quotere = re.compile(r'(\\*)("|\\$)')
+        _quotere = re.compile(br'(\\*)("|\\$)')
     global _needsshellquote
     if _needsshellquote is None:
         # ":" is also treated as "safe character", because it is used as a part
@@ -397,11 +422,11 @@
         # safe because shlex.split() (kind of) treats it as an escape char and
         # drops it.  It will leave the next character, even if it is another
         # "\".
-        _needsshellquote = re.compile(r'[^a-zA-Z0-9._:/-]').search
+        _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search
     if s and not _needsshellquote(s) and not _quotere.search(s):
         # "s" shouldn't have to be quoted
         return s
-    return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
+    return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
 
 def _unquote(s):
     if s.startswith(b'"') and s.endswith(b'"'):
@@ -494,6 +519,9 @@
     If gid is None, return the name of the current group."""
     return None
 
+def readlink(pathname):
+    return pycompat.fsencode(os.readlink(pycompat.fsdecode(pathname)))
+
 def removedirs(name):
     """special version of os.removedirs that does not remove symlinked
     directories or junction points if they actually contain files"""
@@ -523,7 +551,7 @@
         os.rename(src, dst)
 
 def gethgcmd():
-    return [sys.executable] + sys.argv[:1]
+    return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
 
 def groupmembers(name):
     # Don't support groups on Windows for now
@@ -554,9 +582,11 @@
         scope = (scope,)
     for s in scope:
         try:
-            val = winreg.QueryValueEx(winreg.OpenKey(s, key), valname)[0]
-            # never let a Unicode string escape into the wild
-            return encoding.unitolocal(val)
+            with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey:
+                name = valname and encoding.strfromlocal(valname) or valname
+                val = winreg.QueryValueEx(hkey, name)[0]
+                # never let a Unicode string escape into the wild
+                return encoding.unitolocal(val)
         except EnvironmentError:
             pass
 
@@ -590,4 +620,4 @@
     return ''.join(chunks)
 
 def bindunixsocket(sock, path):
-    raise NotImplementedError('unsupported platform')
+    raise NotImplementedError(r'unsupported platform')
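
Several hunks above replace ``path[index]`` with ``path[index:index + 1]``.
The reason: on Python 3, indexing bytes yields an int, so comparisons against
one-byte literals silently fail, while slicing returns bytes on both majors:

    path = b'~x'
    assert path[0:1] == b'~'   # bytes of length one on Python 2 and 3
    # On Python 3, path[0] == b'~' is False: path[0] is the int 126.
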
--- a/mercurial/wireprotoframing.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotoframing.py	Mon Oct 22 14:46:06 2018 -0400
@@ -17,14 +17,16 @@
 from .i18n import _
 from .thirdparty import (
     attr,
-    cbor,
 )
 from . import (
     encoding,
     error,
+    pycompat,
     util,
+    wireprototypes,
 )
 from .utils import (
+    cborutil,
     stringutil,
 )
 
@@ -47,7 +49,8 @@
 FRAME_TYPE_ERROR_RESPONSE = 0x05
 FRAME_TYPE_TEXT_OUTPUT = 0x06
 FRAME_TYPE_PROGRESS = 0x07
-FRAME_TYPE_STREAM_SETTINGS = 0x08
+FRAME_TYPE_SENDER_PROTOCOL_SETTINGS = 0x08
+FRAME_TYPE_STREAM_SETTINGS = 0x09
 
 FRAME_TYPES = {
     b'command-request': FRAME_TYPE_COMMAND_REQUEST,
@@ -56,6 +59,7 @@
     b'error-response': FRAME_TYPE_ERROR_RESPONSE,
     b'text-output': FRAME_TYPE_TEXT_OUTPUT,
     b'progress': FRAME_TYPE_PROGRESS,
+    b'sender-protocol-settings': FRAME_TYPE_SENDER_PROTOCOL_SETTINGS,
     b'stream-settings': FRAME_TYPE_STREAM_SETTINGS,
 }
 
@@ -87,6 +91,22 @@
     b'eos': FLAG_COMMAND_RESPONSE_EOS,
 }
 
+FLAG_SENDER_PROTOCOL_SETTINGS_CONTINUATION = 0x01
+FLAG_SENDER_PROTOCOL_SETTINGS_EOS = 0x02
+
+FLAGS_SENDER_PROTOCOL_SETTINGS = {
+    b'continuation': FLAG_SENDER_PROTOCOL_SETTINGS_CONTINUATION,
+    b'eos': FLAG_SENDER_PROTOCOL_SETTINGS_EOS,
+}
+
+FLAG_STREAM_ENCODING_SETTINGS_CONTINUATION = 0x01
+FLAG_STREAM_ENCODING_SETTINGS_EOS = 0x02
+
+FLAGS_STREAM_ENCODING_SETTINGS = {
+    b'continuation': FLAG_STREAM_ENCODING_SETTINGS_CONTINUATION,
+    b'eos': FLAG_STREAM_ENCODING_SETTINGS_EOS,
+}
+
 # Maps frame types to their available flags.
 FRAME_TYPE_FLAGS = {
     FRAME_TYPE_COMMAND_REQUEST: FLAGS_COMMAND_REQUEST,
@@ -95,7 +115,8 @@
     FRAME_TYPE_ERROR_RESPONSE: {},
     FRAME_TYPE_TEXT_OUTPUT: {},
     FRAME_TYPE_PROGRESS: {},
-    FRAME_TYPE_STREAM_SETTINGS: {},
+    FRAME_TYPE_SENDER_PROTOCOL_SETTINGS: FLAGS_SENDER_PROTOCOL_SETTINGS,
+    FRAME_TYPE_STREAM_SETTINGS: FLAGS_STREAM_ENCODING_SETTINGS,
 }
 
 ARGUMENT_RECORD_HEADER = struct.Struct(r'<HH')
@@ -217,8 +238,8 @@
             finalflags |= int(flag)
 
     if payload.startswith(b'cbor:'):
-        payload = cbor.dumps(stringutil.evalpythonliteral(payload[5:]),
-                             canonical=True)
+        payload = b''.join(cborutil.streamencode(
+            stringutil.evalpythonliteral(payload[5:])))
 
     else:
         payload = stringutil.unescapestr(payload)
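
The recurring change in this file swaps the vendored one-shot CBOR encoder
for the new incremental one; both spellings appear in the hunks here, so the
equivalence is simply:

    # Old: payload = cbor.dumps(obj, canonical=True)
    # New: join the chunks produced by the incremental encoder.
    from mercurial.utils import cborutil

    payload = b''.join(cborutil.streamencode({b'status': b'ok'}))
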
@@ -279,7 +300,8 @@
                  payload)
 
 def createcommandframes(stream, requestid, cmd, args, datafh=None,
-                        maxframesize=DEFAULT_MAX_FRAME_SIZE):
+                        maxframesize=DEFAULT_MAX_FRAME_SIZE,
+                        redirect=None):
     """Create frames necessary to transmit a request to run a command.
 
     This is a generator of bytearrays. Each item represents a frame
@@ -289,7 +311,10 @@
     if args:
         data[b'args'] = args
 
-    data = cbor.dumps(data, canonical=True)
+    if redirect:
+        data[b'redirect'] = redirect
+
+    data = b''.join(cborutil.streamencode(data))
 
     offset = 0
 
@@ -340,38 +365,35 @@
             if done:
                 break
 
-def createcommandresponseframesfrombytes(stream, requestid, data,
-                                         maxframesize=DEFAULT_MAX_FRAME_SIZE):
-    """Create a raw frame to send a bytes response from static bytes input.
+def createcommandresponseokframe(stream, requestid):
+    overall = b''.join(cborutil.streamencode({b'status': b'ok'}))
 
-    Returns a generator of bytearrays.
-    """
-    # Automatically send the overall CBOR response map.
-    overall = cbor.dumps({b'status': b'ok'}, canonical=True)
-    if len(overall) > maxframesize:
-        raise error.ProgrammingError('not yet implemented')
+    if stream.streamsettingssent:
+        overall = stream.encode(overall)
+        encoded = True
 
-    # Simple case where we can fit the full response in a single frame.
-    if len(overall) + len(data) <= maxframesize:
-        flags = FLAG_COMMAND_RESPONSE_EOS
-        yield stream.makeframe(requestid=requestid,
-                               typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                               flags=flags,
-                               payload=overall + data)
-        return
+        if not overall:
+            return None
+    else:
+        encoded = False
 
-    # It's easier to send the overall CBOR map in its own frame than to track
-    # offsets.
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                           flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
-                           payload=overall)
+    return stream.makeframe(requestid=requestid,
+                            typeid=FRAME_TYPE_COMMAND_RESPONSE,
+                            flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+                            payload=overall,
+                            encoded=encoded)
+
+def createcommandresponseeosframes(stream, requestid,
+                                   maxframesize=DEFAULT_MAX_FRAME_SIZE):
+    """Create an empty payload frame representing command end-of-stream."""
+    payload = stream.flush()
 
     offset = 0
     while True:
-        chunk = data[offset:offset + maxframesize]
+        chunk = payload[offset:offset + maxframesize]
         offset += len(chunk)
-        done = offset == len(data)
+
+        done = offset == len(payload)
 
         if done:
             flags = FLAG_COMMAND_RESPONSE_EOS
@@ -381,44 +403,44 @@
         yield stream.makeframe(requestid=requestid,
                                typeid=FRAME_TYPE_COMMAND_RESPONSE,
                                flags=flags,
-                               payload=chunk)
+                               payload=chunk,
+                               encoded=payload != b'')
 
         if done:
             break
 
-def createbytesresponseframesfromgen(stream, requestid, gen,
-                                     maxframesize=DEFAULT_MAX_FRAME_SIZE):
-    overall = cbor.dumps({b'status': b'ok'}, canonical=True)
-
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                           flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
-                           payload=overall)
-
-    cb = util.chunkbuffer(gen)
-
-    flags = 0
+def createalternatelocationresponseframe(stream, requestid, location):
+    data = {
+        b'status': b'redirect',
+        b'location': {
+            b'url': location.url,
+            b'mediatype': location.mediatype,
+        }
+    }
 
-    while True:
-        chunk = cb.read(maxframesize)
-        if not chunk:
-            break
+    for a in (r'size', r'fullhashes', r'fullhashseed', r'serverdercerts',
+              r'servercadercerts'):
+        value = getattr(location, a)
+        if value is not None:
+            data[b'location'][pycompat.bytestr(a)] = value
+
+    payload = b''.join(cborutil.streamencode(data))
 
-        yield stream.makeframe(requestid=requestid,
-                               typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                               flags=flags,
-                               payload=chunk)
-
-        flags |= FLAG_COMMAND_RESPONSE_CONTINUATION
+    if stream.streamsettingssent:
+        payload = stream.encode(payload)
+        encoded = True
+    else:
+        encoded = False
 
-    flags ^= FLAG_COMMAND_RESPONSE_CONTINUATION
-    flags |= FLAG_COMMAND_RESPONSE_EOS
-    yield stream.makeframe(requestid=requestid,
-                           typeid=FRAME_TYPE_COMMAND_RESPONSE,
-                           flags=flags,
-                           payload=b'')
+    return stream.makeframe(requestid=requestid,
+                            typeid=FRAME_TYPE_COMMAND_RESPONSE,
+                            flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+                            payload=payload,
+                            encoded=encoded)
 
 def createcommanderrorresponse(stream, requestid, message, args=None):
+    # TODO should this be using a list of {'msg': ..., 'args': {}} so atom
+    # formatting works consistently?
     m = {
         b'status': b'error',
         b'error': {
@@ -429,7 +451,7 @@
     if args:
         m[b'error'][b'args'] = args
 
-    overall = cbor.dumps(m, canonical=True)
+    overall = b''.join(cborutil.streamencode(m))
 
     yield stream.makeframe(requestid=requestid,
                            typeid=FRAME_TYPE_COMMAND_RESPONSE,
@@ -440,10 +462,10 @@
     # TODO properly handle frame size limits.
     assert len(msg) <= DEFAULT_MAX_FRAME_SIZE
 
-    payload = cbor.dumps({
+    payload = b''.join(cborutil.streamencode({
         b'type': errtype,
         b'message': [{b'msg': msg}],
-    }, canonical=True)
+    }))
 
     yield stream.makeframe(requestid=requestid,
                            typeid=FRAME_TYPE_ERROR_RESPONSE,
@@ -493,7 +515,7 @@
 
         atomdicts.append(atom)
 
-    payload = cbor.dumps(atomdicts, canonical=True)
+    payload = b''.join(cborutil.streamencode(atomdicts))
 
     if len(payload) > maxframesize:
         raise ValueError('cannot encode data in a single frame')
@@ -503,6 +525,242 @@
                            flags=0,
                            payload=payload)
 
+class bufferingcommandresponseemitter(object):
+    """Helper object to emit command response frames intelligently.
+
+    Raw command response data is likely emitted in chunks much smaller
+    than what can fit in a single frame. This class exists to buffer
+    chunks until enough data is available to fit in a single frame.
+
+    TODO we'll need something like this when compression is supported.
+    So it might make sense to implement this functionality at the stream
+    level.
+    """
+    def __init__(self, stream, requestid, maxframesize=DEFAULT_MAX_FRAME_SIZE):
+        self._stream = stream
+        self._requestid = requestid
+        self._maxsize = maxframesize
+        self._chunks = []
+        self._chunkssize = 0
+
+    def send(self, data):
+        """Send new data for emission.
+
+        This is a generator of new frames derived from the new input.
+
+        If the special input ``None`` is received, flushes all buffered
+        data to frames.
+        """
+
+        if data is None:
+            for frame in self._flush():
+                yield frame
+            return
+
+        data = self._stream.encode(data)
+
+        # There is a ton of potential to do more complicated things here.
+        # Our immediate goal is to coalesce small chunks into big frames,
+        # not to achieve the fewest frames possible. So we go with
+        # a simple implementation:
+        #
+        # * If a chunk is too large for a frame, we flush and emit frames
+        #   for the new chunk.
+        # * If a chunk can be buffered without total buffered size limits
+        #   being exceeded, we do that.
+        # * If a chunk causes us to go over our buffering limit, we flush
+        #   and then buffer the new chunk.
+
+        if not data:
+            return
+
+        if len(data) > self._maxsize:
+            for frame in self._flush():
+                yield frame
+
+            # Now emit frames for the big chunk.
+            offset = 0
+            while True:
+                chunk = data[offset:offset + self._maxsize]
+                offset += len(chunk)
+
+                yield self._stream.makeframe(
+                    self._requestid,
+                    typeid=FRAME_TYPE_COMMAND_RESPONSE,
+                    flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+                    payload=chunk,
+                    encoded=True)
+
+                if offset == len(data):
+                    return
+
+        # If we don't have enough to constitute a full frame, buffer and
+        # return.
+        if len(data) + self._chunkssize < self._maxsize:
+            self._chunks.append(data)
+            self._chunkssize += len(data)
+            return
+
+        # Else flush what we have and buffer the new chunk. We could do
+        # something more intelligent here, like splitting the chunk. Let's
+        # keep things simple for now.
+        for frame in self._flush():
+            yield frame
+
+        self._chunks.append(data)
+        self._chunkssize = len(data)
+
+    def _flush(self):
+        payload = b''.join(self._chunks)
+        assert len(payload) <= self._maxsize
+
+        self._chunks[:] = []
+        self._chunkssize = 0
+
+        if not payload:
+            return
+
+        yield self._stream.makeframe(
+            self._requestid,
+            typeid=FRAME_TYPE_COMMAND_RESPONSE,
+            flags=FLAG_COMMAND_RESPONSE_CONTINUATION,
+            payload=payload,
+            encoded=True)
+
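# The coalescing policy described in the comment above, as a standalone
# sketch decoupled from streams, encoders and frames (all names here are
# hypothetical, not part of the real API).
def coalesce(chunks, maxsize):
    buf, bufsize = [], 0
    for chunk in chunks:
        if len(chunk) > maxsize:
            # Oversized chunk: flush, then slice the big chunk directly.
            if buf:
                yield b''.join(buf)
                buf, bufsize = [], 0
            for i in range(0, len(chunk), maxsize):
                yield chunk[i:i + maxsize]
        elif buf and bufsize + len(chunk) >= maxsize:
            # Would overflow the buffer: flush, then buffer the new chunk.
            yield b''.join(buf)
            buf, bufsize = [chunk], len(chunk)
        else:
            buf.append(chunk)
            bufsize += len(chunk)
    if buf:
        yield b''.join(buf)      # cf. send(None) triggering _flush()

assert list(coalesce([b'ab', b'cd', b'ef' * 20, b'gh'], maxsize=8)) == \
    [b'abcd'] + [b'efefefef'] * 5 + [b'gh']
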
+# TODO consider defining encoders/decoders using the util.compressionengine
+# mechanism.
+
+class identityencoder(object):
+    """Encoder for the "identity" stream encoding profile."""
+    def __init__(self, ui):
+        pass
+
+    def encode(self, data):
+        return data
+
+    def flush(self):
+        return b''
+
+    def finish(self):
+        return b''
+
+class identitydecoder(object):
+    """Decoder for the "identity" stream encoding profile."""
+
+    def __init__(self, ui, extraobjs):
+        if extraobjs:
+            raise error.Abort(_('identity decoder received unexpected '
+                                'additional values'))
+
+    def decode(self, data):
+        return data
+
+class zlibencoder(object):
+    def __init__(self, ui):
+        import zlib
+        self._zlib = zlib
+        self._compressor = zlib.compressobj()
+
+    def encode(self, data):
+        return self._compressor.compress(data)
+
+    def flush(self):
+        # Z_SYNC_FLUSH doesn't reset compression context, which is
+        # what we want.
+        return self._compressor.flush(self._zlib.Z_SYNC_FLUSH)
+
+    def finish(self):
+        res = self._compressor.flush(self._zlib.Z_FINISH)
+        self._compressor = None
+        return res
+
+class zlibdecoder(object):
+    def __init__(self, ui, extraobjs):
+        import zlib
+
+        if extraobjs:
+            raise error.Abort(_('zlib decoder received unexpected '
+                                'additional values'))
+
+        self._decompressor = zlib.decompressobj()
+
+    def decode(self, data):
+        # Python 2's zlib module doesn't use the buffer protocol and can't
+        # handle all bytes-like types.
+        if not pycompat.ispy3 and isinstance(data, bytearray):
+            data = bytes(data)
+
+        return self._decompressor.decompress(data)
+
+class zstdbaseencoder(object):
+    def __init__(self, level):
+        from . import zstd
+
+        self._zstd = zstd
+        cctx = zstd.ZstdCompressor(level=level)
+        self._compressor = cctx.compressobj()
+
+    def encode(self, data):
+        return self._compressor.compress(data)
+
+    def flush(self):
+        # COMPRESSOBJ_FLUSH_BLOCK flushes all data previously fed into the
+        # compressor and allows a decompressor to access all encoded data
+        # up to this point.
+        return self._compressor.flush(self._zstd.COMPRESSOBJ_FLUSH_BLOCK)
+
+    def finish(self):
+        res = self._compressor.flush(self._zstd.COMPRESSOBJ_FLUSH_FINISH)
+        self._compressor = None
+        return res
+
+class zstd8mbencoder(zstdbaseencoder):
+    def __init__(self, ui):
+        super(zstd8mbencoder, self).__init__(3)
+
+class zstdbasedecoder(object):
+    def __init__(self, maxwindowsize):
+        from . import zstd
+        dctx = zstd.ZstdDecompressor(max_window_size=maxwindowsize)
+        self._decompressor = dctx.decompressobj()
+
+    def decode(self, data):
+        return self._decompressor.decompress(data)
+
+class zstd8mbdecoder(zstdbasedecoder):
+    def __init__(self, ui, extraobjs):
+        if extraobjs:
+            raise error.Abort(_('zstd8mb decoder received unexpected '
+                                'additional values'))
+
+        super(zstd8mbdecoder, self).__init__(maxwindowsize=8 * 1048576)
+
+# We populate these lazily to avoid pulling in the optional compression
+# modules merely as a side effect of importing this module.
+STREAM_ENCODERS = {}
+STREAM_ENCODERS_ORDER = []
+
+def populatestreamencoders():
+    if STREAM_ENCODERS:
+        return
+
+    try:
+        from . import zstd
+        # Attribute access forces the demand-loaded module to actually
+        # import, raising ImportError here if it is unavailable.
+        zstd.__version__
+    except ImportError:
+        zstd = None
+
+    # zstandard is fastest and is preferred.
+    if zstd:
+        STREAM_ENCODERS[b'zstd-8mb'] = (zstd8mbencoder, zstd8mbdecoder)
+        STREAM_ENCODERS_ORDER.append(b'zstd-8mb')
+
+    STREAM_ENCODERS[b'zlib'] = (zlibencoder, zlibdecoder)
+    STREAM_ENCODERS_ORDER.append(b'zlib')
+
+    STREAM_ENCODERS[b'identity'] = (identityencoder, identitydecoder)
+    STREAM_ENCODERS_ORDER.append(b'identity')
+
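# A stdlib-only sketch of how an encoder/decoder pair from the registry
# above is meant to compose on the wire. Z_SYNC_FLUSH (as used by
# zlibencoder.flush) makes everything fed so far decodable without ending
# the compression context, so mid-stream flushes are safe.
import zlib

compressor = zlib.compressobj()
wire = compressor.compress(b'frame one') + compressor.flush(zlib.Z_SYNC_FLUSH)
# The stream can keep going after a sync flush...
wire += compressor.compress(b'frame two') + compressor.flush(zlib.Z_FINISH)

decompressor = zlib.decompressobj()
assert decompressor.decompress(wire) == b'frame oneframe two'
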
 class stream(object):
     """Represents a logical unidirectional series of frames."""
 
@@ -523,12 +781,125 @@
         return makeframe(requestid, self.streamid, streamflags, typeid, flags,
                          payload)
 
+class inputstream(stream):
+    """Represents a stream used for receiving data."""
+
+    def __init__(self, streamid, active=False):
+        super(inputstream, self).__init__(streamid, active=active)
+        self._decoder = None
+
+    def setdecoder(self, ui, name, extraobjs):
+        """Set the decoder for this stream.
+
+        Receives the stream profile name and any additional CBOR objects
+        decoded from the stream encoding settings frame payloads.
+        """
+        if name not in STREAM_ENCODERS:
+            raise error.Abort(_('unknown stream decoder: %s') % name)
+
+        self._decoder = STREAM_ENCODERS[name][1](ui, extraobjs)
+
+    def decode(self, data):
+        # Default is identity decoder. We don't bother instantiating one
+        # because it is trivial.
+        if not self._decoder:
+            return data
+
+        return self._decoder.decode(data)
+
+    def flush(self):
+        if not self._decoder:
+            return b''
+
+        return self._decoder.flush()
+
+class outputstream(stream):
+    """Represents a stream used for sending data."""
+
+    def __init__(self, streamid, active=False):
+        super(outputstream, self).__init__(streamid, active=active)
+        self.streamsettingssent = False
+        self._encoder = None
+        self._encodername = None
+
+    def setencoder(self, ui, name):
+        """Set the encoder for this stream.
+
+        Receives the stream profile name.
+        """
+        if name not in STREAM_ENCODERS:
+            raise error.Abort(_('unknown stream encoder: %s') % name)
+
+        self._encoder = STREAM_ENCODERS[name][0](ui)
+        self._encodername = name
+
+    def encode(self, data):
+        if not self._encoder:
+            return data
+
+        return self._encoder.encode(data)
+
+    def flush(self):
+        if not self._encoder:
+            return b''
+
+        return self._encoder.flush()
+
+    def finish(self):
+        if not self._encoder:
+            return b''
+
+        return self._encoder.finish()
+
+    def makeframe(self, requestid, typeid, flags, payload,
+                  encoded=False):
+        """Create a frame to be sent out over this stream.
+
+        Only returns the frame instance. Does not actually send it.
+        """
+        streamflags = 0
+        if not self._active:
+            streamflags |= STREAM_FLAG_BEGIN_STREAM
+            self._active = True
+
+        if encoded:
+            if not self.streamsettingssent:
+                raise error.ProgrammingError(
+                    b'attempting to send encoded frame without sending stream '
+                    b'settings')
+
+            streamflags |= STREAM_FLAG_ENCODING_APPLIED
+
+        if (typeid == FRAME_TYPE_STREAM_SETTINGS
+            and flags & FLAG_STREAM_ENCODING_SETTINGS_EOS):
+            self.streamsettingssent = True
+
+        return makeframe(requestid, self.streamid, streamflags, typeid, flags,
+                         payload)
+
+    def makestreamsettingsframe(self, requestid):
+        """Create a stream settings frame for this stream.
+
+        Returns frame data or None if no stream settings frame is needed or has
+        already been sent.
+        """
+        if not self._encoder or self.streamsettingssent:
+            return None
+
+        payload = b''.join(cborutil.streamencode(self._encodername))
+        return self.makeframe(requestid, FRAME_TYPE_STREAM_SETTINGS,
+                              FLAG_STREAM_ENCODING_SETTINGS_EOS, payload)
+
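# The ordering invariant enforced by makeframe() above, as a tiny
# self-contained model (names are illustrative, not the real frame
# plumbing): encoded payloads may only flow once the stream settings
# frame announcing the encoding profile has been sent.
class ministream(object):
    def __init__(self):
        self.settingssent = False

    def sendsettings(self):
        self.settingssent = True
        return b'settings-frame'

    def sendframe(self, payload, encoded=False):
        if encoded and not self.settingssent:
            raise AssertionError('encoded frame before stream settings')
        return payload

s = ministream()
frames = [s.sendsettings(), s.sendframe(b'payload', encoded=True)]
assert frames == [b'settings-frame', b'payload']
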
 def ensureserverstream(stream):
     if stream.streamid % 2:
         raise error.ProgrammingError('server should only write to even '
                                      'numbered streams; %d is not even' %
                                      stream.streamid)
 
+DEFAULT_PROTOCOL_SETTINGS = {
+    'contentencodings': [b'identity'],
+}
+
 class serverreactor(object):
     """Holds state of a server handling frame-based protocol requests.
 
@@ -594,7 +965,7 @@
     between who responds to what.
     """
 
-    def __init__(self, deferoutput=False):
+    def __init__(self, ui, deferoutput=False):
         """Construct a new server reactor.
 
         ``deferoutput`` can be used to indicate that no output frames should be
@@ -604,8 +975,9 @@
         send those frames. This is useful for half-duplex transports where the
         sender cannot receive until all data has been transmitted.
         """
+        self._ui = ui
         self._deferoutput = deferoutput
-        self._state = 'idle'
+        self._state = 'initial'
         self._nextoutgoingstreamid = 2
         self._bufferedframegens = []
         # stream id -> stream instance for all active streams from the client.
@@ -618,6 +990,13 @@
         # set.
         self._activecommands = set()
 
+        self._protocolsettingsdecoder = None
+
+        # Sender protocol settings are optional. Set implied default values.
+        self._sendersettings = dict(DEFAULT_PROTOCOL_SETTINGS)
+
+        populatestreamencoders()
+
     def onframerecv(self, frame):
         """Process a frame that has been received off the wire.
 
@@ -637,7 +1016,7 @@
                     _('received frame on unknown inactive stream without '
                       'beginning of stream flag set'))
 
-            self._incomingstreams[frame.streamid] = stream(frame.streamid)
+            self._incomingstreams[frame.streamid] = inputstream(frame.streamid)
 
         if frame.streamflags & STREAM_FLAG_ENCODING_APPLIED:
             # TODO handle decoding frames
@@ -649,6 +1028,8 @@
             del self._incomingstreams[frame.streamid]
 
         handlers = {
+            'initial': self._onframeinitial,
+            'protocol-settings-receiving': self._onframeprotocolsettings,
             'idle': self._onframeidle,
             'command-receiving': self._onframecommandreceiving,
             'errored': self._onframeerrored,
@@ -660,38 +1041,132 @@
 
         return meth(frame)
 
-    def oncommandresponseready(self, stream, requestid, data):
-        """Signal that a bytes response is ready to be sent to the client.
+    def oncommandresponsereadyobjects(self, stream, requestid, objs):
+        """Signal that objects are ready to be sent to the client.
 
-        The raw bytes response is passed as an argument.
+        ``objs`` is an iterable of objects (typically a generator) that will
+        be encoded via CBOR and added to frames, which will be sent to the
+        client.
         """
         ensureserverstream(stream)
 
-        def sendframes():
-            for frame in createcommandresponseframesfrombytes(stream, requestid,
-                                                              data):
-                yield frame
-
-            self._activecommands.remove(requestid)
-
-        result = sendframes()
+        # A more robust solution would be to check for objs.{next,__next__}.
+        if isinstance(objs, list):
+            objs = iter(objs)
 
-        if self._deferoutput:
-            self._bufferedframegens.append(result)
-            return 'noop', {}
-        else:
-            return 'sendframes', {
-                'framegen': result,
-            }
-
-    def oncommandresponsereadygen(self, stream, requestid, gen):
-        """Signal that a bytes response is ready, with data as a generator."""
-        ensureserverstream(stream)
+        # We need to take care over exception handling. Uncaught exceptions
+        # when generating frames could lead to premature end of the frame
+        # stream and the possibility of the server or client process getting
+        # in a bad state.
+        #
+        # Keep in mind that if ``objs`` is a generator, advancing it could
+        # raise exceptions that originated in e.g. wire protocol command
+        # functions. That is why we differentiate between exceptions raised
+        # when iterating versus other exceptions that occur.
+        #
+        # In all cases, when the function finishes, the request is fully
+        # handled and no new frames for it should be seen.
 
         def sendframes():
-            for frame in createbytesresponseframesfromgen(stream, requestid,
-                                                          gen):
-                yield frame
+            emitted = False
+            alternatelocationsent = False
+            emitter = bufferingcommandresponseemitter(stream, requestid)
+            while True:
+                try:
+                    o = next(objs)
+                except StopIteration:
+                    for frame in emitter.send(None):
+                        yield frame
+
+                    if emitted:
+                        for frame in createcommandresponseeosframes(
+                            stream, requestid):
+                            yield frame
+                    break
+
+                except error.WireprotoCommandError as e:
+                    for frame in createcommanderrorresponse(
+                        stream, requestid, e.message, e.messageargs):
+                        yield frame
+                    break
+
+                except Exception as e:
+                    for frame in createerrorframe(
+                        stream, requestid, '%s' % stringutil.forcebytestr(e),
+                        errtype='server'):
+
+                        yield frame
+
+                    break
+
+                try:
+                    # Alternate location responses can only be the first and
+                    # only object in the output stream.
+                    if isinstance(o, wireprototypes.alternatelocationresponse):
+                        if emitted:
+                            raise error.ProgrammingError(
+                                'alternatelocationresponse seen after initial '
+                                'output object')
+
+                        frame = stream.makestreamsettingsframe(requestid)
+                        if frame:
+                            yield frame
+
+                        yield createalternatelocationresponseframe(
+                            stream, requestid, o)
+
+                        alternatelocationsent = True
+                        emitted = True
+                        continue
+
+                    if alternatelocationsent:
+                        raise error.ProgrammingError(
+                            'object follows alternatelocationresponse')
+
+                    if not emitted:
+                        # Frame is optional.
+                        frame = stream.makestreamsettingsframe(requestid)
+                        if frame:
+                            yield frame
+
+                        # May be None if the frame would be empty
+                        # (due to encoding).
+                        frame = createcommandresponseokframe(stream, requestid)
+                        if frame:
+                            yield frame
+
+                        emitted = True
+
+                    # Objects emitted by command functions can be serializable
+                    # data structures or special types.
+                    # TODO consider extracting the content normalization to a
+                    # standalone function, as it may be useful for e.g. cachers.
+
+                    # A pre-encoded object is sent directly to the emitter.
+                    if isinstance(o, wireprototypes.encodedresponse):
+                        for frame in emitter.send(o.data):
+                            yield frame
+
+                    elif isinstance(
+                        o, wireprototypes.indefinitebytestringresponse):
+                        for chunk in cborutil.streamencodebytestringfromiter(
+                            o.chunks):
+
+                            for frame in emitter.send(chunk):
+                                yield frame
+
+                    # A regular object is CBOR encoded.
+                    else:
+                        for chunk in cborutil.streamencode(o):
+                            for frame in emitter.send(chunk):
+                                yield frame
+
+                except Exception as e:
+                    for frame in createerrorframe(stream, requestid,
+                                                  '%s' % e,
+                                                  errtype='server'):
+                        yield frame
+
+                    break
 
             self._activecommands.remove(requestid)
 
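# The try/next pattern from sendframes() above, in isolation: calling
# next() inside its own try block lets the loop tell failures raised by
# the producer generator apart from normal end-of-iteration, mirroring
# how command exceptions become error frames instead of killing the
# frame stream.
def consume(producer):
    it = iter(producer)
    while True:
        try:
            item = next(it)
        except StopIteration:
            yield 'eos'            # cf. createcommandresponseeosframes()
            return
        except Exception as e:
            yield 'error: %s' % e  # cf. createerrorframe()
            return
        yield 'item: %s' % item

def faultyproducer():
    yield 1
    raise ValueError('upstream failure')

assert list(consume(faultyproducer())) == [
    'item: 1', 'error: upstream failure']
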
@@ -753,13 +1228,25 @@
         return self._handlesendframes(sendframes())
 
     def makeoutputstream(self):
-        """Create a stream to be used for sending data to the client."""
+        """Create a stream to be used for sending data to the client.
+
+        If this is called before protocol settings frames are received, we
+        don't know what stream encodings are supported by the client and
+        we will default to identity.
+        """
         streamid = self._nextoutgoingstreamid
         self._nextoutgoingstreamid += 2
 
-        s = stream(streamid)
+        s = outputstream(streamid)
         self._outgoingstreams[streamid] = s
 
+        # Always use the *server's* preferred encoder over the client's,
+        # as servers have more to lose from sub-optimal encoders being used.
+        for name in STREAM_ENCODERS_ORDER:
+            if name in self._sendersettings['contentencodings']:
+                s.setencoder(self._ui, name)
+                break
+
         return s
 
     def _makeerrorresult(self, msg):
@@ -784,7 +1271,7 @@
 
         # Decode the payloads as CBOR.
         entry['payload'].seek(0)
-        request = cbor.load(entry['payload'])
+        request = cborutil.decodeall(entry['payload'].getvalue())[0]
 
         if b'name' not in request:
             self._state = 'errored'
@@ -801,6 +1288,7 @@
             'requestid': requestid,
             'command': request[b'name'],
             'args': request[b'args'],
+            'redirect': request.get(b'redirect'),
             'data': entry['data'].getvalue() if entry['data'] else None,
         }
 
@@ -825,6 +1313,85 @@
                 _('received command request frame with neither new nor '
                   'continuation flags set'))
 
+    def _onframeinitial(self, frame):
+        # Called when we receive a frame when in the "initial" state.
+        if frame.typeid == FRAME_TYPE_SENDER_PROTOCOL_SETTINGS:
+            self._state = 'protocol-settings-receiving'
+            self._protocolsettingsdecoder = cborutil.bufferingdecoder()
+            return self._onframeprotocolsettings(frame)
+
+        elif frame.typeid == FRAME_TYPE_COMMAND_REQUEST:
+            self._state = 'idle'
+            return self._onframeidle(frame)
+
+        else:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('expected sender protocol settings or command request '
+                  'frame; got %d') % frame.typeid)
+
+    def _onframeprotocolsettings(self, frame):
+        assert self._state == 'protocol-settings-receiving'
+        assert self._protocolsettingsdecoder is not None
+
+        if frame.typeid != FRAME_TYPE_SENDER_PROTOCOL_SETTINGS:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('expected sender protocol settings frame; got %d') %
+                frame.typeid)
+
+        more = frame.flags & FLAG_SENDER_PROTOCOL_SETTINGS_CONTINUATION
+        eos = frame.flags & FLAG_SENDER_PROTOCOL_SETTINGS_EOS
+
+        if more and eos:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('sender protocol settings frame cannot have both '
+                  'continuation and end of stream flags set'))
+
+        if not more and not eos:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('sender protocol settings frame must have continuation or '
+                  'end of stream flag set'))
+
+        # TODO establish limits for maximum amount of data that can be
+        # buffered.
+        try:
+            self._protocolsettingsdecoder.decode(frame.payload)
+        except Exception as e:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('error decoding CBOR from sender protocol settings frame: %s')
+                % stringutil.forcebytestr(e))
+
+        if more:
+            return self._makewantframeresult()
+
+        assert eos
+
+        decoded = self._protocolsettingsdecoder.getavailable()
+        self._protocolsettingsdecoder = None
+
+        if not decoded:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('sender protocol settings frame did not contain CBOR data'))
+        elif len(decoded) > 1:
+            self._state = 'errored'
+            return self._makeerrorresult(
+                _('sender protocol settings frame contained multiple CBOR '
+                  'values'))
+
+        d = decoded[0]
+
+        if b'contentencodings' in d:
+            self._sendersettings['contentencodings'] = d[b'contentencodings']
+
+        self._state = 'idle'
+
+        return self._makewantframeresult()
+
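# The continuation/EOS validation above, reduced to its essence: both are
# single-bit flags and exactly one of them must be set on each settings
# frame. The flag values here are illustrative stand-ins for the real
# constants.
FLAG_CONTINUATION = 0x01
FLAG_EOS = 0x02

def checksettingsflags(flags):
    more = flags & FLAG_CONTINUATION
    eos = flags & FLAG_EOS
    if more and eos:
        return 'error: both flags set'
    if not more and not eos:
        return 'error: neither flag set'
    return 'expect more' if more else 'end of stream'

assert checksettingsflags(FLAG_EOS) == 'end of stream'
assert checksettingsflags(FLAG_CONTINUATION) == 'expect more'
assert checksettingsflags(FLAG_CONTINUATION | FLAG_EOS).startswith('error')
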
     def _onframeidle(self, frame):
         # The only frame type that should be received in this state is a
         # command request.
@@ -963,11 +1530,12 @@
 class commandrequest(object):
     """Represents a request to run a command."""
 
-    def __init__(self, requestid, name, args, datafh=None):
+    def __init__(self, requestid, name, args, datafh=None, redirect=None):
         self.requestid = requestid
         self.name = name
         self.args = args
         self.datafh = datafh
+        self.redirect = redirect
         self.state = 'pending'
 
 class clientreactor(object):
@@ -981,8 +1549,43 @@
     the TCP socket. For transports where there are multiple discrete
     interactions (say tunneled within an HTTP request), there will be a
     separate instance for each distinct interaction.
+
+    Consumers are expected to tell instances when events occur by calling
+    various methods. These methods return a 2-tuple describing any follow-up
+    action(s) to take. The first element is the name of an action to
+    perform. The second is a data structure (usually a dict) specific to
+    that action that contains more information. e.g. if the reactor wants
+    to send frames to the server, the data structure will contain a reference
+    to those frames.
+
+    Valid actions that consumers can be instructed to take are:
+
+    noop
+       Indicates no additional action is required.
+
+    sendframes
+       Indicates that frames should be sent to the server. The ``framegen``
+       key contains a generator of frames that should be sent. The reactor
+       assumes that all frames in this generator are sent to the server.
+
+    error
+       Indicates that an error occurred. The ``message`` key contains an
+       error message describing the failure.
+
+    responsedata
+       Indicates a response to a previously-issued command was received.
+
+       The ``request`` key contains the ``commandrequest`` instance that
+       represents the request this data is for.
+
+       The ``data`` key contains the decoded data from the server.
+
+       ``expectmore`` and ``eos`` evaluate to True when more response data
+       is expected to follow or we're at the end of the response stream,
+       respectively.
     """
-    def __init__(self, hasmultiplesend=False, buffersends=True):
+    def __init__(self, ui, hasmultiplesend=False, buffersends=True,
+                 clientcontentencoders=None):
         """Create a new instance.
 
         ``hasmultiplesend`` indicates whether multiple sends are supported
@@ -992,21 +1595,32 @@
 
         ``buffersends`` indicates whether sends should be buffered until the
         last request has been issued.
+
+        ``clientcontentencoders`` is an iterable of content encoders the client
+        will advertise to the server and that the server can use for encoding
+        data. If not defined, the client will not advertise content encoders
+        to the server.
         """
+        self._ui = ui
         self._hasmultiplesend = hasmultiplesend
         self._buffersends = buffersends
+        self._clientcontentencoders = clientcontentencoders
 
         self._canissuecommands = True
         self._cansend = True
+        self._protocolsettingssent = False
 
         self._nextrequestid = 1
         # We only support a single outgoing stream for now.
-        self._outgoingstream = stream(1)
+        self._outgoingstream = outputstream(1)
         self._pendingrequests = collections.deque()
         self._activerequests = {}
         self._incomingstreams = {}
+        self._streamsettingsdecoders = {}
 
-    def callcommand(self, name, args, datafh=None):
+        populatestreamencoders()
+
+    def callcommand(self, name, args, datafh=None, redirect=None):
         """Request that a command be executed.
 
         Receives the command name, a dict of arguments to pass to the command,
@@ -1020,7 +1634,8 @@
         requestid = self._nextrequestid
         self._nextrequestid += 2
 
-        request = commandrequest(requestid, name, args, datafh=datafh)
+        request = commandrequest(requestid, name, args, datafh=datafh,
+                                 redirect=redirect)
 
         if self._buffersends:
             self._pendingrequests.append(request)
@@ -1080,11 +1695,25 @@
         self._activerequests[request.requestid] = request
         request.state = 'sending'
 
+        if not self._protocolsettingssent and self._clientcontentencoders:
+            self._protocolsettingssent = True
+
+            payload = b''.join(cborutil.streamencode({
+                b'contentencodings': self._clientcontentencoders,
+            }))
+
+            yield self._outgoingstream.makeframe(
+                requestid=request.requestid,
+                typeid=FRAME_TYPE_SENDER_PROTOCOL_SETTINGS,
+                flags=FLAG_SENDER_PROTOCOL_SETTINGS_EOS,
+                payload=payload)
+
         res = createcommandframes(self._outgoingstream,
                                   request.requestid,
                                   request.name,
                                   request.args,
-                                  request.datafh)
+                                  datafh=request.datafh,
+                                  redirect=request.redirect)
 
         for frame in res:
             yield frame
@@ -1111,15 +1740,23 @@
                                  'without beginning of stream flag set'),
                 }
 
-            self._incomingstreams[frame.streamid] = stream(frame.streamid)
+            self._incomingstreams[frame.streamid] = inputstream(
+                frame.streamid)
 
+        stream = self._incomingstreams[frame.streamid]
+
+        # If the payload is encoded, ask the stream to decode it. We
+        # merely substitute the decoded result into the frame payload as
+        # if it had been transferred all along.
         if frame.streamflags & STREAM_FLAG_ENCODING_APPLIED:
-            raise error.ProgrammingError('support for decoding stream '
-                                         'payloads not yet implemneted')
+            frame.payload = stream.decode(frame.payload)
 
         if frame.streamflags & STREAM_FLAG_END_STREAM:
             del self._incomingstreams[frame.streamid]
 
+        if frame.typeid == FRAME_TYPE_STREAM_SETTINGS:
+            return self._onstreamsettingsframe(frame)
+
         if frame.requestid not in self._activerequests:
             return 'error', {
                 'message': (_('received frame for inactive request ID: %d') %
@@ -1141,6 +1778,65 @@
 
         return meth(request, frame)
 
+    def _onstreamsettingsframe(self, frame):
+        assert frame.typeid == FRAME_TYPE_STREAM_SETTINGS
+
+        more = frame.flags & FLAG_STREAM_ENCODING_SETTINGS_CONTINUATION
+        eos = frame.flags & FLAG_STREAM_ENCODING_SETTINGS_EOS
+
+        if more and eos:
+            return 'error', {
+                'message': (_('stream encoding settings frame cannot have both '
+                              'continuation and end of stream flags set')),
+            }
+
+        if not more and not eos:
+            return 'error', {
+                'message': _('stream encoding settings frame must have '
+                             'continuation or end of stream flag set'),
+            }
+
+        if frame.streamid not in self._streamsettingsdecoders:
+            decoder = cborutil.bufferingdecoder()
+            self._streamsettingsdecoders[frame.streamid] = decoder
+
+        decoder = self._streamsettingsdecoders[frame.streamid]
+
+        try:
+            decoder.decode(frame.payload)
+        except Exception as e:
+            return 'error', {
+                'message': (_('error decoding CBOR from stream encoding '
+                             'settings frame: %s') %
+                           stringutil.forcebytestr(e)),
+            }
+
+        if more:
+            return 'noop', {}
+
+        assert eos
+
+        decoded = decoder.getavailable()
+        del self._streamsettingsdecoders[frame.streamid]
+
+        if not decoded:
+            return 'error', {
+                'message': _('stream encoding settings frame did not contain '
+                             'CBOR data'),
+            }
+
+        try:
+            self._incomingstreams[frame.streamid].setdecoder(self._ui,
+                                                             decoded[0],
+                                                             decoded[1:])
+        except Exception as e:
+            return 'error', {
+                'message': (_('error setting stream decoder: %s') %
+                            stringutil.forcebytestr(e)),
+            }
+
+        return 'noop', {}
+
     def _oncommandresponseframe(self, request, frame):
         if frame.flags & FLAG_COMMAND_RESPONSE_EOS:
             request.state = 'received'
@@ -1158,7 +1854,7 @@
         del self._activerequests[request.requestid]
 
         # The payload should be a CBOR map.
-        m = cbor.loads(frame.payload)
+        m = cborutil.decodeall(frame.payload)[0]
 
         return 'error', {
             'request': request,
--- a/mercurial/wireprotoserver.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotoserver.py	Mon Oct 22 14:46:06 2018 -0400
@@ -12,9 +12,6 @@
 import threading
 
 from .i18n import _
-from .thirdparty import (
-    cbor,
-)
 from . import (
     encoding,
     error,
@@ -25,6 +22,7 @@
     wireprotov2server,
 )
 from .utils import (
+    cborutil,
     interfaceutil,
     procutil,
 )
@@ -389,7 +387,7 @@
 
     res.status = b'200 OK'
     res.headers[b'Content-Type'] = b'application/mercurial-cbor'
-    res.setbodybytes(cbor.dumps(m, canonical=True))
+    res.setbodybytes(b''.join(cborutil.streamencode(m)))
 
     return True
 
@@ -502,14 +500,14 @@
     def getargs(self, args):
         data = {}
         keys = args.split()
-        for n in xrange(len(keys)):
+        for n in pycompat.xrange(len(keys)):
             argline = self._fin.readline()[:-1]
             arg, l = argline.split()
             if arg not in keys:
                 raise error.Abort(_("unexpected parameter %r") % arg)
             if arg == '*':
                 star = {}
-                for k in xrange(int(l)):
+                for k in pycompat.xrange(int(l)):
                     argline = self._fin.readline()[:-1]
                     arg, l = argline.split()
                     val = self._fin.read(int(l))
--- a/mercurial/wireprototypes.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprototypes.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,6 +10,9 @@
     hex,
 )
 from .i18n import _
+from .thirdparty import (
+    attr,
+)
 from . import (
     error,
     util,
@@ -22,8 +25,11 @@
 SSHV1 = 'ssh-v1'
 # These are advertised over the wire. Increment the counters at the end
 # to reflect BC breakages.
-SSHV2 = 'exp-ssh-v2-0001'
-HTTP_WIREPROTO_V2 = 'exp-http-v2-0001'
+SSHV2 = 'exp-ssh-v2-0003'
+HTTP_WIREPROTO_V2 = 'exp-http-v2-0003'
+
+NARROWCAP = 'exp-narrow-1'
+ELLIPSESCAP = 'exp-ellipses-1'
 
 # All available wire protocol transports.
 TRANSPORTS = {
@@ -106,27 +112,6 @@
     def __init__(self, gen=None):
         self.gen = gen
 
-class cborresponse(object):
-    """Encode the response value as CBOR."""
-    def __init__(self, v):
-        self.value = v
-
-class v2errorresponse(object):
-    """Represents a command error for version 2 transports."""
-    def __init__(self, message, args=None):
-        self.message = message
-        self.args = args
-
-class v2streamingresponse(object):
-    """A response whose data is supplied by a generator.
-
-    The generator can either consist of data structures to CBOR
-    encode or a stream of already-encoded bytes.
-    """
-    def __init__(self, gen, compressible=True):
-        self.gen = gen
-        self.compressible = compressible
-
 # list of nodes encoding / decoding
 def decodelist(l, sep=' '):
     if l:
@@ -250,11 +235,13 @@
 class commandentry(object):
     """Represents a declared wire protocol command."""
     def __init__(self, func, args='', transports=None,
-                 permission='push'):
+                 permission='push', cachekeyfn=None, extracapabilitiesfn=None):
         self.func = func
         self.args = args
         self.transports = transports or set()
         self.permission = permission
+        self.cachekeyfn = cachekeyfn
+        self.extracapabilitiesfn = extracapabilitiesfn
 
     def _merge(self, func, args):
         """Merge this instance with an incoming 2-tuple.
@@ -373,3 +360,41 @@
                           ', '.join(sorted(validnames)))
 
     return compengines
+
+@attr.s
+class encodedresponse(object):
+    """Represents response data that is already content encoded.
+
+    Wire protocol version 2 only.
+
+    Commands typically emit Python objects that are encoded and sent over the
+    wire. If commands emit an object of this type, the encoding step is bypassed
+    and the content from this object is used instead.
+    """
+    data = attr.ib()
+
+@attr.s
+class alternatelocationresponse(object):
+    """Represents a response available at an alternate location.
+
+    Instances are sent in place of actual response objects when the server
+    is sending a "content redirect" response.
+
+    Only compatible with wire protocol version 2.
+    """
+    url = attr.ib()
+    mediatype = attr.ib()
+    size = attr.ib(default=None)
+    fullhashes = attr.ib(default=None)
+    fullhashseed = attr.ib(default=None)
+    serverdercerts = attr.ib(default=None)
+    servercadercerts = attr.ib(default=None)
+
+@attr.s
+class indefinitebytestringresponse(object):
+    """Represents an object to be encoded to an indefinite length bytestring.
+
+    Instances are initialized from an iterable of chunks, with each chunk being
+    a bytes instance.
+    """
+    chunks = attr.ib()
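
# What an indefinitebytestringresponse becomes on the wire, sketched with
# a hand-rolled encoder for short chunks (the real encoding is done by
# cborutil.streamencodebytestringfromiter): a 0x5f header opens an
# indefinite-length CBOR bytestring, each chunk is emitted as a
# definite-length bytestring, and 0xff terminates the sequence.
def encodeindefinitebytestring(chunks):
    yield b'\x5f'                  # major type 2, indefinite length
    for chunk in chunks:
        assert len(chunk) < 24     # length fits in the initial byte
        yield bytes([0x40 | len(chunk)]) + chunk
    yield b'\xff'                  # "break" terminator

wire = b''.join(encodeindefinitebytestring([b'abc', b'de']))
assert wire == b'\x5f\x43abc\x42de\xff'
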
--- a/mercurial/wireprotov1peer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotov1peer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -64,6 +64,7 @@
         encresref.set(self._submitone(cmd, encargsorres))
         return next(batchable)
     setattr(plain, 'batchable', f)
+    setattr(plain, '__name__', f.__name__)
     return plain
 
 class future(object):
@@ -497,7 +498,7 @@
     def between(self, pairs):
         batch = 8 # avoid giant requests
         r = []
-        for i in xrange(0, len(pairs), batch):
+        for i in pycompat.xrange(0, len(pairs), batch):
             n = " ".join([wireprototypes.encodelist(p, '-')
                           for p in pairs[i:i + batch]])
             d = self._call("between", pairs=n)
--- a/mercurial/wireprotov1server.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotov1server.py	Mon Oct 22 14:46:06 2018 -0400
@@ -286,6 +286,11 @@
         caps.append('bundle2=' + urlreq.quote(capsblob))
     caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
 
+    if repo.ui.configbool('experimental', 'narrow'):
+        caps.append(wireprototypes.NARROWCAP)
+        if repo.ui.configbool('experimental', 'narrowservebrokenellipses'):
+            caps.append(wireprototypes.ELLIPSESCAP)
+
     return proto.addcapabilities(repo, caps)
 
 # If you are writing an extension and consider wrapping this function. Wrap
--- a/mercurial/wireprotov2peer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotov2peer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,15 +7,21 @@
 
 from __future__ import absolute_import
 
+import threading
+
 from .i18n import _
-from .thirdparty import (
-    cbor,
-)
 from . import (
     encoding,
     error,
+    pycompat,
+    sslutil,
+    url as urlmod,
     util,
     wireprotoframing,
+    wireprototypes,
+)
+from .utils import (
+    cborutil,
 )
 
 def formatrichmessage(atoms):
@@ -27,30 +33,209 @@
         msg = _(atom[b'msg'])
 
         if b'args' in atom:
-            msg = msg % atom[b'args']
+            msg = msg % tuple(atom[b'args'])
 
         chunks.append(msg)
 
     return b''.join(chunks)
 
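# Why the tuple() added above matters: CBOR arrays decode to Python lists,
# and %-formatting treats a list as one value rather than as an argument
# sequence, so multi-argument messages would fail to interpolate.
args = [b'alice', b'3']                # as decoded from a CBOR array
try:
    b'user %s has %s repos' % args     # a list is a single format value
    raise AssertionError('expected TypeError')
except TypeError:
    pass
assert b'user %s has %s repos' % tuple(args) == b'user alice has 3 repos'
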
+SUPPORTED_REDIRECT_PROTOCOLS = {
+    b'http',
+    b'https',
+}
+
+SUPPORTED_CONTENT_HASHES = {
+    b'sha1',
+    b'sha256',
+}
+
+def redirecttargetsupported(ui, target):
+    """Determine whether a redirect target entry is supported.
+
+    ``target`` should come from the capabilities data structure emitted by
+    the server.
+    """
+    if target.get(b'protocol') not in SUPPORTED_REDIRECT_PROTOCOLS:
+        ui.note(_('(remote redirect target %s uses unsupported protocol: %s)\n')
+                % (target[b'name'], target.get(b'protocol', b'')))
+        return False
+
+    if target.get(b'snirequired') and not sslutil.hassni:
+        ui.note(_('(redirect target %s requires SNI, which is unsupported)\n') %
+                target[b'name'])
+        return False
+
+    if b'tlsversions' in target:
+        tlsversions = set(target[b'tlsversions'])
+        supported = set()
+
+        for v in sslutil.supportedprotocols:
+            assert v.startswith(b'tls')
+            supported.add(v[3:])
+
+        if not tlsversions & supported:
+            ui.note(_('(remote redirect target %s requires unsupported TLS '
+                      'versions: %s)\n') % (
+                target[b'name'], b', '.join(sorted(tlsversions))))
+            return False
+
+    ui.note(_('(remote redirect target %s is compatible)\n') % target[b'name'])
+
+    return True
+
+def supportedredirects(ui, apidescriptor):
+    """Resolve the "redirect" command request key given an API descriptor.
+
+    Given an API descriptor returned by the server, returns a data structure
+    that can be used in the "redirect" field of command requests to advertise
+    support for compatible redirect targets.
+
+    Returns None if no redirect targets are remotely advertised or if none are
+    supported.
+    """
+    if not apidescriptor or b'redirect' not in apidescriptor:
+        return None
+
+    targets = [t[b'name'] for t in apidescriptor[b'redirect'][b'targets']
+               if redirecttargetsupported(ui, t)]
+
+    hashes = [h for h in apidescriptor[b'redirect'][b'hashes']
+              if h in SUPPORTED_CONTENT_HASHES]
+
+    return {
+        b'targets': targets,
+        b'hashes': hashes,
+    }
+
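# A worked example of the filtering above, with an invented descriptor and
# the support checks simplified down to the protocol and hash allowlists
# (the real redirecttargetsupported also checks SNI and TLS versions).
descriptor = {
    b'redirect': {
        b'targets': [
            {b'name': b'cdn', b'protocol': b'https'},
            {b'name': b'lan', b'protocol': b'ftp'},  # unsupported protocol
        ],
        b'hashes': [b'sha256', b'md5'],              # md5 unsupported
    },
}

targets = [t[b'name'] for t in descriptor[b'redirect'][b'targets']
           if t[b'protocol'] in {b'http', b'https'}]
hashes = [h for h in descriptor[b'redirect'][b'hashes']
          if h in {b'sha1', b'sha256'}]

assert {b'targets': targets, b'hashes': hashes} == {
    b'targets': [b'cdn'],
    b'hashes': [b'sha256'],
}
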
 class commandresponse(object):
-    """Represents the response to a command request."""
+    """Represents the response to a command request.
+
+    Instances track the state of the command and hold its results.
 
-    def __init__(self, requestid, command):
+    An external entity is required to update the state of the object when
+    events occur.
+    """
+
+    def __init__(self, requestid, command, fromredirect=False):
         self.requestid = requestid
         self.command = command
+        self.fromredirect = fromredirect
 
-        self.b = util.bytesio()
+        # Whether all remote input related to this command has been
+        # received.
+        self._inputcomplete = False
+
+        # We have a lock that is acquired when important object state is
+        # mutated. This is to prevent race conditions between 1 thread
+        # sending us new data and another consuming it.
+        self._lock = threading.RLock()
+
+        # An event is set when state of the object changes. This event
+        # is waited on by the generator emitting objects.
+        self._serviceable = threading.Event()
+
+        self._pendingevents = []
+        self._pendingerror = None
+        self._decoder = cborutil.bufferingdecoder()
+        self._seeninitial = False
+        self._redirect = None
+
+    def _oninputcomplete(self):
+        with self._lock:
+            self._inputcomplete = True
+            self._serviceable.set()
+
+    def _onresponsedata(self, data):
+        available, readcount, wanted = self._decoder.decode(data)
+
+        if not available:
+            return
+
+        with self._lock:
+            for o in self._decoder.getavailable():
+                if not self._seeninitial and not self.fromredirect:
+                    self._handleinitial(o)
+                    continue
+
+                # We should never see an object after a content redirect,
+                # as the spec says the main status object containing the
+                # content redirect is the only object in the stream. Fail
+                # if we see a misbehaving server.
+                if self._redirect:
+                    raise error.Abort(_('received unexpected response data '
+                                        'after content redirect; the remote is '
+                                        'buggy'))
+
+                self._pendingevents.append(o)
+
+            self._serviceable.set()
+
+    def _onerror(self, e):
+        self._pendingerror = e
+
+        with self._lock:
+            self._serviceable.set()
 
-    def cborobjects(self):
-        """Obtain decoded CBOR objects from this response."""
-        size = self.b.tell()
-        self.b.seek(0)
+    def _handleinitial(self, o):
+        self._seeninitial = True
+        if o[b'status'] == b'ok':
+            return
+
+        elif o[b'status'] == b'redirect':
+            l = o[b'location']
+            self._redirect = wireprototypes.alternatelocationresponse(
+                url=l[b'url'],
+                mediatype=l[b'mediatype'],
+                size=l.get(b'size'),
+                fullhashes=l.get(b'fullhashes'),
+                fullhashseed=l.get(b'fullhashseed'),
+                serverdercerts=l.get(b'serverdercerts'),
+                servercadercerts=l.get(b'servercadercerts'))
+            return
+
+        atoms = [{'msg': o[b'error'][b'message']}]
+        if b'args' in o[b'error']:
+            atoms[0]['args'] = o[b'error'][b'args']
+
+        raise error.RepoError(formatrichmessage(atoms))
+
+    def objects(self):
+        """Obtained decoded objects from this response.
+
+        This is a generator of data structures that were decoded from the
+        command response.
 
-        decoder = cbor.CBORDecoder(self.b)
+        Obtaining the next member of the generator may block due to waiting
+        on external data to become available.
+
+        If the server encountered an error in the middle of serving the data
+        or if another error occurred, an exception may be raised when
+        advancing the generator.
+        """
+        while True:
+            # TODO this can loop forever if self._inputcomplete is never
+            # set. We likely want to tie the lifetime of this object/state
+            # to that of the background thread receiving frames and updating
+            # our state.
+            self._serviceable.wait(1.0)
 
-        while self.b.tell() < size:
-            yield decoder.decode()
+            if self._pendingerror:
+                raise self._pendingerror
+
+            with self._lock:
+                self._serviceable.clear()
+
+                # Make copies because objects could be mutated during
+                # iteration.
+                stop = self._inputcomplete
+                pending = list(self._pendingevents)
+                self._pendingevents[:] = []
+
+            for o in pending:
+                yield o
+
+            if stop:
+                break
 
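# The lock/event choreography used by commandresponse, as a runnable
# miniature: the producer appends under the lock and sets the event; the
# consumer waits, snapshots pending items under the lock, then yields
# outside of it, mirroring objects() above.
import threading

lock = threading.RLock()
serviceable = threading.Event()
pending = []
complete = []

def produce():
    for i in range(3):
        with lock:
            pending.append(i)
            serviceable.set()
    with lock:
        complete.append(True)
        serviceable.set()

def consume():
    while True:
        serviceable.wait(1.0)
        with lock:
            serviceable.clear()
            stop = bool(complete)
            items = list(pending)
            pending[:] = []
        for item in items:
            yield item
        if stop:
            break

producer = threading.Thread(target=produce)
producer.start()
assert list(consume()) == [0, 1, 2]
producer.join()
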
 class clienthandler(object):
     """Object to handle higher-level client activities.
@@ -63,19 +248,25 @@
     with the higher-level peer API.
     """
 
-    def __init__(self, ui, clientreactor):
+    def __init__(self, ui, clientreactor, opener=None,
+                 requestbuilder=util.urlreq.request):
         self._ui = ui
         self._reactor = clientreactor
         self._requests = {}
         self._futures = {}
         self._responses = {}
+        self._redirects = []
+        self._frameseof = False
+        self._opener = opener or urlmod.opener(ui)
+        self._requestbuilder = requestbuilder
 
-    def callcommand(self, command, args, f):
+    def callcommand(self, command, args, f, redirect=None):
         """Register a request to call a command.
 
         Returns an iterable of frames that should be sent over the wire.
         """
-        request, action, meta = self._reactor.callcommand(command, args)
+        request, action, meta = self._reactor.callcommand(command, args,
+                                                          redirect=redirect)
 
         if action != 'noop':
             raise error.ProgrammingError('%s not yet supported' % action)
@@ -83,6 +274,8 @@
         rid = request.requestid
         self._requests[rid] = request
         self._futures[rid] = f
+        # TODO we need some kind of lifetime on response instances;
+        # otherwise objects() may deadlock.
         self._responses[rid] = commandresponse(rid, command)
 
         return iter(())
@@ -99,18 +292,28 @@
 
         return meta['framegen']
 
-    def readframe(self, fh):
-        """Attempt to read and process a frame.
+    def readdata(self, framefh):
+        """Attempt to read data and do work.
 
-        Returns None if no frame was read. Presumably this means EOF.
+        Returns None if no data was read. Presumably this means we're
+        done with all read I/O.
         """
-        frame = wireprotoframing.readframe(fh)
-        if frame is None:
-            # TODO tell reactor?
-            return
+        if not self._frameseof:
+            frame = wireprotoframing.readframe(framefh)
+            if frame is None:
+                # TODO tell reactor?
+                self._frameseof = True
+            else:
+                self._ui.note(_('received %r\n') % frame)
+                self._processframe(frame)
 
-        self._ui.note(_('received %r\n') % frame)
-        self._processframe(frame)
+        # Also try to read the first redirect.
+        if self._redirects:
+            if not self._processredirect(*self._redirects[0]):
+                self._redirects.pop(0)
+
+        if self._frameseof and not self._redirects:
+            return None
 
         return True
 
@@ -122,11 +325,24 @@
         if action == 'error':
             e = error.RepoError(meta['message'])
 
+            if frame.requestid in self._responses:
+                self._responses[frame.requestid]._oninputcomplete()
+
             if frame.requestid in self._futures:
                 self._futures[frame.requestid].set_exception(e)
+                del self._futures[frame.requestid]
             else:
                 raise e
 
+            return
+        elif action == 'noop':
+            return
+        elif action == 'responsedata':
+            # Handled below.
+            pass
+        else:
+            raise error.ProgrammingError('action not handled: %s' % action)
+
         if frame.requestid not in self._requests:
             raise error.ProgrammingError(
                 'received frame for unknown request; this is either a bug in '
@@ -136,35 +352,142 @@
         response = self._responses[frame.requestid]
 
         if action == 'responsedata':
-            response.b.write(meta['data'])
-
-            if meta['eos']:
-                # If the command has a decoder, resolve the future to the
-                # decoded value. Otherwise resolve to the rich response object.
-                decoder = COMMAND_DECODERS.get(response.command)
-
-                # TODO consider always resolving the overall status map.
-                if decoder:
-                    objs = response.cborobjects()
-
-                    overall = next(objs)
-
-                    if overall['status'] == 'ok':
-                        self._futures[frame.requestid].set_result(decoder(objs))
-                    else:
-                        e = error.RepoError(
-                            formatrichmessage(overall['error']['message']))
-                        self._futures[frame.requestid].set_exception(e)
+            # Any failures processing this frame should bubble up to the
+            # future tracking the request.
+            try:
+                self._processresponsedata(frame, meta, response)
+            except BaseException as e:
+                # If an exception occurs before the future is resolved,
+                # fail the future. Otherwise, we stuff the exception on
+                # the response object so it can be raised during objects()
+                # iteration. If nothing is consuming objects(), we could
+                # silently swallow this exception. That's a risk we'll have to
+                # take.
+                if frame.requestid in self._futures:
+                    self._futures[frame.requestid].set_exception(e)
+                    del self._futures[frame.requestid]
+                    response._oninputcomplete()
                 else:
-                    self._futures[frame.requestid].set_result(response)
-
-                del self._requests[frame.requestid]
-                del self._futures[frame.requestid]
-
+                    response._onerror(e)
         else:
             raise error.ProgrammingError(
                 'unhandled action from clientreactor: %s' % action)
 
+    def _processresponsedata(self, frame, meta, response):
+        # This can raise. The caller can handle it.
+        response._onresponsedata(meta['data'])
+
+        # If we got a content redirect response, we want to fetch it and
+        # expose the data as if we received it inline. But we also want to
+        # keep our internal request accounting in order. Our strategy is to
+        # basically put meaningful response handling on pause until EOS occurs
+        # and the stream accounting is in a good state. At that point, we follow
+        # the redirect and replace the response object with its data.
+
+        redirect = response._redirect
+        handlefuture = not redirect
+
+        if meta['eos']:
+            response._oninputcomplete()
+            del self._requests[frame.requestid]
+
+            if redirect:
+                self._followredirect(frame.requestid, redirect)
+                return
+
+        if not handlefuture:
+            return
+
+        # If the command has a decoder, we wait until all input has been
+        # received before resolving the future. Otherwise we resolve the
+        # future immediately.
+        if frame.requestid not in self._futures:
+            return
+
+        if response.command not in COMMAND_DECODERS:
+            self._futures[frame.requestid].set_result(response.objects())
+            del self._futures[frame.requestid]
+        elif response._inputcomplete:
+            decoded = COMMAND_DECODERS[response.command](response.objects())
+            self._futures[frame.requestid].set_result(decoded)
+            del self._futures[frame.requestid]
+
+    def _followredirect(self, requestid, redirect):
+        """Called to initiate redirect following for a request."""
+        self._ui.note(_('(following redirect to %s)\n') % redirect.url)
+
+        # TODO handle framed responses.
+        if redirect.mediatype != b'application/mercurial-cbor':
+            raise error.Abort(_('cannot handle redirects for the %s media type')
+                              % redirect.mediatype)
+
+        if redirect.fullhashes:
+            self._ui.warn(_('(validating hashes on content redirects '
+                            'is not supported)\n'))
+
+        if redirect.serverdercerts or redirect.servercadercerts:
+            self._ui.warn(_('(pinning server certificates on content '
+                            'redirects is not supported)\n'))
+
+        headers = {
+            r'Accept': redirect.mediatype,
+        }
+
+        req = self._requestbuilder(pycompat.strurl(redirect.url), None, headers)
+
+        try:
+            res = self._opener.open(req)
+        except util.urlerr.httperror as e:
+            if e.code == 401:
+                raise error.Abort(_('authorization failed'))
+            raise
+        except util.httplib.HTTPException as e:
+            self._ui.debug('http error requesting %s\n' % req.get_full_url())
+            self._ui.traceback()
+            raise IOError(None, e)
+
+        urlmod.wrapresponse(res)
+
+        # The existing response object is associated with frame data. Rather
+        # than try to normalize its state, just create a new object.
+        oldresponse = self._responses[requestid]
+        self._responses[requestid] = commandresponse(requestid,
+                                                     oldresponse.command,
+                                                     fromredirect=True)
+
+        self._redirects.append((requestid, res))
+
+    def _processredirect(self, rid, res):
+        """Called to continue processing a response from a redirect."""
+        response = self._responses[rid]
+
+        try:
+            data = res.read(32768)
+            response._onresponsedata(data)
+
+            # We're at end of stream.
+            if not data:
+                response._oninputcomplete()
+
+            if rid not in self._futures:
+                return
+
+            if response.command not in COMMAND_DECODERS:
+                self._futures[rid].set_result(response.objects())
+                del self._futures[rid]
+            elif response._inputcomplete:
+                decoded = COMMAND_DECODERS[response.command](response.objects())
+                self._futures[rid].set_result(decoded)
+                del self._futures[rid]
+
+            return bool(data)
+
+        except BaseException as e:
+            self._futures[rid].set_exception(e)
+            del self._futures[rid]
+            response._oninputcomplete()
+            return False
+
 def decodebranchmap(objs):
     # Response should be a single CBOR map of branch name to array of nodes.
     bm = next(objs)
--- a/mercurial/wireprotov2server.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/mercurial/wireprotov2server.py	Mon Oct 22 14:46:06 2018 -0400
@@ -6,15 +6,21 @@
 
 from __future__ import absolute_import
 
+import collections
 import contextlib
+import hashlib
 
 from .i18n import _
-from .thirdparty import (
-    cbor,
+from .node import (
+    hex,
+    nullid,
 )
 from . import (
+    discovery,
     encoding,
     error,
+    match as matchmod,
+    narrowspec,
     pycompat,
     streamclone,
     util,
@@ -22,15 +28,22 @@
     wireprototypes,
 )
 from .utils import (
+    cborutil,
     interfaceutil,
+    stringutil,
 )
 
-FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
+FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
 
 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
 
 COMMANDS = wireprototypes.commanddict()
 
+# Value inserted into the cache key derivation. Bump the value to
+# force new cache keys for every command request. This should be done
+# whenever there is a change to how caching works, etc.
+GLOBAL_CACHE_VERSION = 1
+
 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
     from .hgweb import common as hgwebcommon
 
@@ -147,7 +160,7 @@
 
     # We assume we have a unified framing protocol request body.
 
-    reactor = wireprotoframing.serverreactor()
+    reactor = wireprotoframing.serverreactor(ui)
     states = []
 
     while True:
@@ -182,10 +195,10 @@
     # TODO Some HTTP clients are full duplex and can receive data before
     # the entire request is transmitted. Figure out a way to indicate support
     # for that so we can opt into full duplex mode.
-    reactor = wireprotoframing.serverreactor(deferoutput=True)
+    reactor = wireprotoframing.serverreactor(ui, deferoutput=True)
     seencommand = False
 
-    outstream = reactor.makeoutputstream()
+    outstream = None
 
     while True:
         frame = wireprotoframing.readframe(req.bodyfh)
@@ -198,6 +211,11 @@
             # Need more data before we can do anything.
             continue
         elif action == 'runcommand':
+            # Defer creating output stream because we need to wait for
+            # protocol settings frames so proper encoding can be applied.
+            if not outstream:
+                outstream = reactor.makeoutputstream()
+
             sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
                                            reqcommand, reactor, outstream,
                                            meta, issubsequent=seencommand)
@@ -297,28 +315,24 @@
             res.setbodybytes(_('command in frame must match command in URL'))
             return True
 
-    rsp = dispatch(repo, proto, command['command'])
-
     res.status = b'200 OK'
     res.headers[b'Content-Type'] = FRAMINGTYPE
 
-    if isinstance(rsp, wireprototypes.cborresponse):
-        encoded = cbor.dumps(rsp.value, canonical=True)
-        action, meta = reactor.oncommandresponseready(outstream,
-                                                      command['requestid'],
-                                                      encoded)
-    elif isinstance(rsp, wireprototypes.v2streamingresponse):
-        action, meta = reactor.oncommandresponsereadygen(outstream,
-                                                         command['requestid'],
-                                                         rsp.gen)
-    elif isinstance(rsp, wireprototypes.v2errorresponse):
-        action, meta = reactor.oncommanderror(outstream,
-                                              command['requestid'],
-                                              rsp.message,
-                                              rsp.args)
-    else:
+    try:
+        objs = dispatch(repo, proto, command['command'], command['redirect'])
+
+        action, meta = reactor.oncommandresponsereadyobjects(
+            outstream, command['requestid'], objs)
+
+    except error.WireprotoCommandError as e:
+        action, meta = reactor.oncommanderror(
+            outstream, command['requestid'], e.message, e.messageargs)
+
+    except Exception as e:
         action, meta = reactor.onservererror(
-            _('unhandled response type from wire proto command'))
+            outstream, command['requestid'],
+            _('exception when invoking command: %s') %
+            stringutil.forcebytestr(e))
 
     if action == 'sendframes':
         res.setbodygen(meta['framegen'])
@@ -332,13 +346,74 @@
 def getdispatchrepo(repo, proto, command):
     return repo.filtered('served')
 
-def dispatch(repo, proto, command):
+def dispatch(repo, proto, command, redirect):
+    """Run a wire protocol command.
+
+    Returns an iterable of objects that will be sent to the client.
+    """
     repo = getdispatchrepo(repo, proto, command)
 
-    func, spec = COMMANDS[command]
+    entry = COMMANDS[command]
+    func = entry.func
+    spec = entry.args
+
     args = proto.getargs(spec)
 
-    return func(repo, proto, **args)
+    # There is some duplicated boilerplate here for calling the command and
+    # emitting objects. The alternative is deeply indented, pyramid-shaped
+    # code, since many code paths end up not using the cacher.
+    callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))
+
+    # Request is not cacheable. Don't bother instantiating a cacher.
+    if not entry.cachekeyfn:
+        for o in callcommand():
+            yield o
+        return
+
+    if redirect:
+        redirecttargets = redirect[b'targets']
+        redirecthashes = redirect[b'hashes']
+    else:
+        redirecttargets = []
+        redirecthashes = []
+
+    cacher = makeresponsecacher(repo, proto, command, args,
+                                cborutil.streamencode,
+                                redirecttargets=redirecttargets,
+                                redirecthashes=redirecthashes)
+
+    # makeresponsecacher() may return None. Fall back to default handling.
+    if not cacher:
+        for o in callcommand():
+            yield o
+        return
+
+    with cacher:
+        cachekey = entry.cachekeyfn(repo, proto, cacher, **args)
+
+        # No cache key or the cacher doesn't like it. Do default handling.
+        if cachekey is None or not cacher.setcachekey(cachekey):
+            for o in callcommand():
+                yield o
+            return
+
+        # Serve it from the cache, if possible.
+        cached = cacher.lookup()
+
+        if cached:
+            for o in cached['objs']:
+                yield o
+            return
+
+        # Else call the command and feed its output into the cacher, allowing
+        # the cacher to buffer/mutate objects as it desires.
+        for obj in callcommand():
+            for o in cacher.onobject(obj):
+                yield o
+
+        for o in cacher.onfinished():
+            yield o
 
 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
 class httpv2protocolhandler(object):
@@ -352,13 +427,39 @@
         return HTTP_WIREPROTO_V2
 
     def getargs(self, args):
+        # First look for args that were passed but aren't registered on this
+        # command.
+        extra = set(self._args) - set(args)
+        if extra:
+            raise error.WireprotoCommandError(
+                'unsupported argument to command: %s' %
+                ', '.join(sorted(extra)))
+
+        # And look for required arguments that are missing.
+        missing = {a for a in args if args[a]['required']} - set(self._args)
+
+        if missing:
+            raise error.WireprotoCommandError(
+                'missing required arguments: %s' % ', '.join(sorted(missing)))
+
+        # Now derive the arguments to pass to the command, taking into
+        # account the arguments specified by the client.
         data = {}
-        for k, typ in args.items():
-            if k == '*':
-                raise NotImplementedError('do not support * args')
-            elif k in self._args:
-                # TODO consider validating value types.
-                data[k] = self._args[k]
+        for k, meta in sorted(args.items()):
+            # This argument wasn't passed by the client.
+            if k not in self._args:
+                data[k] = meta['default']()
+                continue
+
+            v = self._args[k]
+
+            # Sets may be expressed as lists. Silently normalize.
+            if meta['type'] == 'set' and isinstance(v, list):
+                v = set(v)
+
+            # TODO consider more/stronger type validation.
+
+            data[k] = v
 
         return data
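# Editor's sketch of the normalization rule applied above: clients may
# transmit sets as CBOR arrays, so list values for 'set'-typed arguments
# are silently coerced. The helper name is hypothetical.
def _normalizearg(meta, value):
    if meta['type'] == 'set' and isinstance(value, list):
        return set(value)
    return value

assert _normalizearg({'type': 'set'}, [b'parents', b'revision']) == {
    b'parents', b'revision'}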
 
@@ -393,42 +494,157 @@
     These capabilities are distinct from the capabilities for version 1
     transports.
     """
-    compression = []
-    for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
-        compression.append({
-            b'name': engine.wireprotosupport().name,
-        })
-
     caps = {
         'commands': {},
-        'compression': compression,
         'framingmediatypes': [FRAMINGTYPE],
+        'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
     }
 
     for command, entry in COMMANDS.items():
+        args = {}
+
+        for arg, meta in entry.args.items():
+            args[arg] = {
+                # TODO should this be a normalized type using CBOR's
+                # terminology?
+                b'type': meta['type'],
+                b'required': meta['required'],
+            }
+
+            if not meta['required']:
+                args[arg][b'default'] = meta['default']()
+
+            if meta['validvalues']:
+                args[arg][b'validvalues'] = meta['validvalues']
+
+        # TODO this type of check should be defined in a per-command callback.
+        if (command == b'rawstorefiledata'
+            and not streamclone.allowservergeneration(repo)):
+            continue
+
         caps['commands'][command] = {
-            'args': entry.args,
+            'args': args,
             'permissions': [entry.permission],
         }
 
-    if streamclone.allowservergeneration(repo):
-        caps['rawrepoformats'] = sorted(repo.requirements &
-                                        repo.supportedformats)
+        if entry.extracapabilitiesfn:
+            extracaps = entry.extracapabilitiesfn(repo, proto)
+            caps['commands'][command].update(extracaps)
+
+    caps['rawrepoformats'] = sorted(repo.requirements &
+                                    repo.supportedformats)
+
+    targets = getadvertisedredirecttargets(repo, proto)
+    if targets:
+        caps[b'redirect'] = {
+            b'targets': [],
+            b'hashes': [b'sha256', b'sha1'],
+        }
+
+        for target in targets:
+            entry = {
+                b'name': target['name'],
+                b'protocol': target['protocol'],
+                b'uris': target['uris'],
+            }
+
+            for key in ('snirequired', 'tlsversions'):
+                if key in target:
+                    entry[key] = target[key]
+
+            caps[b'redirect'][b'targets'].append(entry)
 
     return proto.addcapabilities(repo, caps)
 
-def wireprotocommand(name, args=None, permission='push'):
+def getadvertisedredirecttargets(repo, proto):
+    """Obtain a list of content redirect targets.
+
+    Returns a list containing potential redirect targets that will be
+    advertised in capabilities data. Each dict MUST have the following
+    keys:
+
+    name
+       The name of this redirect target. This is the identifier clients use
+       to refer to a target. It is transferred as part of every command
+       request.
+
+    protocol
+       Network protocol used by this target. Typically this is the string
+       in front of the ``://`` in a URL. e.g. ``https``.
+
+    uris
+       List of representative URIs for this target. Clients can use the
+       URIs to test parsing for compatibility or for ordering preference
+       for which target to use.
+
+    The following optional keys are recognized:
+
+    snirequired
+       Bool indicating if Server Name Indication (SNI) is required to
+       connect to this target.
+
+    tlsversions
+       List of bytes indicating which TLS versions are supported by this
+       target.
+
+    By default, clients reflect the target order advertised by servers,
+    and servers use the first client-advertised target when picking a
+    redirect target. So targets should be advertised in the order the
+    server prefers they be used.
+    """
+    return []
+
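# Editor's sketch: an extension could monkeypatch
# getadvertisedredirecttargets() to advertise a CDN as a redirect target.
# The name and URI are hypothetical; the keys follow the schema documented
# in the docstring above.
def getadvertisedredirecttargets(repo, proto):
    return [{
        b'name': b'cdn',
        b'protocol': b'https',
        b'uris': [b'https://cdn.example.com/'],
        b'snirequired': True,
    }]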
+def wireprotocommand(name, args=None, permission='push', cachekeyfn=None,
+                     extracapabilitiesfn=None):
     """Decorator to declare a wire protocol command.
 
     ``name`` is the name of the wire protocol command being provided.
 
-    ``args`` is a dict of argument names to example values.
+    ``args`` is a dict defining arguments accepted by the command. Keys are
+    the argument name. Values are dicts with the following keys:
+
+       ``type``
+          The argument data type. Must be one of the following string
+          literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
+          or ``bool``.
+
+       ``default``
+          A callable returning the default value for this argument. If not
+          specified, ``None`` will be the default value.
+
+       ``example``
+          An example value for this argument.
+
+       ``validvalues``
+          Set of recognized values for this argument.
 
     ``permission`` defines the permission type needed to run this command.
     Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
     respectively. Default is to assume command requires ``push`` permissions
     because otherwise commands not declaring their permissions could modify
     a repository that is supposed to be read-only.
+
+    ``cachekeyfn`` defines an optional callable that can derive the
+    cache key for this request.
+
+    ``extracapabilitiesfn`` defines an optional callable that defines extra
+    command capabilities/parameters that are advertised next to the command
+    in the capabilities data structure describing the server. The callable
+    receives as arguments the repository and protocol objects. It returns
+    a dict of extra fields to add to the command descriptor.
+
+    Wire protocol commands are generators of objects to be serialized and
+    sent to the client.
+
+    If a command raises an uncaught exception, this will be translated into
+    a command error.
+
+    All commands can opt in to being cacheable by defining a function
+    (``cachekeyfn``) that is called to derive a cache key. This function
+    receives the same arguments as the command itself plus a ``cacher``
+    argument containing the active cacher for the request. It returns bytes
+    containing the cache key under which the response to this command may be
+    cached.
     """
     transports = {k for k, v in wireprototypes.TRANSPORTS.items()
                   if v['version'] == 2}
@@ -445,90 +661,800 @@
         raise error.ProgrammingError('arguments for version 2 commands '
                                      'must be declared as dicts')
 
+    for arg, meta in args.items():
+        if arg == '*':
+            raise error.ProgrammingError('* argument name not allowed on '
+                                         'version 2 commands')
+
+        if not isinstance(meta, dict):
+            raise error.ProgrammingError('arguments for version 2 commands '
+                                         'must declare metadata as a dict')
+
+        if 'type' not in meta:
+            raise error.ProgrammingError('%s argument for command %s does not '
+                                         'declare type field' % (arg, name))
+
+        if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
+            raise error.ProgrammingError('%s argument for command %s has '
+                                         'illegal type: %s' % (arg, name,
+                                                               meta['type']))
+
+        if 'example' not in meta:
+            raise error.ProgrammingError('%s argument for command %s does not '
+                                         'declare example field' % (arg, name))
+
+        meta['required'] = 'default' not in meta
+
+        meta.setdefault('default', lambda: None)
+        meta.setdefault('validvalues', None)
+
     def register(func):
         if name in COMMANDS:
             raise error.ProgrammingError('%s command already registered '
                                          'for version 2' % name)
 
         COMMANDS[name] = wireprototypes.commandentry(
-            func, args=args, transports=transports, permission=permission)
+            func, args=args, transports=transports, permission=permission,
+            cachekeyfn=cachekeyfn, extracapabilitiesfn=extracapabilitiesfn)
 
         return func
 
     return register
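# Editor's sketch: registering a hypothetical version 2 command using the
# argument schema documented above. Commands are generators of objects to
# be serialized and sent to the client.
@wireprotocommand('echo',
                  args={
                      'value': {
                          'type': 'bytes',
                          'default': lambda: b'',
                          'example': b'hello',
                      },
                  },
                  permission='pull')
def echov2(repo, proto, value):
    yield {b'value': value}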
 
+def makecommandcachekeyfn(command, localversion=None, allargs=False):
+    """Construct a cache key derivation function with common features.
+
+    By default, the cache key is a hash of:
+
+    * The command name.
+    * A global cache version number.
+    * A local cache version number (passed via ``localversion``).
+    * All the arguments passed to the command.
+    * The media type used.
+    * Wire protocol version string.
+    * The repository path.
+    """
+    if not allargs:
+        raise error.ProgrammingError('only allargs=True is currently supported')
+
+    if localversion is None:
+        raise error.ProgrammingError('must set localversion argument value')
+
+    def cachekeyfn(repo, proto, cacher, **args):
+        spec = COMMANDS[command]
+
+        # Commands that mutate the repo cannot be cached.
+        if spec.permission == 'push':
+            return None
+
+        # TODO config option to disable caching.
+
+        # Our key derivation strategy is to construct a data structure
+        # holding everything that could influence cacheability and to hash
+        # the CBOR representation of that. Using CBOR seems like it might
+        # be overkill. However, simpler hashing mechanisms are prone to
+        # duplicate input issues. e.g. if you just concatenate two values,
+        # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
+        # "padding" between values and prevents these problems.
+
+        # Seed the hash with various data.
+        state = {
+            # To invalidate all cache keys.
+            b'globalversion': GLOBAL_CACHE_VERSION,
+            # More granular cache key invalidation.
+            b'localversion': localversion,
+            # Cache keys are segmented by command.
+            b'command': pycompat.sysbytes(command),
+            # Throw in the media type and API version strings so changes
+            # to exchange semantics invalidate the cache.
+            b'mediatype': FRAMINGTYPE,
+            b'version': HTTP_WIREPROTO_V2,
+            # So same requests for different repos don't share cache keys.
+            b'repo': repo.root,
+        }
+
+        # The arguments passed to us will have already been normalized.
+        # Default values will be set, etc. This is important because it
+        # means that it doesn't matter if clients send an explicit argument
+        # or rely on the default value: it will all normalize to the same
+        # set of arguments on the server and therefore the same cache key.
+        #
+        # Arguments by their very nature must support being encoded to CBOR.
+        # And the CBOR encoder is deterministic. So we hash the arguments
+        # by feeding the CBOR of their representation into the hasher.
+        if allargs:
+            state[b'args'] = pycompat.byteskwargs(args)
+
+        cacher.adjustcachekeystate(state)
+
+        hasher = hashlib.sha1()
+        for chunk in cborutil.streamencode(state):
+            hasher.update(chunk)
+
+        return pycompat.sysbytes(hasher.hexdigest())
+
+    return cachekeyfn
+
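# Editor's illustration of the duplicate-input problem mentioned above:
# naive concatenation hashes distinct inputs identically, whereas a
# length-prefixed encoding such as CBOR keeps them apart.
import hashlib
assert (hashlib.sha1(b'foo' + b'bar').digest()
        == hashlib.sha1(b'fo' + b'obar').digest())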
+def makeresponsecacher(repo, proto, command, args, objencoderfn,
+                       redirecttargets, redirecthashes):
+    """Construct a cacher for a cacheable command.
+
+    Returns an ``iwireprotocolcommandcacher`` instance.
+
+    Extensions can monkeypatch this function to provide custom caching
+    backends.
+    """
+    return None
+
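# Editor's sketch of a trivial in-memory cacher an extension might return
# from a monkeypatched makeresponsecacher(). Only the methods exercised by
# dispatch() above are implemented; a real backend should implement the
# full iwireprotocolcommandcacher interface.
class memorycacher(object):
    _cache = {}  # shared across requests; cache key -> list of objects

    def __init__(self):
        self.key = None
        self.buffered = []

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        pass

    def adjustcachekeystate(self, state):
        pass  # nothing extra influences our keys

    def setcachekey(self, key):
        self.key = key
        return True

    def lookup(self):
        if self.key in self._cache:
            return {'objs': iter(self._cache[self.key])}
        return None

    def onobject(self, obj):
        self.buffered.append(obj)
        yield obj

    def onfinished(self):
        self._cache[self.key] = self.buffered
        return []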
+def resolvenodes(repo, revisions):
+    """Resolve nodes from a revisions specifier data structure."""
+    cl = repo.changelog
+    clhasnode = cl.hasnode
+
+    seen = set()
+    nodes = []
+
+    if not isinstance(revisions, list):
+        raise error.WireprotoCommandError('revisions must be defined as an '
+                                          'array')
+
+    for spec in revisions:
+        if b'type' not in spec:
+            raise error.WireprotoCommandError(
+                'type key not present in revision specifier')
+
+        typ = spec[b'type']
+
+        if typ == b'changesetexplicit':
+            if b'nodes' not in spec:
+                raise error.WireprotoCommandError(
+                    'nodes key not present in changesetexplicit revision '
+                    'specifier')
+
+            for node in spec[b'nodes']:
+                if node not in seen:
+                    nodes.append(node)
+                    seen.add(node)
+
+        elif typ == b'changesetexplicitdepth':
+            for key in (b'nodes', b'depth'):
+                if key not in spec:
+                    raise error.WireprotoCommandError(
+                        '%s key not present in changesetexplicitdepth revision '
+                        'specifier', (key,))
+
+            for rev in repo.revs(b'ancestors(%ln, %d)', spec[b'nodes'],
+                                 spec[b'depth'] - 1):
+                node = cl.node(rev)
+
+                if node not in seen:
+                    nodes.append(node)
+                    seen.add(node)
+
+        elif typ == b'changesetdagrange':
+            for key in (b'roots', b'heads'):
+                if key not in spec:
+                    raise error.WireprotoCommandError(
+                        '%s key not present in changesetdagrange revision '
+                        'specifier', (key,))
+
+            if not spec[b'heads']:
+                raise error.WireprotoCommandError(
+                    'heads key in changesetdagrange cannot be empty')
+
+            if spec[b'roots']:
+                common = [n for n in spec[b'roots'] if clhasnode(n)]
+            else:
+                common = [nullid]
+
+            for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
+                if n not in seen:
+                    nodes.append(n)
+                    seen.add(n)
+
+        else:
+            raise error.WireprotoCommandError(
+                'unknown revision specifier type: %s', (typ,))
+
+    return nodes
+
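# Editor's illustration: one revision specifier of each type accepted by
# resolvenodes(). The node values below are dummy placeholders.
node1 = b'\x11' * 20
node2 = b'\x22' * 20

revisions = [
    {b'type': b'changesetexplicit',
     b'nodes': [node1]},
    {b'type': b'changesetexplicitdepth',
     b'nodes': [node2],
     b'depth': 10},
    {b'type': b'changesetdagrange',
     b'roots': [node1],
     b'heads': [node2]},
]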
 @wireprotocommand('branchmap', permission='pull')
 def branchmapv2(repo, proto):
-    branchmap = {encoding.fromlocal(k): v
-                 for k, v in repo.branchmap().iteritems()}
-
-    return wireprototypes.cborresponse(branchmap)
+    yield {encoding.fromlocal(k): v
+           for k, v in repo.branchmap().iteritems()}
 
 @wireprotocommand('capabilities', permission='pull')
 def capabilitiesv2(repo, proto):
-    caps = _capabilitiesv2(repo, proto)
+    yield _capabilitiesv2(repo, proto)
+
+@wireprotocommand(
+    'changesetdata',
+    args={
+        'revisions': {
+            'type': 'list',
+            'example': [{
+                b'type': b'changesetexplicit',
+                b'nodes': [b'abcdef...'],
+            }],
+        },
+        'fields': {
+            'type': 'set',
+            'default': set,
+            'example': {b'parents', b'revision'},
+            'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
+        },
+    },
+    permission='pull')
+def changesetdata(repo, proto, revisions, fields):
+    # TODO look for unknown fields and abort when they can't be serviced.
+    # This could probably be validated by dispatcher using validvalues.
+
+    cl = repo.changelog
+    outgoing = resolvenodes(repo, revisions)
+    publishing = repo.publishing()
+
+    if outgoing:
+        repo.hook('preoutgoing', throw=True, source='serve')
+
+    yield {
+        b'totalitems': len(outgoing),
+    }
+
+    # The phases of nodes already transferred to the client may have changed
+    # since the client last requested data. We send phase-only records
+    # for these revisions, if requested.
+    # TODO actually do this. We'll probably want to emit phase heads
+    # in the ancestry set of the outgoing revisions. This will ensure
+    # that phase updates within that set are seen.
+    if b'phase' in fields:
+        pass
+
+    nodebookmarks = {}
+    for mark, node in repo._bookmarks.items():
+        nodebookmarks.setdefault(node, set()).add(mark)
+
+    # ``outgoing`` is already topologically sorted by revision number.
+    for node in outgoing:
+        d = {
+            b'node': node,
+        }
+
+        if b'parents' in fields:
+            d[b'parents'] = cl.parents(node)
+
+        if b'phase' in fields:
+            if publishing:
+                d[b'phase'] = b'public'
+            else:
+                ctx = repo[node]
+                d[b'phase'] = ctx.phasestr()
+
+        if b'bookmarks' in fields and node in nodebookmarks:
+            d[b'bookmarks'] = sorted(nodebookmarks[node])
+            del nodebookmarks[node]
+
+        followingmeta = []
+        followingdata = []
+
+        if b'revision' in fields:
+            revisiondata = cl.revision(node, raw=True)
+            followingmeta.append((b'revision', len(revisiondata)))
+            followingdata.append(revisiondata)
+
+        # TODO make it possible for extensions to wrap a function or register
+        # a handler to service custom fields.
+
+        if followingmeta:
+            d[b'fieldsfollowing'] = followingmeta
+
+        yield d
+
+        for extra in followingdata:
+            yield extra
 
-    return wireprototypes.cborresponse(caps)
+    # If requested, send bookmarks from nodes that didn't have revision
+    # data sent so receiver is aware of any bookmark updates.
+    if b'bookmarks' in fields:
+        for node, marks in sorted(nodebookmarks.iteritems()):
+            yield {
+                b'node': node,
+                b'bookmarks': sorted(marks),
+            }
+
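# Editor's illustration of the object stream changesetdata() emits for a
# single node when fields={b'parents', b'revision'} (values are
# placeholders):
#
#   {b'totalitems': 1}
#   {b'node': node,
#    b'parents': [p1, p2],
#    b'fieldsfollowing': [(b'revision', 1234)]}
#   <1234 bytes of raw changelog revision data>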
+class FileAccessError(Exception):
+    """Represents an error accessing a specific file."""
+
+    def __init__(self, path, msg, args):
+        self.path = path
+        self.msg = msg
+        self.args = args
+
+def getfilestore(repo, proto, path):
+    """Obtain a file storage object for use with wire protocol.
+
+    Exists as a standalone function so extensions can monkeypatch to add
+    access control.
+    """
+    # This seems to work even if the file doesn't exist. So catch
+    # "empty" files and return an error.
+    fl = repo.file(path)
+
+    if not len(fl):
+        raise FileAccessError(path, 'unknown file: %s', (path,))
+
+    return fl
+
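# Editor's sketch: an access-control wrapper an extension might install by
# monkeypatching getfilestore(). The path prefix is purely illustrative.
origgetfilestore = getfilestore

def restrictedgetfilestore(repo, proto, path):
    if path.startswith(b'secret/'):
        raise FileAccessError(path, 'access denied: %s', (path,))
    return origgetfilestore(repo, proto, path)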
+def emitfilerevisions(repo, path, revisions, fields):
+    clnode = repo.changelog.node
+
+    for revision in revisions:
+        d = {
+            b'node': revision.node,
+        }
+
+        if b'parents' in fields:
+            d[b'parents'] = [revision.p1node, revision.p2node]
+
+        if b'linknode' in fields:
+            # TODO by creating the filectx against a specific file revision
+            # instead of changeset, linkrev() is always used. This is wrong for
+            # cases where linkrev() may refer to a hidden changeset. We need an
+            # API for performing linkrev adjustment that takes this into
+            # account.
+            fctx = repo.filectx(path, fileid=revision.node)
+            d[b'linknode'] = clnode(fctx.introrev())
+
+        followingmeta = []
+        followingdata = []
+
+        if b'revision' in fields:
+            if revision.revision is not None:
+                followingmeta.append((b'revision', len(revision.revision)))
+                followingdata.append(revision.revision)
+            else:
+                d[b'deltabasenode'] = revision.basenode
+                followingmeta.append((b'delta', len(revision.delta)))
+                followingdata.append(revision.delta)
+
+        if followingmeta:
+            d[b'fieldsfollowing'] = followingmeta
+
+        yield d
+
+        for extra in followingdata:
+            yield extra
+
+def makefilematcher(repo, pathfilter):
+    """Construct a matcher from a path filter dict."""
+
+    # Validate values.
+    if pathfilter:
+        for key in (b'include', b'exclude'):
+            for pattern in pathfilter.get(key, []):
+                if not pattern.startswith((b'path:', b'rootfilesin:')):
+                    raise error.WireprotoCommandError(
+                        '%s pattern must begin with `path:` or `rootfilesin:`; '
+                        'got %s', (key, pattern))
+
+    if pathfilter:
+        matcher = matchmod.match(repo.root, b'',
+                                 include=pathfilter.get(b'include', []),
+                                 exclude=pathfilter.get(b'exclude', []))
+    else:
+        matcher = matchmod.match(repo.root, b'')
+
+    # Requested patterns could include files not in the local store. So
+    # filter those out.
+    return matchmod.intersectmatchers(repo.narrowmatch(), matcher)
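# Editor's illustration: a pathfilter dict accepted by makefilematcher().
# Only `path:` and `rootfilesin:` pattern prefixes are permitted.
pathfilter = {
    b'include': [b'path:tests', b'rootfilesin:mercurial'],
    b'exclude': [b'path:tests/artifacts'],
}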
 
-@wireprotocommand('heads',
-                  args={
-                      'publiconly': False,
-                  },
-                  permission='pull')
-def headsv2(repo, proto, publiconly=False):
+@wireprotocommand(
+    'filedata',
+    args={
+        'haveparents': {
+            'type': 'bool',
+            'default': lambda: False,
+            'example': True,
+        },
+        'nodes': {
+            'type': 'list',
+            'example': [b'0123456...'],
+        },
+        'fields': {
+            'type': 'set',
+            'default': set,
+            'example': {b'parents', b'revision'},
+            'validvalues': {b'parents', b'revision', b'linknode'},
+        },
+        'path': {
+            'type': 'bytes',
+            'example': b'foo.txt',
+        }
+    },
+    permission='pull',
+    # TODO censoring a file revision won't invalidate the cache.
+    # Figure out a way to take censoring into account when deriving
+    # the cache key.
+    cachekeyfn=makecommandcachekeyfn('filedata', 1, allargs=True))
+def filedata(repo, proto, haveparents, nodes, fields, path):
+    # TODO this API allows access to file revisions that are attached to
+    # secret changesets. filesdata does not have this problem. Maybe this
+    # API should be deleted?
+
+    try:
+        # Extensions may wish to access the protocol handler.
+        store = getfilestore(repo, proto, path)
+    except FileAccessError as e:
+        raise error.WireprotoCommandError(e.msg, e.args)
+
+    # Validate requested nodes.
+    for node in nodes:
+        try:
+            store.rev(node)
+        except error.LookupError:
+            raise error.WireprotoCommandError('unknown file node: %s',
+                                              (hex(node),))
+
+    revisions = store.emitrevisions(nodes,
+                                    revisiondata=b'revision' in fields,
+                                    assumehaveparentrevisions=haveparents)
+
+    yield {
+        b'totalitems': len(nodes),
+    }
+
+    for o in emitfilerevisions(repo, path, revisions, fields):
+        yield o
+
+def filesdatacapabilities(repo, proto):
+    batchsize = repo.ui.configint(
+        b'experimental', b'server.filesdata.recommended-batch-size')
+    return {
+        b'recommendedbatchsize': batchsize,
+    }
+
+@wireprotocommand(
+    'filesdata',
+    args={
+        'haveparents': {
+            'type': 'bool',
+            'default': lambda: False,
+            'example': True,
+        },
+        'fields': {
+            'type': 'set',
+            'default': set,
+            'example': {b'parents', b'revision'},
+            'validvalues': {b'firstchangeset', b'linknode', b'parents',
+                            b'revision'},
+        },
+        'pathfilter': {
+            'type': 'dict',
+            'default': lambda: None,
+            'example': {b'include': [b'path:tests']},
+        },
+        'revisions': {
+            'type': 'list',
+            'example': [{
+                b'type': b'changesetexplicit',
+                b'nodes': [b'abcdef...'],
+            }],
+        },
+    },
+    permission='pull',
+    # TODO censoring a file revision won't invalidate the cache.
+    # Figure out a way to take censoring into account when deriving
+    # the cache key.
+    cachekeyfn=makecommandcachekeyfn('filesdata', 1, allargs=True),
+    extracapabilitiesfn=filesdatacapabilities)
+def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
+    # TODO This should operate on a repo that exposes obsolete changesets. There
+    # is a race between a client making a push that obsoletes a changeset and
+    # another client fetching files data for that changeset. If a client has a
+    # changeset, it should probably be allowed to access files data for that
+    # changeset.
+
+    cl = repo.changelog
+    outgoing = resolvenodes(repo, revisions)
+    filematcher = makefilematcher(repo, pathfilter)
+
+    # Figure out what needs to be emitted.
+    changedpaths = set()
+    fnodes = collections.defaultdict(set)
+
+    for node in outgoing:
+        ctx = repo[node]
+        changedpaths.update(ctx.files())
+
+    changedpaths = sorted(p for p in changedpaths if filematcher(p))
+
+    # If ancestors are known, we send file revisions having a linkrev in the
+    # outgoing set of changeset revisions.
+    if haveparents:
+        outgoingclrevs = set(cl.rev(n) for n in outgoing)
+
+        for path in changedpaths:
+            try:
+                store = getfilestore(repo, proto, path)
+            except FileAccessError as e:
+                raise error.WireprotoCommandError(e.msg, e.args)
+
+            for rev in store:
+                linkrev = store.linkrev(rev)
+
+                if linkrev in outgoingclrevs:
+                    fnodes[path].add(store.node(rev))
+
+    # If ancestors aren't known, we walk the manifests and send all
+    # encountered file revisions.
+    else:
+        for node in outgoing:
+            mctx = repo[node].manifestctx()
+
+            for path, fnode in mctx.read().items():
+                if filematcher(path):
+                    fnodes[path].add(fnode)
+
+    yield {
+        b'totalpaths': len(fnodes),
+        b'totalitems': sum(len(v) for v in fnodes.values())
+    }
+
+    for path, filenodes in sorted(fnodes.items()):
+        try:
+            store = getfilestore(repo, proto, path)
+        except FileAccessError as e:
+            raise error.WireprotoCommandError(e.msg, e.args)
+
+        yield {
+            b'path': path,
+            b'totalitems': len(filenodes),
+        }
+
+        revisions = store.emitrevisions(filenodes,
+                                        revisiondata=b'revision' in fields,
+                                        assumehaveparentrevisions=haveparents)
+
+        for o in emitfilerevisions(repo, path, revisions, fields):
+            yield o
+
+@wireprotocommand(
+    'heads',
+    args={
+        'publiconly': {
+            'type': 'bool',
+            'default': lambda: False,
+            'example': False,
+        },
+    },
+    permission='pull')
+def headsv2(repo, proto, publiconly):
     if publiconly:
         repo = repo.filtered('immutable')
 
-    return wireprototypes.cborresponse(repo.heads())
+    yield repo.heads()
 
-@wireprotocommand('known',
-                  args={
-                      'nodes': [b'deadbeef'],
-                  },
-                  permission='pull')
-def knownv2(repo, proto, nodes=None):
-    nodes = nodes or []
+@wireprotocommand(
+    'known',
+    args={
+        'nodes': {
+            'type': 'list',
+            'default': list,
+            'example': [b'deadbeef'],
+        },
+    },
+    permission='pull')
+def knownv2(repo, proto, nodes):
     result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
-    return wireprototypes.cborresponse(result)
+    yield result
 
-@wireprotocommand('listkeys',
-                  args={
-                      'namespace': b'ns',
-                  },
-                  permission='pull')
-def listkeysv2(repo, proto, namespace=None):
+@wireprotocommand(
+    'listkeys',
+    args={
+        'namespace': {
+            'type': 'bytes',
+            'example': b'ns',
+        },
+    },
+    permission='pull')
+def listkeysv2(repo, proto, namespace):
     keys = repo.listkeys(encoding.tolocal(namespace))
     keys = {encoding.fromlocal(k): encoding.fromlocal(v)
             for k, v in keys.iteritems()}
 
-    return wireprototypes.cborresponse(keys)
+    yield keys
 
-@wireprotocommand('lookup',
-                  args={
-                      'key': b'foo',
-                  },
-                  permission='pull')
+@wireprotocommand(
+    'lookup',
+    args={
+        'key': {
+            'type': 'bytes',
+            'example': b'foo',
+        },
+    },
+    permission='pull')
 def lookupv2(repo, proto, key):
     key = encoding.tolocal(key)
 
     # TODO handle exception.
     node = repo.lookup(key)
 
-    return wireprototypes.cborresponse(node)
+    yield node
+
+def manifestdatacapabilities(repo, proto):
+    batchsize = repo.ui.configint(
+        b'experimental', b'server.manifestdata.recommended-batch-size')
+
+    return {
+        b'recommendedbatchsize': batchsize,
+    }
+
+@wireprotocommand(
+    'manifestdata',
+    args={
+        'nodes': {
+            'type': 'list',
+            'example': [b'0123456...'],
+        },
+        'haveparents': {
+            'type': 'bool',
+            'default': lambda: False,
+            'example': True,
+        },
+        'fields': {
+            'type': 'set',
+            'default': set,
+            'example': {b'parents', b'revision'},
+            'validvalues': {b'parents', b'revision'},
+        },
+        'tree': {
+            'type': 'bytes',
+            'example': b'',
+        },
+    },
+    permission='pull',
+    cachekeyfn=makecommandcachekeyfn('manifestdata', 1, allargs=True),
+    extracapabilitiesfn=manifestdatacapabilities)
+def manifestdata(repo, proto, haveparents, nodes, fields, tree):
+    store = repo.manifestlog.getstorage(tree)
+
+    # Validate the requested nodes are known and abort on unknown revisions.
+    for node in nodes:
+        try:
+            store.rev(node)
+        except error.LookupError:
+            raise error.WireprotoCommandError(
+                'unknown node: %s', (node,))
+
+    revisions = store.emitrevisions(nodes,
+                                    revisiondata=b'revision' in fields,
+                                    assumehaveparentrevisions=haveparents)
 
-@wireprotocommand('pushkey',
-                  args={
-                      'namespace': b'ns',
-                      'key': b'key',
-                      'old': b'old',
-                      'new': b'new',
-                  },
-                  permission='push')
+    yield {
+        b'totalitems': len(nodes),
+    }
+
+    for revision in revisions:
+        d = {
+            b'node': revision.node,
+        }
+
+        if b'parents' in fields:
+            d[b'parents'] = [revision.p1node, revision.p2node]
+
+        followingmeta = []
+        followingdata = []
+
+        if b'revision' in fields:
+            if revision.revision is not None:
+                followingmeta.append((b'revision', len(revision.revision)))
+                followingdata.append(revision.revision)
+            else:
+                d[b'deltabasenode'] = revision.basenode
+                followingmeta.append((b'delta', len(revision.delta)))
+                followingdata.append(revision.delta)
+
+        if followingmeta:
+            d[b'fieldsfollowing'] = followingmeta
+
+        yield d
+
+        for extra in followingdata:
+            yield extra
+
+@wireprotocommand(
+    'pushkey',
+    args={
+        'namespace': {
+            'type': 'bytes',
+            'example': b'ns',
+        },
+        'key': {
+            'type': 'bytes',
+            'example': b'key',
+        },
+        'old': {
+            'type': 'bytes',
+            'example': b'old',
+        },
+        'new': {
+            'type': 'bytes',
+            'example': b'new',
+        },
+    },
+    permission='push')
 def pushkeyv2(repo, proto, namespace, key, old, new):
     # TODO handle ui output redirection
-    r = repo.pushkey(encoding.tolocal(namespace),
-                     encoding.tolocal(key),
-                     encoding.tolocal(old),
-                     encoding.tolocal(new))
+    yield repo.pushkey(encoding.tolocal(namespace),
+                       encoding.tolocal(key),
+                       encoding.tolocal(old),
+                       encoding.tolocal(new))
+
+
+@wireprotocommand(
+    'rawstorefiledata',
+    args={
+        'files': {
+            'type': 'list',
+            'example': [b'changelog', b'manifestlog'],
+        },
+        'pathfilter': {
+            'type': 'list',
+            'default': lambda: None,
+            'example': {b'include': [b'path:tests']},
+        },
+    },
+    permission='pull')
+def rawstorefiledata(repo, proto, files, pathfilter):
+    if not streamclone.allowservergeneration(repo):
+        raise error.WireprotoCommandError(b'stream clone is disabled')
+
+    # TODO support dynamically advertising what store files "sets" are
+    # available. For now, we support changelog, manifestlog, and files.
+    files = set(files)
+    allowedfiles = {b'changelog', b'manifestlog'}
+
+    unsupported = files - allowedfiles
+    if unsupported:
+        raise error.WireprotoCommandError(b'unknown file type: %s',
+                                          (b', '.join(sorted(unsupported)),))
+
+    with repo.lock():
+        topfiles = list(repo.store.topfiles())
 
-    return wireprototypes.cborresponse(r)
+    sendfiles = []
+    totalsize = 0
+
+    # TODO this violates storage layer interface abstractions because
+    # it assumes revlogs.
+    for name, encodedname, size in topfiles:
+        if b'changelog' in files and name.startswith(b'00changelog'):
+            pass
+        elif b'manifestlog' in files and name.startswith(b'00manifest'):
+            pass
+        else:
+            continue
+
+        sendfiles.append((b'store', name, size))
+        totalsize += size
+
+    yield {
+        b'filecount': len(sendfiles),
+        b'totalsize': totalsize,
+    }
+
+    for location, name, size in sendfiles:
+        yield {
+            b'location': location,
+            b'path': name,
+            b'size': size,
+        }
+
+        # We have to use a closure for this to ensure the context manager is
+        # closed only after sending the final chunk.
+        def getfiledata():
+            with repo.svfs(name, 'rb', auditpath=False) as fh:
+                for chunk in util.filechunkiter(fh, limit=size):
+                    yield chunk
+
+        yield wireprototypes.indefinitebytestringresponse(
+            getfiledata())
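# Editor's illustration of the object stream rawstorefiledata() emits
# (file names and sizes are placeholders):
#
#   {b'filecount': 2, b'totalsize': 123456}
#   {b'location': b'store', b'path': b'00changelog.i', b'size': 4096}
#   <indefinite length bytestring: raw file data, streamed in chunks>
#   {b'location': b'store', b'path': b'00changelog.d', b'size': 119360}
#   <indefinite length bytestring: raw file data, streamed in chunks>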
--- a/rust/Cargo.lock	Wed Oct 10 12:25:28 2018 -0400
+++ b/rust/Cargo.lock	Mon Oct 22 14:46:06 2018 -0400
@@ -17,6 +17,10 @@
 ]
 
 [[package]]
+name = "hg-core"
+version = "0.1.0"
+
+[[package]]
 name = "hgcli"
 version = "0.1.0"
 dependencies = [
@@ -26,6 +30,14 @@
 ]
 
 [[package]]
+name = "hgdirectffi"
+version = "0.1.0"
+dependencies = [
+ "hg-core 0.1.0",
+ "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "kernel32-sys"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/rust/Cargo.toml	Wed Oct 10 12:25:28 2018 -0400
+++ b/rust/Cargo.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -1,2 +1,3 @@
 [workspace]
-members = ["hgcli"]
+members = ["hgcli", "hg-core", "hg-direct-ffi"]
+exclude = ["chg"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/Cargo.lock	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,689 @@
+[[package]]
+name = "arrayvec"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "byteorder"
+version = "1.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "bytes"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "chg"
+version = "0.1.0"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-process 0.2.2 (git+https://github.com/alexcrichton/tokio-process)",
+ "tokio-timer 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-zircon"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fuchsia-zircon-sys"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "iovec"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "lazycell"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "lock_api"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "log"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "mio"
+version = "0.6.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazycell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "mio-named-pipes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "mio-uds"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "miow"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "miow"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "net2"
+version = "0.2.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "nodrop"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "num_cpus"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "owning_ref"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lock_api 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "redox_syscall"
+version = "0.1.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "scopeguard"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "slab"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "smallvec"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "socket2"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "stable_deref_trait"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "tokio"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-current-thread 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-fs 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-tcp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-threadpool 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-timer 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-udp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-uds 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-codec"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-current-thread"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-executor"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-fs"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-threadpool 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-hglib"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-process 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-uds 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-io"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-process"
+version = "0.2.2"
+source = "git+https://github.com/alexcrichton/tokio-process#2e805aad57e2639246cbf7394899bf7a27c18ebd"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-signal 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-process"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-signal 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-reactor"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-signal"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-tcp"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-threadpool"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-timer"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-udp"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "tokio-uds"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unreachable"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "version_check"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "void"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi-build"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ws2_32-sys"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[metadata]
+"checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef"
+"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
+"checksum byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "90492c5858dd7d2e78691cfb89f90d273a2800fc11d98f60786e5d87e2f83781"
+"checksum bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0ce55bd354b095246fc34caf4e9e242f5297a7fd938b090cadfea6eee614aa62"
+"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16"
+"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3"
+"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
+"checksum crossbeam-deque 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3486aefc4c0487b9cb52372c97df0a48b8c249514af1ee99703bf70d2f2ceda1"
+"checksum crossbeam-epoch 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "30fecfcac6abfef8771151f8be4abc9e4edc112c2bcb233314cafde2680536e9"
+"checksum crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "677d453a17e8bd2b913fa38e8b9cf04bcdbb5be790aa294f2389661d72036015"
+"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
+"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
+"checksum futures 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "0c84b40c7e2de99ffd70602db314a7a8c26b2b3d830e6f7f7a142a8860ab3ca4"
+"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
+"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
+"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
+"checksum lazycell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddba4c30a78328befecec92fc94970e53b3ae385827d28620f0f5bb2493081e0"
+"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"
+"checksum lock_api 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "775751a3e69bde4df9b38dd00a1b5d6ac13791e4223d4a0506577f0dd27cfb7a"
+"checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f"
+"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
+"checksum mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "71646331f2619b1026cc302f87a2b8b648d5c6dd6937846a16cc8ce0f347f432"
+"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3"
+"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125"
+"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
+"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226"
+"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
+"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
+"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30"
+"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
+"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5"
+"checksum parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad7f7e6ebdc79edff6fdcb87a55b620174f7a989e3eb31b65231f4af57f00b8c"
+"checksum rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e464cd887e869cddcae8792a4ee31d23c7edd516700695608f5b98c67ee0131c"
+"checksum rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "edecf0f94da5551fc9b492093e30b041a891657db7940ee221f9d2f66e82eef2"
+"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1"
+"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27"
+"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+"checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d"
+"checksum smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "153ffa32fd170e9944f7e0838edf824a754ec4c1fc64746fcc9fe1f8fa602e5d"
+"checksum socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c4d11a52082057d87cb5caa31ad812f4504b97ab44732cd8359df2e9ff9f48e7"
+"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8"
+"checksum tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6e93c78d23cc61aa245a8acd2c4a79c4d7fa7fb5c3ca90d5737029f043a84895"
+"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f"
+"checksum tokio-current-thread 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f90fcd90952f0a496d438a976afba8e5c205fb12123f813d8ab3aa1c8436638c"
+"checksum tokio-executor 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c117b6cf86bb730aab4834f10df96e4dd586eff2c3c27d3781348da49e255bde"
+"checksum tokio-fs 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5cbe4ca6e71cb0b62a66e4e6f53a8c06a6eefe46cc5f665ad6f274c9906f135"
+"checksum tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a138c3cb866c8a95ceddae44634bb159eefeebcdba45aec2158f8ad6c201e6d"
+"checksum tokio-io 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "8b8a85fffbec3c5ab1ab62324570230dcd37ee5996a7859da5caf7b9d45e3e8c"
+"checksum tokio-process 0.2.2 (git+https://github.com/alexcrichton/tokio-process)" = "<none>"
+"checksum tokio-process 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0832648d1ff7ca42c06ca45dc76797b92c56500de828e33c77276fa1449947b6"
+"checksum tokio-reactor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4b26fd37f1125738b2170c80b551f69ff6fecb277e6e5ca885e53eec2b005018"
+"checksum tokio-signal 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b6893092932264944edee8486d54b578c7098bea794aedaf9bd7947b49e6b7bf"
+"checksum tokio-tcp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7ad235e9dadd126b2d47f6736f65aa1fdcd6420e66ca63f44177bc78df89f912"
+"checksum tokio-threadpool 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bbd8a8b911301c60cbfaa2a6588fb210e5c1038375b8bdecc47aa09a94c3c05f"
+"checksum tokio-timer 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "3a52f00c97fedb6d535d27f65cccb7181c8dd4c6edc3eda9ea93f6d45d05168e"
+"checksum tokio-udp 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "da941144b816d0dcda4db3a1ba87596e4df5e860a72b70783fe435891f80601c"
+"checksum tokio-uds 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22e3aa6d1fcc19e635418dc0a30ab5bd65d347973d6f43f1a37bf8d9d1335fc9"
+"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
+"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"
+"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
+"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
+"checksum winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0"
+"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
+"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/Cargo.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,20 @@
+[package]
+name = "chg"
+version = "0.1.0"
+authors = ["Yuya Nishihara <yuya@tcha.org>"]
+description = "Client for Mercurial command server with cHg extension"
+license = "GPL-2.0+"
+
+[dependencies]
+bytes = "0.4"
+futures = "0.1"
+libc = "0.2"
+log = { version = "0.4", features = ["std"] }
+tokio = "0.1"
+tokio-hglib = "0.2"
+# TODO: use "^0.2.3" once released; we need AsRawFd support.
+tokio-process = { git = "https://github.com/alexcrichton/tokio-process" }
+tokio-timer = "0.2"
+
+[build-dependencies]
+cc = "1.0"
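+
+# A plain `cargo build` from rust/chg/ should be enough to build this crate
+# (assuming a Rust toolchain is available); build.rs compiles the bundled C
+# helpers through the `cc` build-dependency above.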
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/build.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,9 @@
+extern crate cc;
+
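+// Compile the C helpers for fd passing and signal forwarding into a static
+// library named "procutil"; src/procutil.rs links against it via
+// `#[link(name = "procutil", kind = "static")]`.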
+fn main() {
+    cc::Build::new()
+        .warnings(true)
+        .file("src/sendfds.c")
+        .file("src/sighandlers.c")
+        .compile("procutil");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/attachio.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,97 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Functions to send client-side fds over the command server channel.
+
+use futures::{Async, Future, Poll};
+use std::io;
+use std::os::unix::io::AsRawFd;
+use tokio_hglib::{Client, Connection};
+use tokio_hglib::codec::ChannelMessage;
+use tokio_hglib::protocol::MessageLoop;
+
+use super::message;
+use super::procutil;
+
+/// Future to send client-side fds over the command server channel.
+///
+/// This works as follows:
+/// 1. Client sends "attachio" request.
+/// 2. Server sends back 1-byte input request.
+/// 3. Client sends fds with 1-byte dummy payload in response.
+/// 4. Server returns the number of fds received.
+///
+/// If stderr is omitted, it will be redirected to stdout. This
+/// allows us to attach the pager stdin to both stdout and stderr, and
+/// dispose of the client-side handle once attached.
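+///
+/// A rough sketch of the exchange on the wire (values illustrative):
+///
+/// ```text
+/// client -> server: "attachio" command
+/// server -> client: input request for 1 byte
+/// client -> server: SCM_RIGHTS message carrying [stdin, stdout, stderr]
+///                   alongside a 1-byte dummy payload
+/// server -> client: result code 3 (the number of fds received)
+/// ```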
+#[must_use = "futures do nothing unless polled"]
+pub struct AttachIo<C, I, O, E>
+    where C: Connection,
+{
+    msg_loop: MessageLoop<C>,
+    stdin: I,
+    stdout: O,
+    stderr: Option<E>,
+}
+
+impl<C, I, O, E> AttachIo<C, I, O, E>
+    where C: Connection + AsRawFd,
+          I: AsRawFd,
+          O: AsRawFd,
+          E: AsRawFd,
+{
+    pub fn with_client(client: Client<C>, stdin: I, stdout: O, stderr: Option<E>)
+                       -> AttachIo<C, I, O, E> {
+        let msg_loop = MessageLoop::start(client, b"attachio");
+        AttachIo { msg_loop, stdin, stdout, stderr }
+    }
+}
+
+impl<C, I, O, E> Future for AttachIo<C, I, O, E>
+    where C: Connection + AsRawFd,
+          I: AsRawFd,
+          O: AsRawFd,
+          E: AsRawFd,
+{
+    type Item = Client<C>;
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        loop {
+            let (client, msg) = try_ready!(self.msg_loop.poll());
+            match msg {
+                ChannelMessage::Data(b'r', data) => {
+                    let fd_cnt = message::parse_result_code(data)?;
+                    if fd_cnt == 3 {
+                        return Ok(Async::Ready(client));
+                    } else {
+                        return Err(io::Error::new(io::ErrorKind::InvalidData,
+                                                  "unexpected attachio result"));
+                    }
+                }
+                ChannelMessage::Data(..) => {
+                    // just ignore data sent to an uninteresting (optional) channel
+                    self.msg_loop = MessageLoop::resume(client);
+                }
+                ChannelMessage::InputRequest(1) => {
+                    // this may fail with EWOULDBLOCK in theory, but the
+                    // payload is quite small, and the send buffer should
+                    // be empty so the operation will complete immediately
+                    let sock_fd = client.as_raw_fd();
+                    let ifd = self.stdin.as_raw_fd();
+                    let ofd = self.stdout.as_raw_fd();
+                    let efd = self.stderr.as_ref().map_or(ofd, |f| f.as_raw_fd());
+                    procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
+                    self.msg_loop = MessageLoop::resume(client);
+                }
+                ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) |
+                ChannelMessage::SystemRequest(..) => {
+                    return Err(io::Error::new(io::ErrorKind::InvalidData,
+                                              "unsupported request while attaching io"));
+                }
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/clientext.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,64 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! cHg extensions to command server client.
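+//!
+//! A rough usage sketch, mirroring what `main.rs` does:
+//!
+//! ```ignore
+//! let fut = UnixClient::connect(sock_path)
+//!     .and_then(|client| client.set_current_dir(current_dir))
+//!     .and_then(|client| client.attach_io(io::stdin(), io::stdout(), io::stderr()))
+//!     .and_then(|client| client.run_command_chg(handler, env::args_os().skip(1)));
+//! ```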
+
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;
+use std::os::unix::io::AsRawFd;
+use std::path::Path;
+use tokio_hglib::{Client, Connection};
+use tokio_hglib::protocol::OneShotRequest;
+
+use super::attachio::AttachIo;
+use super::message;
+use super::runcommand::ChgRunCommand;
+use super::uihandler::SystemHandler;
+
+pub trait ChgClientExt<C>
+    where C: Connection + AsRawFd,
+{
+    /// Attaches the client file descriptors to the server.
+    fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
+        where I: AsRawFd,
+              O: AsRawFd,
+              E: AsRawFd;
+
+    /// Changes the working directory of the server.
+    fn set_current_dir<P>(self, dir: P) -> OneShotRequest<C>
+        where P: AsRef<Path>;
+
+    /// Runs the specified Mercurial command with cHg extension.
+    fn run_command_chg<I, P, H>(self, handler: H, args: I) -> ChgRunCommand<C, H>
+        where I: IntoIterator<Item = P>,
+              P: AsRef<OsStr>,
+              H: SystemHandler;
+}
+
+impl<C> ChgClientExt<C> for Client<C>
+    where C: Connection + AsRawFd,
+{
+    fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
+        where I: AsRawFd,
+              O: AsRawFd,
+              E: AsRawFd,
+    {
+        AttachIo::with_client(self, stdin, stdout, Some(stderr))
+    }
+
+    fn set_current_dir<P>(self, dir: P) -> OneShotRequest<C>
+        where P: AsRef<Path>,
+    {
+        OneShotRequest::start_with_args(self, b"chdir", dir.as_ref().as_os_str().as_bytes())
+    }
+
+    fn run_command_chg<I, P, H>(self, handler: H, args: I) -> ChgRunCommand<C, H>
+        where I: IntoIterator<Item = P>,
+              P: AsRef<OsStr>,
+              H: SystemHandler,
+    {
+        ChgRunCommand::with_client(self, handler, message::pack_args_os(args))
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/lib.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,23 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+extern crate bytes;
+#[macro_use]
+extern crate futures;
+extern crate libc;
+extern crate tokio;
+extern crate tokio_hglib;
+extern crate tokio_process;
+
+mod attachio;
+mod clientext;
+pub mod locator;
+pub mod message;
+pub mod procutil;
+mod runcommand;
+mod uihandler;
+
+pub use clientext::ChgClientExt;
+pub use uihandler::{ChgUiHandler, SystemHandler};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/locator.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,126 @@
+// Copyright 2011, 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Utilities for locating the command-server process.
+
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, DirBuilder};
+use std::io;
+use std::os::unix::ffi::{OsStrExt, OsStringExt};
+use std::os::unix::fs::{DirBuilderExt, MetadataExt};
+use std::path::{Path, PathBuf};
+use std::process;
+use std::time::Duration;
+
+use super::procutil;
+
+/// Helper to connect to and spawn a server process.
+#[derive(Clone, Debug)]
+pub struct Locator {
+    hg_command: OsString,
+    current_dir: PathBuf,
+    env_vars: Vec<(OsString, OsString)>,
+    process_id: u32,
+    base_sock_path: PathBuf,
+    timeout: Duration,
+}
+
+impl Locator {
+    /// Creates a locator capturing the current process environment.
+    ///
+    /// If no `$CHGSOCKNAME` is specified, the socket directory will be
+    /// created as necessary.
+    pub fn prepare_from_env() -> io::Result<Locator> {
+        Ok(Locator {
+            hg_command: default_hg_command(),
+            current_dir: env::current_dir()?,
+            env_vars: env::vars_os().collect(),
+            process_id: process::id(),
+            base_sock_path: prepare_server_socket_path()?,
+            timeout: default_timeout(),
+        })
+    }
+
+    /// Temporary socket path for this client process.
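+    ///
+    /// This is the base socket path suffixed with `.` and the client pid,
+    /// e.g. `/tmp/chg1000/server.12345` (path illustrative).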
+    fn temp_sock_path(&self) -> PathBuf {
+        let src = self.base_sock_path.as_os_str().as_bytes();
+        let mut buf = Vec::with_capacity(src.len() + 6);
+        buf.extend_from_slice(src);
+        buf.extend_from_slice(format!(".{}", self.process_id).as_bytes());
+        OsString::from_vec(buf).into()
+    }
+}
+
+/// Determines the server socket to connect to.
+///
+/// If no `$CHGSOCKNAME` is specified, the socket directory will be created
+/// as necessary.
+pub fn prepare_server_socket_path() -> io::Result<PathBuf> {
+    if let Some(s) = env::var_os("CHGSOCKNAME") {
+        Ok(PathBuf::from(s))
+    } else {
+        let mut path = default_server_socket_dir();
+        create_secure_dir(&path)?;
+        path.push("server");
+        Ok(path)
+    }
+}
+
+/// Determines the default server socket path as follows.
+///
+/// 1. `$XDG_RUNTIME_DIR/chg`
+/// 2. `$TMPDIR/chg$UID`
+/// 3. `/tmp/chg$UID`
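+///
+/// For example, `XDG_RUNTIME_DIR=/run/user/1000` yields `/run/user/1000/chg`,
+/// while uid 1000 with neither variable set gets `/tmp/chg1000` (values
+/// illustrative).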
+pub fn default_server_socket_dir() -> PathBuf {
+    // XDG_RUNTIME_DIR should be ignored if it has insufficient permissions.
+    // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+    if let Some(Ok(s)) = env::var_os("XDG_RUNTIME_DIR").map(check_secure_dir) {
+        let mut path = PathBuf::from(s);
+        path.push("chg");
+        path
+    } else {
+        let mut path = env::temp_dir();
+        path.push(format!("chg{}", procutil::get_effective_uid()));
+        path
+    }
+}
+
+/// Determines the default hg command.
+pub fn default_hg_command() -> OsString {
+    // TODO: maybe allow embedding the path at compile time (or load from hgrc)
+    env::var_os("CHGHG").or(env::var_os("HG")).unwrap_or(OsStr::new("hg").to_owned())
+}
+
+fn default_timeout() -> Duration {
+    let secs = env::var("CHGTIMEOUT").ok().and_then(|s| s.parse().ok()).unwrap_or(60);
+    Duration::from_secs(secs)
+}
+
+/// Creates a directory that other users cannot access.
+///
+/// If the directory already exists, checks its permissions.
+fn create_secure_dir<P>(path: P) -> io::Result<()>
+    where P: AsRef<Path>,
+{
+    DirBuilder::new().mode(0o700).create(path.as_ref()).or_else(|err| {
+        if err.kind() == io::ErrorKind::AlreadyExists {
+            check_secure_dir(path).map(|_| ())
+        } else {
+            Err(err)
+        }
+    })
+}
+
+fn check_secure_dir<P>(path: P) -> io::Result<P>
+    where P: AsRef<Path>,
+{
+    let a = fs::symlink_metadata(path.as_ref())?;
+    if a.is_dir() && a.uid() == procutil::get_effective_uid() && (a.mode() & 0o777) == 0o700 {
+        Ok(path)
+    } else {
+        Err(io::Error::new(io::ErrorKind::Other, "insecure directory"))
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/main.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,98 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+extern crate chg;
+extern crate futures;
+extern crate log;
+extern crate tokio;
+extern crate tokio_hglib;
+
+use chg::{ChgClientExt, ChgUiHandler};
+use chg::locator;
+use chg::procutil;
+use futures::sync::oneshot;
+use std::env;
+use std::io;
+use std::process;
+use std::time::Instant;
+use tokio::prelude::*;
+use tokio_hglib::UnixClient;
+
+struct DebugLogger {
+    start: Instant,
+}
+
+impl DebugLogger {
+    pub fn new() -> DebugLogger {
+        DebugLogger {
+            start: Instant::now(),
+        }
+    }
+}
+
+impl log::Log for DebugLogger {
+    fn enabled(&self, metadata: &log::Metadata) -> bool {
+        metadata.target().starts_with("chg::")
+    }
+
+    fn log(&self, record: &log::Record) {
+        if self.enabled(record.metadata()) {
+            // just make the output look similar to that of the C chg
+            let l = format!("{}", record.level()).to_lowercase();
+            let t = self.start.elapsed();
+            writeln!(io::stderr(), "chg: {}: {}.{:06} {}",
+                     l, t.as_secs(), t.subsec_micros(), record.args()).unwrap_or(());
+        }
+    }
+
+    fn flush(&self) {
+    }
+}
+
+fn main() {
+    if env::var_os("CHGDEBUG").is_some() {
+        log::set_boxed_logger(Box::new(DebugLogger::new()))
+            .expect("no logger should be installed yet");
+        log::set_max_level(log::LevelFilter::Debug);
+    }
+
+    let code = run().unwrap_or_else(|err| {
+        writeln!(io::stderr(), "chg: abort: {}", err).unwrap_or(());
+        255
+    });
+    process::exit(code);
+}
+
+fn run() -> io::Result<i32> {
+    let current_dir = env::current_dir()?;
+    let sock_path = locator::prepare_server_socket_path()?;
+    let handler = ChgUiHandler::new();
+    let (result_tx, result_rx) = oneshot::channel();
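+    // connect, chdir to the client cwd, attach the client fds, install the
+    // signal forwarders, then run the command and hand back its exit code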
+    let fut = UnixClient::connect(sock_path)
+        .and_then(|client| {
+            client.set_current_dir(current_dir)
+        })
+        .and_then(|client| {
+            client.attach_io(io::stdin(), io::stdout(), io::stderr())
+        })
+        .and_then(|client| {
+            let pid = client.server_spec().process_id.unwrap();
+            let pgid = client.server_spec().process_group_id;
+            procutil::setup_signal_handler_once(pid, pgid)?;
+            Ok(client)
+        })
+        .and_then(|client| {
+            client.run_command_chg(handler, env::args_os().skip(1))
+        })
+        .map(|(_client, _handler, code)| {
+            procutil::restore_signal_handler_once()?;
+            Ok(code)
+        })
+        .or_else(|err| Ok(Err(err)))  // pass back error to caller
+        .map(|res| result_tx.send(res).unwrap());
+    tokio::run(fut);
+    result_rx.wait().unwrap_or(Err(io::Error::new(io::ErrorKind::Other,
+                                                  "no exit code set")))
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/message.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,117 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Utility for parsing and building command-server messages.
+
+use bytes::Bytes;
+use std::error;
+use std::ffi::{OsStr, OsString};
+use std::io;
+use std::os::unix::ffi::OsStrExt;
+
+pub use tokio_hglib::message::*;  // re-exports
+
+/// Shell command type requested by the server.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum CommandType {
+    /// Pager should be spawned.
+    Pager,
+    /// Shell command should be executed to send back the result code.
+    System,
+}
+
+/// Shell command requested by the server.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct CommandSpec {
+    pub command: OsString,
+    pub current_dir: OsString,
+    pub envs: Vec<(OsString, OsString)>,
+}
+
+/// Parses an "S" channel request into command type and spec.
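+///
+/// The raw data is a NUL-separated list: command type, command string and
+/// current directory, followed by zero or more `KEY=VALUE` environment
+/// entries, e.g. `pager\0less -FRX\0/tmp\0LANG=C\0HGPLAIN=` (example taken
+/// from the tests below).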
+pub fn parse_command_spec(data: Bytes) -> io::Result<(CommandType, CommandSpec)> {
+    let mut split = data.split(|&c| c == b'\0');
+    let ctype = parse_command_type(split.next().ok_or(new_parse_error("missing type"))?)?;
+    let command = split.next().ok_or(new_parse_error("missing command"))?;
+    let current_dir = split.next().ok_or(new_parse_error("missing current dir"))?;
+
+    let mut envs = Vec::new();
+    for l in split {
+        let mut s = l.splitn(2, |&c| c == b'=');
+        let k = s.next().unwrap();
+        let v = s.next().ok_or(new_parse_error("malformed env"))?;
+        envs.push((OsStr::from_bytes(k).to_owned(), OsStr::from_bytes(v).to_owned()));
+    }
+
+    let spec = CommandSpec {
+        command: OsStr::from_bytes(command).to_owned(),
+        current_dir: OsStr::from_bytes(current_dir).to_owned(),
+        envs: envs,
+    };
+    Ok((ctype, spec))
+}
+
+fn parse_command_type(value: &[u8]) -> io::Result<CommandType> {
+    match value {
+        b"pager" => Ok(CommandType::Pager),
+        b"system" => Ok(CommandType::System),
+        _ => Err(new_parse_error(format!("unknown command type: {}", decode_latin1(value)))),
+    }
+}
+
+fn decode_latin1<S>(s: S) -> String
+    where S: AsRef<[u8]>,
+{
+    s.as_ref().iter().map(|&c| c as char).collect()
+}
+
+fn new_parse_error<E>(error: E) -> io::Error
+    where E: Into<Box<error::Error + Send + Sync>>,
+{
+    io::Error::new(io::ErrorKind::InvalidData, error)
+}
+
+#[cfg(test)]
+mod tests {
+    use std::os::unix::ffi::OsStringExt;
+    use super::*;
+
+    #[test]
+    fn parse_command_spec_good() {
+        let src = [b"pager".as_ref(),
+                   b"less -FRX".as_ref(),
+                   b"/tmp".as_ref(),
+                   b"LANG=C".as_ref(),
+                   b"HGPLAIN=".as_ref()].join(&0);
+        let spec = CommandSpec {
+            command: os_string_from(b"less -FRX"),
+            current_dir: os_string_from(b"/tmp"),
+            envs: vec![(os_string_from(b"LANG"), os_string_from(b"C")),
+                       (os_string_from(b"HGPLAIN"), os_string_from(b""))],
+        };
+        assert_eq!(parse_command_spec(Bytes::from(src)).unwrap(), (CommandType::Pager, spec));
+    }
+
+    #[test]
+    fn parse_command_spec_too_short() {
+        assert!(parse_command_spec(Bytes::from_static(b"")).is_err());
+        assert!(parse_command_spec(Bytes::from_static(b"pager")).is_err());
+        assert!(parse_command_spec(Bytes::from_static(b"pager\0less")).is_err());
+    }
+
+    #[test]
+    fn parse_command_spec_malformed_env() {
+        assert!(parse_command_spec(Bytes::from_static(b"pager\0less\0/tmp\0HOME")).is_err());
+    }
+
+    #[test]
+    fn parse_command_spec_unknown_type() {
+        assert!(parse_command_spec(Bytes::from_static(b"paper\0less")).is_err());
+    }
+
+    fn os_string_from(s: &[u8]) -> OsString {
+        OsString::from_vec(s.to_vec())
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/procutil.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,87 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Low-level utility for signal and process handling.
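+//!
+//! Most of these are thin wrappers around libc and the C helpers in
+//! `sendfds.c` and `sighandlers.c`, which `build.rs` compiles into the
+//! static `procutil` library declared below.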
+
+use libc::{self, c_int, pid_t, size_t, ssize_t};
+use std::io;
+use std::os::unix::io::RawFd;
+use std::sync;
+
+#[link(name = "procutil", kind = "static")]
+extern "C" {
+    // sendfds.c
+    fn sendfds(sockfd: c_int, fds: *const c_int, fdlen: size_t) -> ssize_t;
+
+    // sighandlers.c
+    fn setupsignalhandler(pid: pid_t, pgid: pid_t) -> c_int;
+    fn restoresignalhandler() -> c_int;
+}
+
+/// Returns the effective uid of the current process.
+pub fn get_effective_uid() -> u32 {
+    unsafe { libc::geteuid() }
+}
+
+/// Changes the given fd to blocking mode.
+pub fn set_blocking_fd(fd: RawFd) -> io::Result<()> {
+    let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+    if flags < 0 {
+        return Err(io::Error::last_os_error());
+    }
+    let r = unsafe { libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_NONBLOCK) };
+    if r < 0 {
+        return Err(io::Error::last_os_error())
+    }
+    Ok(())
+}
+
+/// Sends file descriptors via the given socket.
+pub fn send_raw_fds(sock_fd: RawFd, fds: &[RawFd]) -> io::Result<()> {
+    let r = unsafe { sendfds(sock_fd, fds.as_ptr(), fds.len() as size_t) };
+    if r < 0 {
+        return Err(io::Error::last_os_error());
+    }
+    Ok(())
+}
+
+static SETUP_SIGNAL_HANDLER: sync::Once = sync::Once::new();
+static RESTORE_SIGNAL_HANDLER: sync::Once = sync::Once::new();
+
+/// Installs signal handlers to forward signals to the server.
+///
+/// # Safety
+///
+/// This touches global state, and is thus synchronized as a one-time
+/// initialization function.
+pub fn setup_signal_handler_once(pid: u32, pgid: Option<u32>) -> io::Result<()> {
+    let pid_signed = pid as i32;
+    let pgid_signed = pgid.map(|n| n as i32).unwrap_or(0);
+    let mut r = 0;
+    SETUP_SIGNAL_HANDLER.call_once(|| {
+        r = unsafe { setupsignalhandler(pid_signed, pgid_signed) };
+    });
+    if r < 0 {
+        return Err(io::Error::last_os_error());
+    }
+    Ok(())
+}
+
+/// Restores the original signal handlers.
+///
+/// # Safety
+///
+/// This touches global state, and is thus synchronized as a one-time
+/// initialization function.
+pub fn restore_signal_handler_once() -> io::Result<()> {
+    let mut r = 0;
+    RESTORE_SIGNAL_HANDLER.call_once(|| {
+        r = unsafe { restoresignalhandler() };
+    });
+    if r < 0 {
+        return Err(io::Error::last_os_error());
+    }
+    Ok(())
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/runcommand.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,163 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Functions to run a Mercurial command in a cHg-aware command server.
+
+use bytes::Bytes;
+use futures::future::IntoFuture;
+use futures::{Async, Future, Poll};
+use std::io;
+use std::mem;
+use std::os::unix::io::AsRawFd;
+use tokio_hglib::{Client, Connection};
+use tokio_hglib::codec::ChannelMessage;
+use tokio_hglib::protocol::MessageLoop;
+
+use super::attachio::AttachIo;
+use super::message::{self, CommandType};
+use super::uihandler::SystemHandler;
+
+enum AsyncS<R, S> {
+    Ready(R),
+    NotReady(S),
+    PollAgain(S),
+}
+
+enum CommandState<C, H>
+    where C: Connection,
+          H: SystemHandler,
+{
+    Running(MessageLoop<C>, H),
+    SpawningPager(Client<C>, <H::SpawnPagerResult as IntoFuture>::Future),
+    AttachingPager(AttachIo<C, io::Stdin, H::PagerStdin, H::PagerStdin>, H),
+    WaitingSystem(Client<C>, <H::RunSystemResult as IntoFuture>::Future),
+    Finished,
+}
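+
+// State transitions driven by `CommandState::poll()`:
+//   Running --(pager request)--> SpawningPager -> AttachingPager -> Running
+//   Running --(system request)--> WaitingSystem -> Running
+//   Running --(result code)--> future resolves; `Finished` is only a
+//   placeholder while a state is being polled.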
+
+type CommandPoll<C, H> = io::Result<(AsyncS<(Client<C>, H, i32), CommandState<C, H>>)>;
+
+/// Future that resolves to `(client, handler, exit_code)`.
+#[must_use = "futures do nothing unless polled"]
+pub struct ChgRunCommand<C, H>
+    where C: Connection,
+          H: SystemHandler,
+{
+    state: CommandState<C, H>,
+}
+
+impl<C, H> ChgRunCommand<C, H>
+    where C: Connection + AsRawFd,
+          H: SystemHandler,
+{
+    pub fn with_client(client: Client<C>, handler: H, packed_args: Bytes)
+                       -> ChgRunCommand<C, H> {
+        let msg_loop = MessageLoop::start_with_args(client, b"runcommand", packed_args);
+        ChgRunCommand {
+            state: CommandState::Running(msg_loop, handler),
+        }
+    }
+}
+
+impl<C, H> Future for ChgRunCommand<C, H>
+    where C: Connection + AsRawFd,
+          H: SystemHandler,
+{
+    type Item = (Client<C>, H, i32);
+    type Error = io::Error;
+
+    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+        loop {
+            let state = mem::replace(&mut self.state, CommandState::Finished);
+            match state.poll()? {
+                AsyncS::Ready((client, handler, code)) => {
+                    return Ok(Async::Ready((client, handler, code)));
+                }
+                AsyncS::NotReady(newstate) => {
+                    self.state = newstate;
+                    return Ok(Async::NotReady);
+                }
+                AsyncS::PollAgain(newstate) => {
+                    self.state = newstate;
+                }
+            }
+        }
+    }
+}
+
+impl<C, H> CommandState<C, H>
+    where C: Connection + AsRawFd,
+          H: SystemHandler,
+{
+    fn poll(self) -> CommandPoll<C, H> {
+        match self {
+            CommandState::Running(mut msg_loop, handler) => {
+                if let Async::Ready((client, msg)) = msg_loop.poll()? {
+                    process_message(client, handler, msg)
+                } else {
+                    Ok(AsyncS::NotReady(CommandState::Running(msg_loop, handler)))
+                }
+            }
+            CommandState::SpawningPager(client, mut fut) => {
+                if let Async::Ready((handler, pin)) = fut.poll()? {
+                    let fut = AttachIo::with_client(client, io::stdin(), pin, None);
+                    Ok(AsyncS::PollAgain(CommandState::AttachingPager(fut, handler)))
+                } else {
+                    Ok(AsyncS::NotReady(CommandState::SpawningPager(client, fut)))
+                }
+            }
+            CommandState::AttachingPager(mut fut, handler) => {
+                if let Async::Ready(client) = fut.poll()? {
+                    let msg_loop = MessageLoop::start(client, b"");  // terminator
+                    Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
+                } else {
+                    Ok(AsyncS::NotReady(CommandState::AttachingPager(fut, handler)))
+                }
+            }
+            CommandState::WaitingSystem(client, mut fut) => {
+                if let Async::Ready((handler, code)) = fut.poll()? {
+                    let data = message::pack_result_code(code);
+                    let msg_loop = MessageLoop::resume_with_data(client, data);
+                    Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
+                } else {
+                    Ok(AsyncS::NotReady(CommandState::WaitingSystem(client, fut)))
+                }
+            }
+            CommandState::Finished => panic!("poll ChgRunCommand after it's done")
+        }
+    }
+}
+
+fn process_message<C, H>(client: Client<C>, handler: H, msg: ChannelMessage) -> CommandPoll<C, H>
+    where C: Connection,
+          H: SystemHandler,
+{
+    match msg {
+        ChannelMessage::Data(b'r', data) => {
+            let code = message::parse_result_code(data)?;
+            Ok(AsyncS::Ready((client, handler, code)))
+        }
+        ChannelMessage::Data(..) => {
+            // just ignore data sent to an optional channel
+            let msg_loop = MessageLoop::resume(client);
+            Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
+        }
+        ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => {
+            Err(io::Error::new(io::ErrorKind::InvalidData, "unsupported request"))
+        }
+        ChannelMessage::SystemRequest(data) => {
+            let (cmd_type, cmd_spec) = message::parse_command_spec(data)?;
+            match cmd_type {
+                CommandType::Pager => {
+                    let fut = handler.spawn_pager(cmd_spec).into_future();
+                    Ok(AsyncS::PollAgain(CommandState::SpawningPager(client, fut)))
+                }
+                CommandType::System => {
+                    let fut = handler.run_system(cmd_spec).into_future();
+                    Ok(AsyncS::PollAgain(CommandState::WaitingSystem(client, fut)))
+                }
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/sendfds.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,51 @@
+/*
+ * Utility to send fds via Unix domain socket
+ *
+ * Copyright 2011, 2018 Yuya Nishihara <yuya@tcha.org>
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License version 2 or any later version.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#define MAX_FD_LEN 10
+
+/*
+ * Sends the given fds with 1-byte dummy payload.
+ *
+ * Returns the number of bytes sent on success, -1 on error and errno is set
+ * appropriately.
+ */
+ssize_t sendfds(int sockfd, const int *fds, size_t fdlen)
+{
+	char dummy[1] = {0};
+	struct iovec iov = {dummy, sizeof(dummy)};
+	char fdbuf[CMSG_SPACE(sizeof(fds[0]) * MAX_FD_LEN)];
+	struct msghdr msgh;
+	struct cmsghdr *cmsg;
+
+	/* just use a fixed-size buffer since we'll never send tons of fds */
+	if (fdlen > MAX_FD_LEN) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	memset(&msgh, 0, sizeof(msgh));
+	msgh.msg_iov = &iov;
+	msgh.msg_iovlen = 1;
+	msgh.msg_control = fdbuf;
+	msgh.msg_controllen = CMSG_SPACE(sizeof(fds[0]) * fdlen);
+
+	cmsg = CMSG_FIRSTHDR(&msgh);
+	cmsg->cmsg_level = SOL_SOCKET;
+	cmsg->cmsg_type = SCM_RIGHTS;
+	cmsg->cmsg_len = CMSG_LEN(sizeof(fds[0]) * fdlen);
+	memcpy(CMSG_DATA(cmsg), fds, sizeof(fds[0]) * fdlen);
+	msgh.msg_controllen = cmsg->cmsg_len;
+	return sendmsg(sockfd, &msgh, 0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/sighandlers.c	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,151 @@
+/*
+ * Signal handlers for cHg
+ *
+ * Copyright 2011, 2018 Yuya Nishihara <yuya@tcha.org>
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU General Public License version 2 or any later version.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+
+static pid_t peerpgid = 0;
+static pid_t peerpid = 0;
+
+static void forwardsignal(int sig)
+{
+	assert(peerpid > 0);
+	(void)kill(peerpid, sig);
+}
+
+static void forwardsignaltogroup(int sig)
+{
+	/* prefer kill(-pgid, sig), fallback to pid if pgid is invalid */
+	pid_t killpid = peerpgid > 1 ? -peerpgid : peerpid;
+	(void)kill(killpid, sig);
+}
+
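+/*
+ * Forwards the stop signal to the server and then stops this process too:
+ * the signal is re-raised while still blocked inside the handler, the
+ * default disposition is installed, and unblocking delivers the pending
+ * signal so the process actually stops; the handler and mask are restored
+ * on resume.
+ */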
+static void handlestopsignal(int sig)
+{
+	sigset_t unblockset, oldset;
+	struct sigaction sa, oldsa;
+	if (sigemptyset(&unblockset) < 0)
+		return;
+	if (sigaddset(&unblockset, sig) < 0)
+		return;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		return;
+
+	forwardsignal(sig);
+	if (raise(sig) < 0) /* resend to self */
+		return;
+	if (sigaction(sig, &sa, &oldsa) < 0)
+		return;
+	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0)
+		return;
+	/* the resent signal will be handled before sigprocmask() returns */
+	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+		return;
+	if (sigaction(sig, &oldsa, NULL) < 0)
+		return;
+}
+
+/*
+ * Installs signal handlers.
+ *
+ * Returns 0 on success, -1 on error and errno is set appropriately.
+ * Installed handlers are not cleaned up on error.
+ */
+int setupsignalhandler(pid_t pid, pid_t pgid)
+{
+	if (pid <= 0) {
+		errno = EINVAL;
+		return -1;
+	}
+	peerpid = pid;
+	peerpgid = (pgid <= 1 ? 0 : pgid);
+
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+
+	/* deadly signals meant to be sent to a process group:
+	 * - SIGHUP: usually generated by the kernel, when termination of a
+	 *   process causes that process group to become orphaned
+	 * - SIGINT: usually generated by the terminal */
+	sa.sa_handler = forwardsignaltogroup;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		return -1;
+	if (sigaction(SIGHUP, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGINT, &sa, NULL) < 0)
+		return -1;
+
+	/* terminate frontend by double SIGTERM in case of server freeze */
+	sa.sa_handler = forwardsignal;
+	sa.sa_flags |= SA_RESETHAND;
+	if (sigaction(SIGTERM, &sa, NULL) < 0)
+		return -1;
+
+	/* notify the worker about window resize events */
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+		return -1;
+	/* forward user-defined signals */
+	if (sigaction(SIGUSR1, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGUSR2, &sa, NULL) < 0)
+		return -1;
+	/* propagate job control requests to worker */
+	sa.sa_handler = forwardsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGCONT, &sa, NULL) < 0)
+		return -1;
+	sa.sa_handler = handlestopsignal;
+	sa.sa_flags = SA_RESTART;
+	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Restores signal handlers to the default, and masks SIGINT.
+ *
+ * Returns 0 on success, -1 on error, in which case errno is set
+ * appropriately.
+ */
+int restoresignalhandler(void)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART;
+	if (sigemptyset(&sa.sa_mask) < 0)
+		return -1;
+
+	if (sigaction(SIGHUP, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGTERM, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGCONT, &sa, NULL) < 0)
+		return -1;
+	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+		return -1;
+
+	/* ignore Ctrl+C while shutting down so that the pager exits cleanly */
+	sa.sa_handler = SIG_IGN;
+	if (sigaction(SIGINT, &sa, NULL) < 0)
+		return -1;
+
+	peerpid = 0;
+	return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/chg/src/uihandler.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,87 @@
+// Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use futures::Future;
+use futures::future::IntoFuture;
+use std::io;
+use std::os::unix::io::AsRawFd;
+use std::os::unix::process::ExitStatusExt;
+use std::process::{Command, Stdio};
+use tokio;
+use tokio_process::{ChildStdin, CommandExt};
+
+use super::message::CommandSpec;
+use super::procutil;
+
+/// Callback to process shell command requests received from the server.
+pub trait SystemHandler: Sized {
+    type PagerStdin: AsRawFd;
+    type SpawnPagerResult: IntoFuture<Item = (Self, Self::PagerStdin), Error = io::Error>;
+    type RunSystemResult: IntoFuture<Item = (Self, i32), Error = io::Error>;
+
+    /// Handles pager command request.
+    ///
+    /// Returns the pipe to be attached to the server if the pager is spawned.
+    fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult;
+
+    /// Handles system command request.
+    ///
+    /// Returns command exit code (positive) or signal number (negative).
+    fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult;
+}
+
+/// Default cHg implementation to process requests received from the server.
+pub struct ChgUiHandler {
+}
+
+impl ChgUiHandler {
+    pub fn new() -> ChgUiHandler {
+        ChgUiHandler {}
+    }
+}
+
+impl SystemHandler for ChgUiHandler {
+    type PagerStdin = ChildStdin;
+    type SpawnPagerResult = io::Result<(Self, Self::PagerStdin)>;
+    type RunSystemResult = Box<dyn Future<Item = (Self, i32), Error = io::Error> + Send>;
+
+    fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult {
+        let mut pager = new_shell_command(&spec)
+            .stdin(Stdio::piped())
+            .spawn_async()?;
+        let pin = pager.stdin().take().unwrap();
+        procutil::set_blocking_fd(pin.as_raw_fd())?;
+        // TODO: if the pager exits, notify the server with SIGPIPE
+        // immediately; otherwise the server won't get SIGPIPE if it does
+        // not write anything. (issue5278)
+        // kill(peerpid, SIGPIPE);
+        tokio::spawn(pager.map(|_| ()).map_err(|_| ()));  // just ignore errors
+        Ok((self, pin))
+    }
+
+    fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult {
+        let fut = new_shell_command(&spec)
+            .spawn_async()
+            .into_future()
+            .flatten()
+            .map(|status| {
+                let code = status.code().or_else(|| status.signal().map(|n| -n))
+                    .expect("either exit code or signal should be set");
+                (self, code)
+            });
+        Box::new(fut)
+    }
+}
+
+fn new_shell_command(spec: &CommandSpec) -> Command {
+    let mut builder = Command::new("/bin/sh");
+    builder
+        .arg("-c")
+        .arg(&spec.command)
+        .current_dir(&spec.current_dir)
+        .env_clear()
+        .envs(spec.envs.iter().cloned());
+    builder
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/Cargo.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,8 @@
+[package]
+name = "hg-core"
+version = "0.1.0"
+authors = ["Georges Racinet <gracinet@anybox.fr>"]
+description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
+
+[lib]
+name = "hg"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/rustfmt.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,3 @@
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/ancestors.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,270 @@
+// ancestors.rs
+//
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Rust versions of generic DAG ancestors algorithms for Mercurial
+
+use super::{Graph, GraphError, Revision, NULL_REVISION};
+use std::collections::{BinaryHeap, HashSet};
+
+/// Iterator over the ancestors of a given list of revisions.
+///
+/// This is a generic type, defined and implemented for any Graph, so that
+/// it's easy to
+///
+/// - unit test in pure Rust
+/// - bind to main Mercurial code, potentially in several ways, and have
+///   these bindings evolve over time
+pub struct AncestorsIterator<G: Graph> {
+    graph: G,
+    visit: BinaryHeap<Revision>,
+    seen: HashSet<Revision>,
+    stoprev: Revision,
+}
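+
+// Hedged usage sketch (`graph` being any `Graph` implementation; this
+// mirrors the unit tests below rather than prescribing an API):
+//
+//     let mut iter = AncestorsIterator::new(graph, vec![10, 5], 0, true)?;
+//     assert!(iter.contains(5));
+//     let remaining: Vec<Revision> = iter.collect();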
+
+impl<G: Graph> AncestorsIterator<G> {
+    /// Constructor.
+    ///
+    /// If `inclusive` is true, the initial revisions themselves are
+    /// emitted; otherwise iteration starts from their parents.
+    pub fn new<I>(
+        graph: G,
+        initrevs: I,
+        stoprev: Revision,
+        inclusive: bool,
+    ) -> Result<Self, GraphError>
+    where
+        I: IntoIterator<Item = Revision>,
+    {
+        let filtered_initrevs = initrevs.into_iter().filter(|&r| r >= stoprev);
+        if inclusive {
+            let visit: BinaryHeap<Revision> = filtered_initrevs.collect();
+            let seen = visit.iter().map(|&x| x).collect();
+            return Ok(AncestorsIterator {
+                visit: visit,
+                seen: seen,
+                stoprev: stoprev,
+                graph: graph,
+            });
+        }
+        let mut this = AncestorsIterator {
+            visit: BinaryHeap::new(),
+            seen: HashSet::new(),
+            stoprev: stoprev,
+            graph: graph,
+        };
+        this.seen.insert(NULL_REVISION);
+        for rev in filtered_initrevs {
+            this.conditionally_push_parents(rev)?;
+        }
+        Ok(this)
+    }
+
+    #[inline]
+    fn conditionally_push_rev(&mut self, rev: Revision) {
+        if self.stoprev <= rev && !self.seen.contains(&rev) {
+            self.seen.insert(rev);
+            self.visit.push(rev);
+        }
+    }
+
+    #[inline]
+    fn conditionally_push_parents(
+        &mut self,
+        rev: Revision,
+    ) -> Result<(), GraphError> {
+        let parents = self.graph.parents(rev)?;
+        self.conditionally_push_rev(parents.0);
+        self.conditionally_push_rev(parents.1);
+        Ok(())
+    }
+
+    /// Partially consumes the iterator to tell whether the given target
+    /// revision is among the ancestors it emits.
+    ///
+    /// This is meant for iterators dedicated to that kind of purpose.
+    pub fn contains(&mut self, target: Revision) -> bool {
+        if self.seen.contains(&target) && target != NULL_REVISION {
+            return true;
+        }
+        for rev in self {
+            if rev == target {
+                return true;
+            }
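+            // revisions come off the max-heap in decreasing order, so
+            // once below the target, the target can no longer appear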
+            if rev < target {
+                return false;
+            }
+        }
+        false
+    }
+}
+
+/// Main implementation.
+///
+/// The algorithm is the same as in `_lazyancestorsiter()` from `ancestors.py`
+/// with a few non-crucial differences:
+///
+/// - there's no filtering of invalid parent revisions. Actually, it should be
+///   consistent and more efficient to filter them from the end caller.
+/// - we don't use the equivalent of `heapq.heapreplace()`, but we should, for
+///   the same reasons (using `peek_mut`)
+/// - we don't have the optimization for adjacent revs (case where p1 == rev-1)
+/// - we save a few pushes by comparing with `stoprev` before pushing
+///
+/// Error treatment:
+/// We swallow the possible GraphError of conditionally_push_parents() to
+/// respect the Iterator trait in a simple manner: never emitting parents
+/// for the returned revision. We find this good enough for now, because:
+///
+/// - there's a good chance that invalid revisions are fed from the start,
+///   and `new()` doesn't swallow the error result.
+/// - this is probably what the Python implementation produces anyway, due
+///   to filtering at each step, and Python code is currently the only
+///   concrete caller we target, so we shouldn't need a finer error treatment
+///   for the time being.
+impl<G: Graph> Iterator for AncestorsIterator<G> {
+    type Item = Revision;
+
+    fn next(&mut self) -> Option<Revision> {
+        let current = match self.visit.pop() {
+            None => {
+                return None;
+            }
+            Some(i) => i,
+        };
+        self.conditionally_push_parents(current).unwrap_or(());
+        Some(current)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    #[derive(Clone, Debug)]
+    struct Stub;
+
+    /// This is the same graph as the dict from test-ancestor.py
+    impl Graph for Stub {
+        fn parents(
+            &self,
+            rev: Revision,
+        ) -> Result<(Revision, Revision), GraphError> {
+            match rev {
+                0 => Ok((-1, -1)),
+                1 => Ok((0, -1)),
+                2 => Ok((1, -1)),
+                3 => Ok((1, -1)),
+                4 => Ok((2, -1)),
+                5 => Ok((4, -1)),
+                6 => Ok((4, -1)),
+                7 => Ok((4, -1)),
+                8 => Ok((-1, -1)),
+                9 => Ok((6, 7)),
+                10 => Ok((5, -1)),
+                11 => Ok((3, 7)),
+                12 => Ok((9, -1)),
+                13 => Ok((8, -1)),
+                r => Err(GraphError::ParentOutOfRange(r)),
+            }
+        }
+    }
+
+    fn list_ancestors<G: Graph>(
+        graph: G,
+        initrevs: Vec<Revision>,
+        stoprev: Revision,
+        inclusive: bool,
+    ) -> Vec<Revision> {
+        AncestorsIterator::new(graph, initrevs, stoprev, inclusive)
+            .unwrap()
+            .collect()
+    }
+
+    #[test]
+    /// Same tests as test-ancestor.py, without membership
+    /// (see also test-ancestor.py.out)
+    fn test_list_ancestor() {
+        assert_eq!(list_ancestors(Stub, vec![], 0, false), vec![]);
+        assert_eq!(
+            list_ancestors(Stub, vec![11, 13], 0, false),
+            vec![8, 7, 4, 3, 2, 1, 0]
+        );
+        assert_eq!(list_ancestors(Stub, vec![1, 3], 0, false), vec![1, 0]);
+        assert_eq!(
+            list_ancestors(Stub, vec![11, 13], 0, true),
+            vec![13, 11, 8, 7, 4, 3, 2, 1, 0]
+        );
+        assert_eq!(list_ancestors(Stub, vec![11, 13], 6, false), vec![8, 7]);
+        assert_eq!(
+            list_ancestors(Stub, vec![11, 13], 6, true),
+            vec![13, 11, 8, 7]
+        );
+        assert_eq!(list_ancestors(Stub, vec![11, 13], 11, true), vec![13, 11]);
+        assert_eq!(list_ancestors(Stub, vec![11, 13], 12, true), vec![13]);
+        assert_eq!(
+            list_ancestors(Stub, vec![10, 1], 0, true),
+            vec![10, 5, 4, 2, 1, 0]
+        );
+    }
+
+    #[test]
+    /// Corner case that's not directly in test-ancestor.py, but
+    /// that happens quite often, as demonstrated by running the whole
+    /// suite.
+    /// For instance, run tests/test-obsolete-checkheads.t
+    fn test_nullrev_input() {
+        let mut iter =
+            AncestorsIterator::new(Stub, vec![-1], 0, false).unwrap();
+        assert_eq!(iter.next(), None)
+    }
+
+    #[test]
+    fn test_contains() {
+        let mut lazy =
+            AncestorsIterator::new(Stub, vec![10, 1], 0, true).unwrap();
+        assert!(lazy.contains(1));
+        assert!(!lazy.contains(3));
+
+        let mut lazy =
+            AncestorsIterator::new(Stub, vec![0], 0, false).unwrap();
+        assert!(!lazy.contains(NULL_REVISION));
+    }
+
+    /// A corrupted Graph, supporting error handling tests
+    struct Corrupted;
+
+    impl Graph for Corrupted {
+        fn parents(
+            &self,
+            rev: Revision,
+        ) -> Result<(Revision, Revision), GraphError> {
+            match rev {
+                1 => Ok((0, -1)),
+                r => Err(GraphError::ParentOutOfRange(r)),
+            }
+        }
+    }
+
+    #[test]
+    fn test_initrev_out_of_range() {
+        // inclusive=false looks up initrev's parents right away
+        match AncestorsIterator::new(Stub, vec![25], 0, false) {
+            Ok(_) => panic!("Should have been ParentOutOfRange"),
+            Err(e) => assert_eq!(e, GraphError::ParentOutOfRange(25)),
+        }
+    }
+
+    #[test]
+    fn test_next_out_of_range() {
+        // the parents lookup fails upon next() and is swallowed:
+        // revision 0 is emitted, but its parents are not
+        let mut iter =
+            AncestorsIterator::new(Corrupted, vec![1], 0, false).unwrap();
+        assert_eq!(iter.next(), Some(0));
+        assert_eq!(iter.next(), None);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/lib.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,24 @@
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+mod ancestors;
+pub use ancestors::AncestorsIterator;
+
+/// Mercurial revision numbers
+///
+/// As noted in revlog.c, revision numbers are actually encoded in
+/// 4 bytes, and are liberally converted to ints, whence the i32
+pub type Revision = i32;
+
+pub const NULL_REVISION: Revision = -1;
+
+/// The simplest expression of what we need of Mercurial DAGs.
+pub trait Graph {
+    fn parents(&self, Revision) -> Result<(Revision, Revision), GraphError>;
+}
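+
+// Hedged sketch of a minimal implementation: a hypothetical linear DAG
+// where each revision's only parent is its predecessor.
+//
+//     struct Linear(Revision); // total number of revisions
+//
+//     impl Graph for Linear {
+//         fn parents(&self, r: Revision)
+//             -> Result<(Revision, Revision), GraphError> {
+//             if r < 0 || r >= self.0 {
+//                 return Err(GraphError::ParentOutOfRange(r));
+//             }
+//             Ok((r - 1, NULL_REVISION))
+//         }
+//     }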
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum GraphError {
+    ParentOutOfRange(Revision),
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-direct-ffi/Cargo.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,12 @@
+[package]
+name = "hgdirectffi"
+version = "0.1.0"
+authors = ["Georges Racinet <gracinet@anybox.fr>"]
+description = "Low level Python bindings for hg-core, going through existing C extensions"
+
+[dependencies]
+libc = "*"
+hg-core = { path = "../hg-core" }
+
+[lib]
+crate-type = ["staticlib"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-direct-ffi/rustfmt.toml	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,3 @@
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-direct-ffi/src/ancestors.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,264 @@
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for CPython extension code
+//!
+//! This exposes methods to build and use a `rustlazyancestors` iterator
+//! from C code, using an index and its parents function that are passed
+//! from the caller at instantiation.
+
+use hg::AncestorsIterator;
+use hg::{Graph, GraphError, Revision, NULL_REVISION};
+use libc::{c_int, c_long, c_void, ssize_t};
+use std::ptr::null_mut;
+use std::slice;
+
+type IndexPtr = *mut c_void;
+type IndexParentsFn =
+    unsafe extern "C" fn(index: IndexPtr, rev: ssize_t, ps: *mut [c_int; 2], max_rev: c_int)
+        -> c_int;
+
+/// A Graph backed by objects and functions from revlog.c
+///
+/// This implementation of the Graph trait relies on (pointers to)
+/// - the C index object (`index` member)
+/// - the `index_get_parents()` function (`parents` member)
+pub struct Index {
+    index: IndexPtr,
+    parents: IndexParentsFn,
+}
+
+impl Index {
+    pub fn new(index: IndexPtr, parents: IndexParentsFn) -> Self {
+        Index {
+            index: index,
+            parents: parents,
+        }
+    }
+}
+
+impl Graph for Index {
+    /// wrap a call to the C extern parents function
+    fn parents(&self, rev: Revision) -> Result<(Revision, Revision), GraphError> {
+        let mut res: [c_int; 2] = [0; 2];
+        let code =
+            unsafe { (self.parents)(self.index, rev as ssize_t, &mut res as *mut [c_int; 2], rev) };
+        match code {
+            0 => Ok((res[0], res[1])),
+            _ => Err(GraphError::ParentOutOfRange(rev)),
+        }
+    }
+}
+
+/// Wrapping of the AncestorsIterator<Index> constructor, for C callers.
+///
+/// Besides `initrevs`, `stoprev` and `inclusive`, which are converted,
+/// we receive the index and the parents function as pointers.
+#[no_mangle]
+pub extern "C" fn rustlazyancestors_init(
+    index: IndexPtr,
+    parents: IndexParentsFn,
+    initrevslen: usize,
+    initrevs: *mut c_long,
+    stoprev: c_long,
+    inclusive: c_int,
+) -> *mut AncestorsIterator<Index> {
+    unsafe {
+        raw_init(
+            Index::new(index, parents),
+            initrevslen,
+            initrevs,
+            stoprev,
+            inclusive,
+        )
+    }
+}
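+
+// Hedged sketch of the C-side calling sequence (the iterator is an
+// opaque pointer to C; `index` and `index_get_parents` would come from
+// revlog.c):
+//
+//     void *it = rustlazyancestors_init(index, index_get_parents,
+//                                       2, revs, 0, 1);
+//     if (it != NULL) {
+//         long rev = rustlazyancestors_next(it); /* -1 ends iteration */
+//         rustlazyancestors_drop(it);
+//     }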
+
+/// Testable (for any Graph) version of rustlazyancestors_init
+#[inline]
+unsafe fn raw_init<G: Graph>(
+    graph: G,
+    initrevslen: usize,
+    initrevs: *mut c_long,
+    stoprev: c_long,
+    inclusive: c_int,
+) -> *mut AncestorsIterator<G> {
+    let inclb = match inclusive {
+        0 => false,
+        1 => true,
+        _ => {
+            return null_mut();
+        }
+    };
+
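+    // Safety: the caller must guarantee that `initrevs` is valid for
+    // reading `initrevslen` consecutive `c_long` values.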
+    let slice = slice::from_raw_parts(initrevs, initrevslen);
+
+    Box::into_raw(Box::new(match AncestorsIterator::new(
+        graph,
+        slice.into_iter().map(|&r| r as Revision),
+        stoprev as Revision,
+        inclb,
+    ) {
+        Ok(it) => it,
+        Err(_) => {
+            return null_mut();
+        }
+    }))
+}
+
+/// Deallocator to be called from C code
+#[no_mangle]
+pub extern "C" fn rustlazyancestors_drop(raw_iter: *mut AncestorsIterator<Index>) {
+    raw_drop(raw_iter);
+}
+
+/// Testable (for any Graph) version of rustlazyancestors_drop
+#[inline]
+fn raw_drop<G: Graph>(raw_iter: *mut AncestorsIterator<G>) {
+    unsafe {
+        Box::from_raw(raw_iter);
+    }
+}
+
+/// Main iteration method, to be called from C code.
+///
+/// We convert the end of iteration into NULL_REVISION;
+/// it is up to the C wrapper to convert that back into a Python end of
+/// iteration.
+#[no_mangle]
+pub extern "C" fn rustlazyancestors_next(raw: *mut AncestorsIterator<Index>) -> c_long {
+    raw_next(raw)
+}
+
+/// Testable (for any Graph) version of rustlazyancestors_next
+#[inline]
+fn raw_next<G: Graph>(raw: *mut AncestorsIterator<G>) -> c_long {
+    let as_ref = unsafe { &mut *raw };
+    as_ref.next().unwrap_or(NULL_REVISION) as c_long
+}
+
+#[no_mangle]
+pub extern "C" fn rustlazyancestors_contains(
+    raw: *mut AncestorsIterator<Index>,
+    target: c_long,
+) -> c_int {
+    raw_contains(raw, target)
+}
+
+/// Testable (for any Graph) version of rustlazyancestors_contains
+#[inline]
+fn raw_contains<G: Graph>(
+    raw: *mut AncestorsIterator<G>,
+    target: c_long,
+) -> c_int {
+    let as_ref = unsafe { &mut *raw };
+    if as_ref.contains(target as Revision) {
+        return 1;
+    }
+    0
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::thread;
+
+    #[derive(Clone, Debug)]
+    struct Stub;
+
+    impl Graph for Stub {
+        fn parents(&self, r: Revision) -> Result<(Revision, Revision), GraphError> {
+            match r {
+                25 => Err(GraphError::ParentOutOfRange(25)),
+                _ => Ok((1, 2)),
+            }
+        }
+    }
+
+    /// Helper for test_init_next()
+    fn stub_raw_init(
+        initrevslen: usize,
+        initrevs: usize,
+        stoprev: c_long,
+        inclusive: c_int,
+    ) -> usize {
+        unsafe {
+            raw_init(
+                Stub,
+                initrevslen,
+                initrevs as *mut c_long,
+                stoprev,
+                inclusive,
+            ) as usize
+        }
+    }
+
+    fn stub_raw_init_from_vec(
+        mut initrevs: Vec<c_long>,
+        stoprev: c_long,
+        inclusive: c_int,
+    ) -> *mut AncestorsIterator<Stub> {
+        unsafe {
+            raw_init(
+                Stub,
+                initrevs.len(),
+                initrevs.as_mut_ptr(),
+                stoprev,
+                inclusive,
+            )
+        }
+    }
+
+    #[test]
+    // Test what happens when we init an Iterator via the exposed C ABI
+    // and try to use it afterwards.
+    // We spawn a new thread to make memory consistency harder, but this
+    // forces us to convert the pointers into shareable usizes.
+    fn test_init_next() {
+        let mut initrevs: Vec<c_long> = vec![11, 13];
+        let initrevs_len = initrevs.len();
+        let initrevs_ptr = initrevs.as_mut_ptr() as usize;
+        let handler = thread::spawn(move || stub_raw_init(initrevs_len, initrevs_ptr, 0, 1));
+        let raw = handler.join().unwrap() as *mut AncestorsIterator<Stub>;
+
+        assert_eq!(raw_next(raw), 13);
+        assert_eq!(raw_next(raw), 11);
+        assert_eq!(raw_next(raw), 2);
+        assert_eq!(raw_next(raw), 1);
+        assert_eq!(raw_next(raw), NULL_REVISION as c_long);
+        raw_drop(raw);
+    }
+
+    #[test]
+    fn test_init_wrong_bool() {
+        assert_eq!(stub_raw_init_from_vec(vec![11, 13], 0, 2), null_mut());
+    }
+
+    #[test]
+    fn test_empty() {
+        let raw = stub_raw_init_from_vec(vec![], 0, 1);
+        assert_eq!(raw_next(raw), NULL_REVISION as c_long);
+        raw_drop(raw);
+    }
+
+    #[test]
+    fn test_init_err_out_of_range() {
+        assert!(stub_raw_init_from_vec(vec![25], 0, 0).is_null());
+    }
+
+    #[test]
+    fn test_contains() {
+        let raw = stub_raw_init_from_vec(vec![5, 6], 0, 1);
+        assert_eq!(raw_contains(raw, 5), 1);
+        assert_eq!(raw_contains(raw, 2), 1);
+    }
+
+    #[test]
+    fn test_contains_exclusive() {
+        let raw = stub_raw_init_from_vec(vec![5, 6], 0, 0);
+        assert_eq!(raw_contains(raw, 5), 0);
+        assert_eq!(raw_contains(raw, 2), 1);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-direct-ffi/src/lib.rs	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,19 @@
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for CPython extension code
+//!
+//! This exposes methods to build and use a `rustlazyancestors` iterator
+//! from C code, using an index and its parents function that are passed
+//! from the caller at instantiation.
+
+extern crate hg;
+extern crate libc;
+
+mod ancestors;
+pub use ancestors::{
+    rustlazyancestors_contains, rustlazyancestors_drop,
+    rustlazyancestors_init, rustlazyancestors_next,
+};
--- a/setup.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/setup.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,9 @@
     # Mercurial will never work on Python 3 before 3.5 due to a lack
     # of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1
     # due to a bug in % formatting in bytestrings.
+    # We cannot support Python 3.5.0, 3.5.1 and 3.5.2 because of a bug in
+    # codecs.escape_encode() that raises SystemError on an empty bytestring
+    # (https://bugs.python.org/issue25270).
     #
     # TODO: when we actually work on Python 3, use this string as the
     # actual supportedpy string.
@@ -21,6 +24,9 @@
         '!=3.2.*',
         '!=3.3.*',
         '!=3.4.*',
+        '!=3.5.0',
+        '!=3.5.1',
+        '!=3.5.2',
         '!=3.6.0',
         '!=3.6.1',
     ])
@@ -126,6 +132,8 @@
 
 ispypy = "PyPy" in sys.version
 
+iswithrustextensions = 'HGWITHRUSTEXT' in os.environ
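+# Note: this is a presence check; any definition of HGWITHRUSTEXT in the
+# environment (e.g. `HGWITHRUSTEXT=1 make local`) enables the Rust build.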
+
 import ctypes
 import stat, subprocess, time
 import re
@@ -256,7 +264,9 @@
                and not e.startswith(b'warning: Not importing')
                and not e.startswith(b'obsolete feature not enabled')
                and not e.startswith(b'*** failed to import extension')
-               and not e.startswith(b'devel-warn:'))]
+               and not e.startswith(b'devel-warn:')
+               and not (e.startswith(b'(third party extension')
+                        and e.endswith(b'or newer of Mercurial; disabling)')))]
     return b'\n'.join(b'  ' + e for e in err)
 
 def findhg():
@@ -452,6 +462,8 @@
         return build_ext.build_extensions(self)
 
     def build_extension(self, ext):
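+        # build the Rust crate first so that its static library is
+        # available when the C extension is compiled and linked below
+        # (a no-op unless HGWITHRUSTEXT is set, see rustbuild())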
+        if isinstance(ext, RustExtension):
+            ext.rustbuild()
         try:
             build_ext.build_extension(self, ext)
         except CCompilerError:
@@ -604,7 +616,7 @@
 
         if filelen > 0 and filelen != size:
             dllbasename = os.path.basename(buf.value)
-            if not dllbasename.lower().endswith('.dll'):
+            if not dllbasename.lower().endswith(b'.dll'):
                 raise SystemExit('Python DLL does not end with .dll: %s' %
                                  dllbasename)
             pythonlib = dllbasename[:-4]
@@ -617,10 +629,16 @@
 
         log.info('using %s as Python library name' % pythonlib)
         with open('mercurial/hgpythonlib.h', 'wb') as f:
-            f.write('/* this file is autogenerated by setup.py */\n')
-            f.write('#define HGPYTHONLIB "%s"\n' % pythonlib)
+            f.write(b'/* this file is autogenerated by setup.py */\n')
+            f.write(b'#define HGPYTHONLIB "%s"\n' % pythonlib)
+
+        macros = None
+        if sys.version_info[0] >= 3:
+            macros = [('_UNICODE', None), ('UNICODE', None)]
+
         objects = self.compiler.compile(['mercurial/exewrapper.c'],
-                                         output_dir=self.build_temp)
+                                         output_dir=self.build_temp,
+                                         macros=macros)
         dir = os.path.dirname(self.get_ext_fullpath('dummy'))
         self.hgtarget = os.path.join(dir, 'hg')
         self.compiler.link_executable(objects, self.hgtarget,
@@ -812,18 +830,22 @@
             'mercurial.thirdparty.attr',
             'mercurial.thirdparty.cbor',
             'mercurial.thirdparty.cbor.cbor2',
-            'mercurial.thirdparty.concurrent',
-            'mercurial.thirdparty.concurrent.futures',
             'mercurial.thirdparty.zope',
             'mercurial.thirdparty.zope.interface',
             'mercurial.utils',
+            'mercurial.revlogutils',
+            'mercurial.testing',
             'hgext', 'hgext.convert', 'hgext.fsmonitor',
+            'hgext.fastannotate',
             'hgext.fsmonitor.pywatchman',
             'hgext.infinitepush',
             'hgext.highlight',
             'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
             'hgext.zeroconf', 'hgext3rd',
             'hgdemandimport']
+if sys.version_info[0] == 2:
+    packages.extend(['mercurial.thirdparty.concurrent',
+                     'mercurial.thirdparty.concurrent.futures'])
 
 common_depends = ['mercurial/bitmanipulation.h',
                   'mercurial/compat.h',
@@ -872,6 +894,54 @@
     'mercurial/thirdparty/xdiff/xutils.h',
 ]
 
+class RustExtension(Extension):
+    """A C Extension, conditionnally enhanced with Rust code.
+
+    if iswithrustextensions is False, does nothing else than plain Extension
+    """
+
+    rusttargetdir = os.path.join('rust', 'target', 'release')
+
+    def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
+        Extension.__init__(self, mpath, sources, **kw)
+        if not iswithrustextensions:
+            return
+        srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
+        self.libraries.append(rustlibname)
+        self.extra_compile_args.append('-DWITH_RUST')
+
+        # adding Rust source and control files to depends so that the extension
+        # gets rebuilt if they've changed
+        self.depends.append(os.path.join(srcdir, 'Cargo.toml'))
+        cargo_lock = os.path.join(srcdir, 'Cargo.lock')
+        if os.path.exists(cargo_lock):
+            self.depends.append(cargo_lock)
+        for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')):
+            self.depends.extend(os.path.join(dirpath, fname)
+                                for fname in fnames
+                                if os.path.splitext(fname)[1] == '.rs')
+
+    def rustbuild(self):
+        if not iswithrustextensions:
+            return
+        env = os.environ.copy()
+        if 'HGTEST_RESTOREENV' in env:
+            # Mercurial tests change HOME to a temporary directory,
+            # but, if installed with rustup, the Rust toolchain needs
+            # HOME to be correct (otherwise the 'no default toolchain'
+            # error message is issued and the build fails).
+            # This happens currently with test-hghave.t, which does
+            # invoke this build.
+
+            # Unix-only fix (os.path.expanduser is not really reliable if
+            # HOME is shadowed like this)
+            import pwd
+            env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
+
+        subprocess.check_call(['cargo', 'build', '-vv', '--release'],
+                              env=env, cwd=self.rustsrcdir)
+        self.library_dirs.append(self.rusttargetdir)
+
 extmodules = [
     Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'],
               include_dirs=common_include_dirs,
@@ -884,14 +954,19 @@
                                         'mercurial/cext/mpatch.c'],
               include_dirs=common_include_dirs,
               depends=common_depends),
-    Extension('mercurial.cext.parsers', ['mercurial/cext/charencode.c',
-                                         'mercurial/cext/dirs.c',
-                                         'mercurial/cext/manifest.c',
-                                         'mercurial/cext/parsers.c',
-                                         'mercurial/cext/pathencode.c',
-                                         'mercurial/cext/revlog.c'],
-              include_dirs=common_include_dirs,
-              depends=common_depends + ['mercurial/cext/charencode.h']),
+    RustExtension('mercurial.cext.parsers', ['mercurial/cext/charencode.c',
+                                             'mercurial/cext/dirs.c',
+                                             'mercurial/cext/manifest.c',
+                                             'mercurial/cext/parsers.c',
+                                             'mercurial/cext/pathencode.c',
+                                             'mercurial/cext/revlog.c'],
+                  'hgdirectffi',
+                  'hg-direct-ffi',
+                  include_dirs=common_include_dirs,
+                  depends=common_depends + ['mercurial/cext/charencode.h',
+                                            'rust/hg-core/src/lib.rs',
+                                            'rust/hg-core/src/ancestors.rs',
+                                            'rust/hg-direct-ffi/src/lib.rs',
+                                            'rust/hg-direct-ffi/src/ancestors.rs']),
     Extension('mercurial.cext.osutil', ['mercurial/cext/osutil.c'],
               include_dirs=common_include_dirs,
               extra_compile_args=osutil_cflags,
@@ -907,7 +982,9 @@
 
 sys.path.insert(0, 'contrib/python-zstandard')
 import setup_zstd
-extmodules.append(setup_zstd.get_c_extension(name='mercurial.zstd'))
+extmodules.append(setup_zstd.get_c_extension(
+    name='mercurial.zstd',
+    root=os.path.abspath(os.path.dirname(__file__))))
 
 try:
     from distutils import cygwinccompiler
@@ -992,7 +1069,7 @@
 if os.name == 'nt':
     # Windows binary file versions for exe/dll files must have the
     # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
-    setupversion = version.split('+', 1)[0]
+    setupversion = setupversion.split(r'+', 1)[0]
 
 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
     version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/artifacts/PURPOSE	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,9 @@
+This directory is meant to cache artifacts useful for tests (such as bundles).
+
+Those artifacts need to be cached because they are too slow to regenerate on
+each test run but too large to be tracked within the repository. They are not
+expected to change between runs and can therefore be cached.
+
+The `./scripts/` directory contains the code to generate these artifacts,
+while the `cache` directory contains the resulting artifacts.
+
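+For example, the churning bundle can be regenerated by running the
+matching script (assuming a working `hg` is available in PATH):
+
+  $ python ./scripts/generate-churning-bundle.py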
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/artifacts/cache/big-file-churn.hg.md5	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1 @@
+fe0d0bb5979de50f4fed71bb9437764d
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/artifacts/scripts/generate-churning-bundle.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+#
+# generate-churning-bundle - generate a bundle for a "large" repository with a churning file
+#
+# Copyright 2018 Octobus, contact@octobus.net
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# This script generates a repository suitable for testing delta computation
+# strategies.
+#
+# The repository updates a single "large" file with many changesets. One
+# fixed part of the file always gets updated while the rest of the lines
+# get updated over time. These updates happen across many topological
+# branches, some getting merged back.
+#
+# Running with `chg` in your path and `CHGHG` set is recommended for speed.
+
+from __future__ import absolute_import, print_function
+
+import hashlib
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+BUNDLE_NAME = 'big-file-churn.hg'
+
+# constants for generating the repository
+NB_CHANGESET = 5000
+PERIOD_MERGING = 8
+PERIOD_BRANCHING = 7
+MOVE_BACK_MIN = 3
+MOVE_BACK_RANGE = 5
+
+# constants for generating the large file we keep updating
+#
+# At each revision, the beginning of the file changes,
+# and a set of other lines changes too.
+FILENAME = 'SPARSE-REVLOG-TEST-FILE'
+NB_LINES = 10500
+ALWAYS_CHANGE_LINES = 500
+OTHER_CHANGES = 300
+
+def nextcontent(previous_content):
+    """utility to produce a new file content from the previous one"""
+    return hashlib.md5(previous_content).hexdigest()
+
+def filecontent(iteridx, oldcontent):
+    """generate a new file content
+
+    The content is generated according to the iteration index and the
+    previous content"""
+
+    # initial call
+    if iteridx is None:
+        current = ''
+    else:
+        current = str(iteridx)
+
+    for idx in xrange(NB_LINES):
+        do_change_line = True
+        if oldcontent is not None and ALWAYS_CHANGE_LINES < idx:
+            do_change_line = not ((idx - iteridx) % OTHER_CHANGES)
+
+        if do_change_line:
+            to_write = current + '\n'
+            current = nextcontent(current)
+        else:
+            to_write = oldcontent[idx]
+        yield to_write
+
+def updatefile(filename, idx):
+    """update <filename> to be at appropriate content for iteration <idx>"""
+    existing = None
+    if idx is not None:
+        with open(filename, 'rb') as old:
+            existing = old.readlines()
+    with open(filename, 'wb') as target:
+        for line in filecontent(idx, existing):
+            target.write(line)
+
+def hg(command, *args):
+    """call a mercurial command with appropriate config and argument"""
+    env = os.environ.copy()
+    if 'CHGHG' in env:
+        full_cmd = ['chg']
+    else:
+        full_cmd = ['hg']
+    full_cmd.append('--quiet')
+    full_cmd.append(command)
+    if command == 'commit':
+        # reproducible commit metadata
+        full_cmd.extend(['--date', '0 0', '--user', 'test'])
+    elif command == 'merge':
+        # avoid conflicts by picking the local variant
+        full_cmd.extend(['--tool', ':merge-local'])
+    full_cmd.extend(args)
+    env['HGRCPATH'] = ''
+    return subprocess.check_call(full_cmd, env=env)
+
+def run(target):
+    tmpdir = tempfile.mkdtemp(prefix='tmp-hg-test-big-file-bundle-')
+    try:
+        os.chdir(tmpdir)
+        hg('init')
+        updatefile(FILENAME, None)
+        hg('commit', '--addremove', '--message', 'initial commit')
+        for idx in xrange(1, NB_CHANGESET + 1):
+            if sys.stdout.isatty():
+                print("generating commit #%d/%d" % (idx, NB_CHANGESET))
+            if (idx % PERIOD_BRANCHING) == 0:
+                move_back = MOVE_BACK_MIN + (idx % MOVE_BACK_RANGE)
+                hg('update', ".~%d" % move_back)
+            if (idx % PERIOD_MERGING) == 0:
+                hg('merge', 'min(head())')
+            updatefile(FILENAME, idx)
+            hg('commit', '--message', 'commit #%d' % idx)
+        hg('bundle', '--all', target)
+        with open(target, 'rb') as bundle:
+            data = bundle.read()
+            digest = hashlib.md5(data).hexdigest()
+        with open(target + '.md5', 'wb') as md5file:
+            md5file.write(digest + '\n')
+        if sys.stdout.isatty():
+            print('bundle generated at "%s" md5: %s' % (target, digest))
+
+    finally:
+        shutil.rmtree(tmpdir)
+    return 0
+
+if __name__ == '__main__':
+    orig = os.path.realpath(os.path.dirname(sys.argv[0]))
+    target = os.path.join(orig, os.pardir, 'cache', BUNDLE_NAME)
+    sys.exit(run(target))
+
--- a/tests/badserverext.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/badserverext.py	Mon Oct 22 14:46:06 2018 -0400
@@ -238,10 +238,10 @@
             self._ui = ui
             super(badserver, self).__init__(ui, *args, **kwargs)
 
-            recvbytes = self._ui.config('badserver', 'closeafterrecvbytes')
+            recvbytes = self._ui.config(b'badserver', b'closeafterrecvbytes')
             recvbytes = recvbytes.split(',')
             self.closeafterrecvbytes = [int(v) for v in recvbytes if v]
-            sendbytes = self._ui.config('badserver', 'closeaftersendbytes')
+            sendbytes = self._ui.config(b'badserver', b'closeaftersendbytes')
             sendbytes = sendbytes.split(',')
             self.closeaftersendbytes = [int(v) for v in sendbytes if v]
 
@@ -261,7 +261,7 @@
 
         # Called to accept() a pending socket.
         def get_request(self):
-            if self._ui.configbool('badserver', 'closebeforeaccept'):
+            if self._ui.configbool(b'badserver', b'closebeforeaccept'):
                 self.socket.close()
 
                 # Tells the server to stop processing more requests.
--- a/tests/bruterebase.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/bruterebase.py	Mon Oct 22 14:46:06 2018 -0400
@@ -45,7 +45,7 @@
             subset = [rev for j, rev in enumerate(srevs) if i & (1 << j) != 0]
             spec = revsetlang.formatspec(b'%ld', subset)
             tr = repo.transaction(b'rebase')
-            tr.report = lambda x: 0 # hide "transaction abort"
+            tr._report = lambda x: 0 # hide "transaction abort"
 
             ui.pushbuffer()
             try:
--- a/tests/bzr-definitions	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/bzr-definitions	Mon Oct 22 14:46:06 2018 -0400
@@ -6,7 +6,7 @@
 
 glog()
 {
-    hg log -G --template '{rev}@{branch} "{desc|firstline}" files: {files}\n' "$@"
+    hg log -G --template '{rev}@{branch} "{desc|firstline}" files+: [{file_adds}], files-: [{file_dels}], files: [{file_mods}]\n' "$@"
 }
 
 manifest()
--- a/tests/common-pattern.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/common-pattern.py	Mon Oct 22 14:46:06 2018 -0400
@@ -81,7 +81,8 @@
      br'listkeys%0A'
      br'pushkey%0A'
      br'remote-changegroup%3Dhttp%2Chttps%0A'
-     br'rev-branch-cache',
+     br'rev-branch-cache%0A'
+     br'stream%3Dv2',
      # (replacement patterns)
      br'$USUAL_BUNDLE2_CAPS_NO_PHASES$'
     ),
--- a/tests/drawdag.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/drawdag.py	Mon Oct 22 14:46:06 2018 -0400
@@ -288,8 +288,7 @@
             'date': b'0 0',
             'extra': {b'branch': b'default'},
         }
-        super(simplecommitctx, self).__init__(self, name, **opts)
-        self._repo = repo
+        super(simplecommitctx, self).__init__(repo, name, **opts)
         self._added = added
         self._parents = parentctxs
         while len(self._parents) < 2:
--- a/tests/dummysmtpd.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/dummysmtpd.py	Mon Oct 22 14:46:06 2018 -0400
@@ -26,7 +26,7 @@
     def __init__(self, localaddr):
         smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None)
 
-    def process_message(self, peer, mailfrom, rcpttos, data):
+    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
         log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos)))
 
     def handle_error(self):
--- a/tests/f	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/f	Mon Oct 22 14:46:06 2018 -0400
@@ -66,7 +66,7 @@
         elif islink:
             if opts.type:
                 facts.append(b'link')
-            content = os.readlink(f)
+            content = os.readlink(f).encode('utf8')
         elif isstdin:
             content = getattr(sys.stdin, 'buffer', sys.stdin).read()
             if opts.size:
@@ -88,9 +88,11 @@
             if opts.newer:
                 # mtime might be in whole seconds so newer file might be same
                 if stat.st_mtime >= os.stat(opts.newer).st_mtime:
-                    facts.append(b'newer than %s' % opts.newer)
+                    facts.append(b'newer than %s' % opts.newer.encode(
+                        'utf8', 'replace'))
                 else:
-                    facts.append(b'older than %s' % opts.newer)
+                    facts.append(b'older than %s' % opts.newer.encode(
+                        'utf8', 'replace'))
         if opts.md5 and content is not None:
             h = hashlib.md5(content)
             facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes])
--- a/tests/fsmonitor-run-tests.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/fsmonitor-run-tests.py	Mon Oct 22 14:46:06 2018 -0400
@@ -123,6 +123,12 @@
         runtestsargv.extend([
             '--extra-config',
             'extensions.fsmonitor=',
+            # always specify fsmonitor.mode=paranoid in order to force
+            # the fsmonitor extension to execute its "paranoid" code path
+            #
+            # TODO: make fsmonitor-run-tests.py accept specific options
+            '--extra-config',
+            'fsmonitor.mode=paranoid',
             '--blacklist',
             blacklist,
         ])
--- a/tests/get-with-headers.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/get-with-headers.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,6 +11,7 @@
 import sys
 
 from mercurial import (
+    pycompat,
     util,
 )
 
@@ -90,7 +91,7 @@
             data = json.loads(data)
             lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
             for line in lines:
-                bodyfh.write(line.rstrip())
+                bodyfh.write(pycompat.sysbytes(line.rstrip()))
                 bodyfh.write(b'\n')
         else:
             bodyfh.write(data)
--- a/tests/heredoctest.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/heredoctest.py	Mon Oct 22 14:46:06 2018 -0400
@@ -2,6 +2,10 @@
 
 import sys
 
+def flush():
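+    # flush both streams so that output from the executed snippets
+    # interleaves deterministically with the driver's own output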
+    sys.stdout.flush()
+    sys.stderr.flush()
+
 globalvars = {}
 lines = sys.stdin.readlines()
 while lines:
@@ -15,6 +19,9 @@
             snippet += l[4:]
         c = compile(snippet, '<heredoc>', 'single')
         try:
+            flush()
             exec(c, globalvars)
+            flush()
         except Exception as inst:
+            flush()
             print(repr(inst))
--- a/tests/hghave.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/hghave.py	Mon Oct 22 14:46:06 2018 -0400
@@ -16,6 +16,22 @@
     "false": (lambda: False, "nail clipper"),
 }
 
+if sys.version_info[0] >= 3:
+    def _bytespath(p):
+        if p is None:
+            return p
+        return p.encode('utf-8')
+
+    def _strpath(p):
+        if p is None:
+            return p
+        return p.decode('utf-8')
+else:
+    def _bytespath(p):
+        return p
+
+    _strpath = _bytespath
+
 def check(name, desc):
     """Registers a check function for a feature."""
     def decorator(func):
@@ -360,7 +376,7 @@
     os.close(fh)
     name = tempfile.mktemp(dir='.', prefix=tempprefix)
     try:
-        util.oslink(fn, name)
+        util.oslink(_bytespath(fn), _bytespath(name))
         os.unlink(name)
         return True
     except OSError:
@@ -625,23 +641,13 @@
     # chg disables demandimport intentionally for performance wins.
     return ((not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable')
 
-@check("py3k", "running with Python 3.x")
-def has_py3k():
+@check("py3", "running with Python 3.x")
+def has_py3():
     return 3 == sys.version_info[0]
 
 @check("py3exe", "a Python 3.x interpreter is available")
 def has_python3exe():
-    return 'PYTHON3' in os.environ
-
-@check("py3pygments", "Pygments available on Python 3.x")
-def has_py3pygments():
-    if has_py3k():
-        return has_pygments()
-    elif has_python3exe():
-        # just check exit status (ignoring output)
-        py3 = os.environ['PYTHON3']
-        return matchoutput('%s -c "import pygments"' % py3, br'')
-    return False
+    return matchoutput('python3 -V', br'^Python 3\.(5|6|7|8|9)')
 
 @check("pure", "running with pure Python code")
 def has_pure():
@@ -780,3 +786,23 @@
 @check('repofncache', 'repository has an fncache')
 def has_repofncache():
     return 'fncache' in getrepofeatures()
+
+@check('sqlite', 'sqlite3 module is available')
+def has_sqlite():
+    try:
+        import sqlite3
+        sqlite3.sqlite_version
+    except ImportError:
+        return False
+
+    return matchoutput('sqlite3 -version', br'^3\.\d+')
+
+@check('vcr', 'vcr http mocking library')
+def has_vcr():
+    try:
+        import vcr
+        vcr.VCR
+        return True
+    except (ImportError, AttributeError):
+        pass
+    return False
--- a/tests/hgweberror.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/hgweberror.py	Mon Oct 22 14:46:06 2018 -0400
@@ -10,7 +10,7 @@
     '''Dummy web command that raises an uncaught Exception.'''
 
     # Simulate an error after partial response.
-    if 'partialresponse' in web.req.qsparams:
+    if b'partialresponse' in web.req.qsparams:
         web.res.status = b'200 Script output follows'
         web.res.headers[b'Content-Type'] = b'text/plain'
         web.res.setbodywillwrite()
@@ -21,4 +21,4 @@
 
 def extsetup(ui):
     setattr(webcommands, 'raiseerror', raiseerror)
-    webcommands.__all__.append('raiseerror')
+    webcommands.__all__.append(b'raiseerror')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/accept-4564.json	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,138 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&ids%5B0%5D=4564", 
+                "headers": {
+                    "content-length": [
+                        "58"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ]
+                }, 
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query"
+            }, 
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "headers": {
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F24j2baem5tmap4tvfdz7ufmca2lhm3wx4agyqv4w; expires=Thu, 14-Sep-2023 04:24:35 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "connection": [
+                        "close"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:24:35 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4564\",\"phid\":\"PHID-DREV-6cgnf5fyeeqhntbxgfb7\",\"title\":\"localrepo: move some vfs initialization out of __init__\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4564\",\"dateCreated\":\"1536856174\",\"dateModified\":\"1536856175\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\"In order to make repository types more dynamic, we'll need to move the\\nlogic for determining repository behavior out of\\nlocalrepository.__init__ so we can influence behavior before the type\\nis instantiated.\\n\\nThis commit starts that process by moving working directory and .hg\\/\\nvfs initialization to our new standalone function for instantiating\\nlocal repositories.\\n\\nAside from API changes, behavior should be fully backwards compatible.\\n\\n.. api::\\n\\n   localrepository.__init__ now does less work and accepts new args\\n\\n   Use ``hg.repository()``, ``localrepo.instance()``, or\\n   ``localrepo.makelocalrepository()`` to obtain a new local repository\\n   instance instead of calling the ``localrepository`` constructor\\n   directly.\",\"testPlan\":\"\",\"lineCount\":\"64\",\"activeDiffPHID\":\"PHID-DIFF-ir6bizkdou7fm7xhuo6v\",\"diffs\":[\"11002\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-gqp33hnxg65vkl3xioka\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&objectIdentifier=PHID-DREV-6cgnf5fyeeqhntbxgfb7&transactions%5B0%5D%5Btype%5D=accept&transactions%5B0%5D%5Bvalue%5D=true&transactions%5B1%5D%5Btype%5D=comment&transactions%5B1%5D%5Bvalue%5D=I+think+I+like+where+this+is+headed.+Will+read+rest+of+series+later.", 
+                "headers": {
+                    "content-length": [
+                        "301"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ]
+                }, 
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit"
+            }, 
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "headers": {
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fcna7xx3xon5xxyoasbveqlfz4fswd2risihw7dff; expires=Thu, 14-Sep-2023 04:24:36 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:24:36 GMT"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":\"4564\",\"phid\":\"PHID-DREV-6cgnf5fyeeqhntbxgfb7\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-nfqswjwwfuzdrhb\"},{\"phid\":\"PHID-XACT-DREV-oqb5pkqsdify6nm\"},{\"phid\":\"PHID-XACT-DREV-i6epvc7avyv3ve7\"},{\"phid\":\"PHID-XACT-DREV-du5hbg5rege3i5w\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }
+    ]
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabread-4480.json	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,200 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "response": {
+                "headers": {
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F6ypywsajmaqsclzrydncbnegfczzct2m5c4wovqw; expires=Thu, 14-Sep-2023 04:15:56 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:15:56 GMT"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4480\",\"phid\":\"PHID-DREV-gsa7dkuimmam7nafw7h3\",\"title\":\"exchangev2: start to implement pull with wire protocol v2\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4480\",\"dateCreated\":\"1536164431\",\"dateModified\":\"1536981352\",\"authorPHID\":\"PHID-USER-p54bpwbifxx7sbgpx47d\",\"status\":\"3\",\"statusName\":\"Closed\",\"properties\":{\"wasAcceptedBeforeClose\":false},\"branch\":null,\"summary\":\"Wire protocol version 2 will take a substantially different\\napproach to exchange than version 1 (at least as far as pulling\\nis concerned).\\n\\nThis commit establishes a new exchangev2 module for holding\\ncode related to exchange using wire protocol v2. I could have\\nadded things to the existing exchange module. But it is already\\nquite big. And doing things inline isn't in question because\\nthe existing code is already littered with conditional code\\nfor various states of support for the existing wire protocol\\nas it evolved over 10+ years. A new module gives us a chance\\nto make a clean break.\\n\\nThis approach does mean we'll end up writing some duplicate\\ncode. And there's a significant chance we'll miss functionality\\nas code is ported. The plan is to eventually add #testcase's\\nto existing tests so the new wire protocol is tested side-by-side\\nwith the existing one. This will hopefully tease out any\\nfeatures that weren't ported properly. But before we get there,\\nwe need to build up support for the new exchange methods.\\n\\nOur journey towards implementing a new exchange begins with pulling.\\nAnd pulling begins with discovery.\\n\\nThe discovery code added to exchangev2 is heavily drawn from\\nthe following functions:\\n\\n* exchange._pulldiscoverychangegroup\\n* discovery.findcommonincoming\\n\\nFor now, we build on top of existing discovery mechanisms. The\\nnew wire protocol should be capable of doing things more efficiently.\\nBut I'd rather defer on this problem.\\n\\nTo foster the transition, we invent a fake capability on the HTTPv2\\npeer and have the main pull code in exchange.py call into exchangev2\\nwhen the new wire protocol is being used.\",\"testPlan\":\"\",\"lineCount\":\"145\",\"activeDiffPHID\":\"PHID-DIFF-kg2rt6kiekgo5rgyeu5n\",\"diffs\":[\"11058\",\"10961\",\"10793\"],\"commits\":[\"PHID-CMIT-kvz2f3rczvi6exmvtyaq\"],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-a77jfv32jtxfwxngd6bd\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\"}],\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "58"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ]
+                }, 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "method": "POST", 
+                "body": "ids%5B0%5D=4480&api.token=cli-hahayouwish"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Floppdxhbjv46vg5mwnf2squrj4vgegsce5fwhhb6; expires=Thu, 14-Sep-2023 04:15:57 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:15:57 GMT"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"11058\":{\"id\":\"11058\",\"revisionID\":\"4480\",\"dateCreated\":\"1536771503\",\"dateModified\":\"1536981352\",\"sourceControlBaseRevision\":\"a5de21c9e3703f8e8eb064bd7d893ff2f703c66a\",\"sourceControlPath\":null,\"sourceControlSystem\":\"hg\",\"branch\":null,\"bookmark\":null,\"creationMethod\":\"commit\",\"description\":\"rHGa86d21e70b2b79d5e7e1085e5e755b4b26b8676d\",\"unitStatus\":\"6\",\"lintStatus\":\"6\",\"changes\":[{\"id\":\"24371\",\"metadata\":{\"line:first\":59},\"oldPath\":\"tests\\/wireprotohelpers.sh\",\"currentPath\":\"tests\\/wireprotohelpers.sh\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"7\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"58\",\"newLength\":\"65\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" HTTPV2=exp-http-v2-0001\\n MEDIATYPE=application\\/mercurial-exp-framing-0005\\n \\n sendhttpraw() {\\n   hg --verbose debugwireproto --peer raw http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peer() {\\n   hg --verbose debugwireproto --nologhandshake --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n sendhttpv2peerhandshake() {\\n   hg --verbose debugwireproto --peer http2 http:\\/\\/$LOCALIP:$HGPORT\\/\\n }\\n \\n cat \\u003e dummycommands.py \\u003c\\u003c EOF\\n from mercurial import (\\n     wireprototypes,\\n     wireprotov1server,\\n     wireprotov2server,\\n )\\n \\n @wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv1(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadonly bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')\\n def customreadonlyv2(repo, proto):\\n     yield b'customreadonly bytes response'\\n \\n @wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwrite(repo, proto):\\n     return wireprototypes.bytesresponse(b'customreadwrite bytes response')\\n \\n @wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')\\n def customreadwritev2(repo, proto):\\n     yield b'customreadwrite bytes response'\\n EOF\\n \\n cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n drawdag = $TESTDIR\\/drawdag.py\\n EOF\\n \\n enabledummycommands() {\\n   cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n [extensions]\\n dummycommands = $TESTTMP\\/dummycommands.py\\n EOF\\n }\\n \\n enablehttpv2() {\\n   cat \\u003e\\u003e $1\\/.hg\\/hgrc \\u003c\\u003c EOF\\n [experimental]\\n web.apiserver = true\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\n\"}]},{\"id\":\"24370\",\"metadata\":{\"line:first\":1},\"oldPath\":null,\"currentPath\":\"tests\\/test-wireproto-exchangev2.t\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"53\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"53\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled 
via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\n\"}]},{\"id\":\"24369\",\"metadata\":{\"line:first\":805},\"oldPath\":\"mercurial\\/httppeer.py\",\"currentPath\":\"mercurial\\/httppeer.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"2\",\"delLines\":\"1\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"1006\",\"newLength\":\"1007\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # httppeer.py - HTTP repository proxy classes for mercurial\\n #\\n # Copyright 2005, 2006 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n # Copyright 2006 Vadim Gelfer \\u003cvadim.gelfer@gmail.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import errno\\n import io\\n import os\\n import socket\\n import struct\\n import weakref\\n \\n from .i18n import _\\n from . import (\\n     bundle2,\\n     error,\\n     httpconnection,\\n     pycompat,\\n     repository,\\n     statichttprepo,\\n     url as urlmod,\\n     util,\\n     wireprotoframing,\\n     wireprototypes,\\n     wireprotov1peer,\\n     wireprotov2peer,\\n     wireprotov2server,\\n )\\n from .utils import (\\n     cborutil,\\n     interfaceutil,\\n     stringutil,\\n )\\n \\n httplib = util.httplib\\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n def encodevalueinheaders(value, header, limit):\\n     \\\"\\\"\\\"Encode a string value into multiple HTTP headers.\\n \\n     ``value`` will be encoded into 1 or more HTTP headers with the names\\n     ``header-\\u003cN\\u003e`` where ``\\u003cN\\u003e`` is an integer starting at 1. 
Each header\\n     name + value will be at most ``limit`` bytes long.\\n \\n     Returns an iterable of 2-tuples consisting of header names and\\n     values as native strings.\\n     \\\"\\\"\\\"\\n     # HTTP Headers are ASCII. Python 3 requires them to be unicodes,\\n     # not bytes. This function always takes bytes in as arguments.\\n     fmt = pycompat.strurl(header) + r'-%s'\\n     # Note: it is *NOT* a bug that the last bit here is a bytestring\\n     # and not a unicode: we're just getting the encoded length anyway,\\n     # and using an r-string to make it portable between Python 2 and 3\\n     # doesn't work because then the \\\\r is a literal backslash-r\\n     # instead of a carriage return.\\n     valuelen = limit - len(fmt % r'000') - len(': \\\\r\\\\n')\\n     result = []\\n \\n     n = 0\\n     for i in pycompat.xrange(0, len(value), valuelen):\\n         n += 1\\n         result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))\\n \\n     return result\\n \\n def _wraphttpresponse(resp):\\n     \\\"\\\"\\\"Wrap an HTTPResponse with common error handlers.\\n \\n     This ensures that any I\\/O from any consumer raises the appropriate\\n     error and messaging.\\n     \\\"\\\"\\\"\\n     origread = resp.read\\n \\n     class readerproxy(resp.__class__):\\n         def read(self, size=None):\\n             try:\\n                 return origread(size)\\n             except httplib.IncompleteRead as e:\\n                 # e.expected is an integer if length known or None otherwise.\\n                 if e.expected:\\n                     got = len(e.partial)\\n                     total = e.expected + got\\n                     msg = _('HTTP request error (incomplete response; '\\n                             'expected %d bytes got %d)') % (total, got)\\n                 else:\\n                     msg = _('HTTP request error (incomplete response)')\\n \\n                 raise error.PeerTransportError(\\n                     msg,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n             except httplib.HTTPException as e:\\n                 raise error.PeerTransportError(\\n                     _('HTTP request error (%s)') % e,\\n                     hint=_('this may be an intermittent network failure; '\\n                            'if the error persists, consider contacting the '\\n                            'network or server operator'))\\n \\n     resp.__class__ = readerproxy\\n \\n class _multifile(object):\\n     def __init__(self, *fileobjs):\\n         for f in fileobjs:\\n             if not util.safehasattr(f, 'length'):\\n                 raise ValueError(\\n                     '_multifile only supports file objects that '\\n                     'have a length but this one does not:', type(f), f)\\n         self._fileobjs = fileobjs\\n         self._index = 0\\n \\n     @property\\n     def length(self):\\n         return sum(f.length for f in self._fileobjs)\\n \\n     def read(self, amt=None):\\n         if amt \\u003c= 0:\\n             return ''.join(f.read() for f in self._fileobjs)\\n         parts = []\\n         while amt and self._index \\u003c len(self._fileobjs):\\n             parts.append(self._fileobjs[self._index].read(amt))\\n             got = len(parts[-1])\\n             if got \\u003c amt:\\n                 self._index += 1\\n             amt -= got\\n   
      return ''.join(parts)\\n \\n     def seek(self, offset, whence=os.SEEK_SET):\\n         if whence != os.SEEK_SET:\\n             raise NotImplementedError(\\n                 '_multifile does not support anything other'\\n                 ' than os.SEEK_SET for whence on seek()')\\n         if offset != 0:\\n             raise NotImplementedError(\\n                 '_multifile only supports seeking to start, but that '\\n                 'could be fixed if you need it')\\n         for f in self._fileobjs:\\n             f.seek(0)\\n         self._index = 0\\n \\n def makev1commandrequest(ui, requestbuilder, caps, capablefn,\\n                          repobaseurl, cmd, args):\\n     \\\"\\\"\\\"Make an HTTP request to run a command for a version 1 client.\\n \\n     ``caps`` is a set of known server capabilities. The value may be\\n     None if capabilities are not yet known.\\n \\n     ``capablefn`` is a function to evaluate a capability.\\n \\n     ``cmd``, ``args``, and ``data`` define the command, its arguments, and\\n     raw data to pass to it.\\n     \\\"\\\"\\\"\\n     if cmd == 'pushkey':\\n         args['data'] = ''\\n     data = args.pop('data', None)\\n     headers = args.pop('headers', {})\\n \\n     ui.debug(\\\"sending %s command\\\\n\\\" % cmd)\\n     q = [('cmd', cmd)]\\n     headersize = 0\\n     # Important: don't use self.capable() here or else you end up\\n     # with infinite recursion when trying to look up capabilities\\n     # for the first time.\\n     postargsok = caps is not None and 'httppostargs' in caps\\n \\n     # Send arguments via POST.\\n     if postargsok and args:\\n         strargs = urlreq.urlencode(sorted(args.items()))\\n         if not data:\\n             data = strargs\\n         else:\\n             if isinstance(data, bytes):\\n                 i = io.BytesIO(data)\\n                 i.length = len(data)\\n                 data = i\\n             argsio = io.BytesIO(strargs)\\n             argsio.length = len(strargs)\\n             data = _multifile(argsio, data)\\n         headers[r'X-HgArgs-Post'] = len(strargs)\\n     elif args:\\n         # Calling self.capable() can infinite loop if we are calling\\n         # \\\"capabilities\\\". But that command should never accept wire\\n         # protocol arguments. 
So this should never happen.\\n         assert cmd != 'capabilities'\\n         httpheader = capablefn('httpheader')\\n         if httpheader:\\n             headersize = int(httpheader.split(',', 1)[0])\\n \\n         # Send arguments via HTTP headers.\\n         if headersize \\u003e 0:\\n             # The headers can typically carry more data than the URL.\\n             encargs = urlreq.urlencode(sorted(args.items()))\\n             for header, value in encodevalueinheaders(encargs, 'X-HgArg',\\n                                                       headersize):\\n                 headers[header] = value\\n         # Send arguments via query string (Mercurial \\u003c1.9).\\n         else:\\n             q += sorted(args.items())\\n \\n     qs = '?%s' % urlreq.urlencode(q)\\n     cu = \\\"%s%s\\\" % (repobaseurl, qs)\\n     size = 0\\n     if util.safehasattr(data, 'length'):\\n         size = data.length\\n     elif data is not None:\\n         size = len(data)\\n     if data is not None and r'Content-Type' not in headers:\\n         headers[r'Content-Type'] = r'application\\/mercurial-0.1'\\n \\n     # Tell the server we accept application\\/mercurial-0.2 and multiple\\n     # compression formats if the server is capable of emitting those\\n     # payloads.\\n     # Note: Keep this set empty by default, as client advertisement of\\n     # protocol parameters should only occur after the handshake.\\n     protoparams = set()\\n \\n     mediatypes = set()\\n     if caps is not None:\\n         mt = capablefn('httpmediatype')\\n         if mt:\\n             protoparams.add('0.1')\\n             mediatypes = set(mt.split(','))\\n \\n         protoparams.add('partial-pull')\\n \\n     if '0.2tx' in mediatypes:\\n         protoparams.add('0.2')\\n \\n     if '0.2tx' in mediatypes and capablefn('compression'):\\n         # We \\/could\\/ compare supported compression formats and prune\\n         # non-mutually supported or error if nothing is mutually supported.\\n         # For now, send the full list to the server and have it error.\\n         comps = [e.wireprotosupport().name for e in\\n                  util.compengines.supportedwireengines(util.CLIENTROLE)]\\n         protoparams.add('comp=%s' % ','.join(comps))\\n \\n     if protoparams:\\n         protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),\\n                                             'X-HgProto',\\n                                             headersize or 1024)\\n         for header, value in protoheaders:\\n             headers[header] = value\\n \\n     varyheaders = []\\n     for header in headers:\\n         if header.lower().startswith(r'x-hg'):\\n             varyheaders.append(header)\\n \\n     if varyheaders:\\n         headers[r'Vary'] = r','.join(sorted(varyheaders))\\n \\n     req = requestbuilder(pycompat.strurl(cu), data, headers)\\n \\n     if data is not None:\\n         ui.debug(\\\"sending %d bytes\\\\n\\\" % size)\\n         req.add_unredirected_header(r'Content-Length', r'%d' % size)\\n \\n     return req, cu, qs\\n \\n def _reqdata(req):\\n     \\\"\\\"\\\"Get request data, if any. 
If no data, returns None.\\\"\\\"\\\"\\n     if pycompat.ispy3:\\n         return req.data\\n     if not req.has_data():\\n         return None\\n     return req.get_data()\\n \\n def sendrequest(ui, opener, req):\\n     \\\"\\\"\\\"Send a prepared HTTP request.\\n \\n     Returns the response object.\\n     \\\"\\\"\\\"\\n     dbg = ui.debug\\n     if (ui.debugflag\\n         and ui.configbool('devel', 'debug.peer-request')):\\n         line = 'devel-peer-request: %s\\\\n'\\n         dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),\\n                               pycompat.bytesurl(req.get_full_url())))\\n         hgargssize = None\\n \\n         for header, value in sorted(req.header_items()):\\n             header = pycompat.bytesurl(header)\\n             value = pycompat.bytesurl(value)\\n             if header.startswith('X-hgarg-'):\\n                 if hgargssize is None:\\n                     hgargssize = 0\\n                 hgargssize += len(value)\\n             else:\\n                 dbg(line % '  %s %s' % (header, value))\\n \\n         if hgargssize is not None:\\n             dbg(line % '  %d bytes of commands arguments in headers'\\n                 % hgargssize)\\n         data = _reqdata(req)\\n         if data is not None:\\n             length = getattr(data, 'length', None)\\n             if length is None:\\n                 length = len(data)\\n             dbg(line % '  %d bytes of data' % length)\\n \\n         start = util.timer()\\n \\n     res = None\\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as inst:\\n         if inst.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n         raise\\n     except httplib.HTTPException as inst:\\n         ui.debug('http error requesting %s\\\\n' %\\n                  util.hidepassword(req.get_full_url()))\\n         ui.traceback()\\n         raise IOError(None, inst)\\n     finally:\\n         if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):\\n             code = res.code if res else -1\\n             dbg(line % '  finished in %.4f seconds (%d)'\\n                 % (util.timer() - start, code))\\n \\n     # Insert error handlers for common I\\/O failures.\\n     _wraphttpresponse(res)\\n \\n     return res\\n \\n class RedirectedRepoError(error.RepoError):\\n     def __init__(self, msg, respurl):\\n         super(RedirectedRepoError, self).__init__(msg)\\n         self.respurl = respurl\\n \\n def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,\\n                            allowcbor=False):\\n     # record the url we got redirected to\\n     redirected = False\\n     respurl = pycompat.bytesurl(resp.geturl())\\n     if respurl.endswith(qs):\\n         respurl = respurl[:-len(qs)]\\n         qsdropped = False\\n     else:\\n         qsdropped = True\\n \\n     if baseurl.rstrip('\\/') != respurl.rstrip('\\/'):\\n         redirected = True\\n         if not ui.quiet:\\n             ui.warn(_('real URL is %s\\\\n') % respurl)\\n \\n     try:\\n         proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))\\n     except AttributeError:\\n         proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))\\n \\n     safeurl = util.hidepassword(baseurl)\\n     if proto.startswith('application\\/hg-error'):\\n         raise error.OutOfBandError(resp.read())\\n \\n     # Pre 1.0 versions of Mercurial used text\\/plain and\\n     # application\\/hg-changegroup. 
We don't support such old servers.\\n     if not proto.startswith('application\\/mercurial-'):\\n         ui.debug(\\\"requested URL: '%s'\\\\n\\\" % util.hidepassword(requrl))\\n         msg = _(\\\"'%s' does not appear to be an hg repository:\\\\n\\\"\\n                 \\\"---%%\\u003c--- (%s)\\\\n%s\\\\n---%%\\u003c---\\\\n\\\") % (\\n             safeurl, proto or 'no content-type', resp.read(1024))\\n \\n         # Some servers may strip the query string from the redirect. We\\n         # raise a special error type so callers can react to this specially.\\n         if redirected and qsdropped:\\n             raise RedirectedRepoError(msg, respurl)\\n         else:\\n             raise error.RepoError(msg)\\n \\n     try:\\n         subtype = proto.split('-', 1)[1]\\n \\n         # Unless we end up supporting CBOR in the legacy wire protocol,\\n         # this should ONLY be encountered for the initial capabilities\\n         # request during handshake.\\n         if subtype == 'cbor':\\n             if allowcbor:\\n                 return respurl, proto, resp\\n             else:\\n                 raise error.RepoError(_('unexpected CBOR response from '\\n                                         'server'))\\n \\n         version_info = tuple([int(n) for n in subtype.split('.')])\\n     except ValueError:\\n         raise error.RepoError(_(\\\"'%s' sent a broken Content-Type \\\"\\n                                 \\\"header (%s)\\\") % (safeurl, proto))\\n \\n     # TODO consider switching to a decompression reader that uses\\n     # generators.\\n     if version_info == (0, 1):\\n         if compressible:\\n             resp = util.compengines['zlib'].decompressorreader(resp)\\n \\n     elif version_info == (0, 2):\\n         # application\\/mercurial-0.2 always identifies the compression\\n         # engine in the payload header.\\n         elen = struct.unpack('B', util.readexactly(resp, 1))[0]\\n         ename = util.readexactly(resp, elen)\\n         engine = util.compengines.forwiretype(ename)\\n \\n         resp = engine.decompressorreader(resp)\\n     else:\\n         raise error.RepoError(_(\\\"'%s' uses newer protocol %s\\\") %\\n                               (safeurl, subtype))\\n \\n     return respurl, proto, resp\\n \\n class httppeer(wireprotov1peer.wirepeer):\\n     def __init__(self, ui, path, url, opener, requestbuilder, caps):\\n         self.ui = ui\\n         self._path = path\\n         self._url = url\\n         self._caps = caps\\n         self._urlopener = opener\\n         self._requestbuilder = requestbuilder\\n \\n     def __del__(self):\\n         for h in self._urlopener.handlers:\\n             h.close()\\n             getattr(h, \\\"close_all\\\", lambda: None)()\\n \\n     # Begin of ipeerconnection interface.\\n \\n     def url(self):\\n         return self._path\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         return True\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection interface.\\n \\n     # Begin of ipeercommands interface.\\n \\n     def capabilities(self):\\n         return self._caps\\n \\n     # End of ipeercommands interface.\\n \\n     def _callstream(self, cmd, _compressible=False, **args):\\n         args = pycompat.byteskwargs(args)\\n \\n         req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,\\n                                            self._caps, self.capable,\\n                              
              self._url, cmd, args)\\n \\n         resp = sendrequest(self.ui, self._urlopener, req)\\n \\n         self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,\\n                                                      resp, _compressible)\\n \\n         return resp\\n \\n     def _call(self, cmd, **args):\\n         fp = self._callstream(cmd, **args)\\n         try:\\n             return fp.read()\\n         finally:\\n             # if using keepalive, allow connection to be reused\\n             fp.close()\\n \\n     def _callpush(self, cmd, cg, **args):\\n         # have to stream bundle to a temp file because we do not have\\n         # http 1.1 chunked transfer.\\n \\n         types = self.capable('unbundle')\\n         try:\\n             types = types.split(',')\\n         except AttributeError:\\n             # servers older than d1b16a746db6 will send 'unbundle' as a\\n             # boolean capability. They only support headerless\\/uncompressed\\n             # bundles.\\n             types = [\\\"\\\"]\\n         for x in types:\\n             if x in bundle2.bundletypes:\\n                 type = x\\n                 break\\n \\n         tempname = bundle2.writebundle(self.ui, cg, None, type)\\n         fp = httpconnection.httpsendfile(self.ui, tempname, \\\"rb\\\")\\n         headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n \\n         try:\\n             r = self._call(cmd, data=fp, headers=headers, **args)\\n             vals = r.split('\\\\n', 1)\\n             if len(vals) \\u003c 2:\\n                 raise error.ResponseError(_(\\\"unexpected response:\\\"), r)\\n             return vals\\n         except urlerr.httperror:\\n             # Catch and re-raise these so we don't try and treat them\\n             # like generic socket errors. 
They lack any values in\\n             # .args on Python 3 which breaks our socket.error block.\\n             raise\\n         except socket.error as err:\\n             if err.args[0] in (errno.ECONNRESET, errno.EPIPE):\\n                 raise error.Abort(_('push failed: %s') % err.args[1])\\n             raise error.Abort(err.args[1])\\n         finally:\\n             fp.close()\\n             os.unlink(tempname)\\n \\n     def _calltwowaystream(self, cmd, fp, **args):\\n         fh = None\\n         fp_ = None\\n         filename = None\\n         try:\\n             # dump bundle to disk\\n             fd, filename = pycompat.mkstemp(prefix=\\\"hg-bundle-\\\", suffix=\\\".hg\\\")\\n             fh = os.fdopen(fd, r\\\"wb\\\")\\n             d = fp.read(4096)\\n             while d:\\n                 fh.write(d)\\n                 d = fp.read(4096)\\n             fh.close()\\n             # start http push\\n             fp_ = httpconnection.httpsendfile(self.ui, filename, \\\"rb\\\")\\n             headers = {r'Content-Type': r'application\\/mercurial-0.1'}\\n             return self._callstream(cmd, data=fp_, headers=headers, **args)\\n         finally:\\n             if fp_ is not None:\\n                 fp_.close()\\n             if fh is not None:\\n                 fh.close()\\n                 os.unlink(filename)\\n \\n     def _callcompressable(self, cmd, **args):\\n         return self._callstream(cmd, _compressible=True, **args)\\n \\n     def _abort(self, exception):\\n         raise exception\\n \\n def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):\\n     reactor = wireprotoframing.clientreactor(hasmultiplesend=False,\\n                                              buffersends=True)\\n \\n     handler = wireprotov2peer.clienthandler(ui, reactor)\\n \\n     url = '%s\\/%s' % (apiurl, permission)\\n \\n     if len(requests) \\u003e 1:\\n         url += '\\/multirequest'\\n     else:\\n         url += '\\/%s' % requests[0][0]\\n \\n     ui.debug('sending %d commands\\\\n' % len(requests))\\n     for command, args, f in requests:\\n         ui.debug('sending command %s: %s\\\\n' % (\\n             command, stringutil.pprint(args, indent=2)))\\n         assert not list(handler.callcommand(command, args, f))\\n \\n     # TODO stream this.\\n     body = b''.join(map(bytes, handler.flushcommands()))\\n \\n     # TODO modify user-agent to reflect v2\\n     headers = {\\n         r'Accept': wireprotov2server.FRAMINGTYPE,\\n         r'Content-Type': wireprotov2server.FRAMINGTYPE,\\n     }\\n \\n     req = requestbuilder(pycompat.strurl(url), body, headers)\\n     req.add_unredirected_header(r'Content-Length', r'%d' % len(body))\\n \\n     try:\\n         res = opener.open(req)\\n     except urlerr.httperror as e:\\n         if e.code == 401:\\n             raise error.Abort(_('authorization failed'))\\n \\n         raise\\n     except httplib.HTTPException as e:\\n         ui.traceback()\\n         raise IOError(None, e)\\n \\n     return handler, res\\n \\n class queuedcommandfuture(pycompat.futures.Future):\\n     \\\"\\\"\\\"Wraps result() on command futures to trigger submission on call.\\\"\\\"\\\"\\n \\n     def result(self, timeout=None):\\n         if self.done():\\n             return pycompat.futures.Future.result(self, timeout)\\n \\n         self._peerexecutor.sendcommands()\\n \\n         # sendcommands() will restore the original __class__ and self.result\\n         # will resolve to Future.result.\\n         return self.result(timeout)\\n 
\\n @interfaceutil.implementer(repository.ipeercommandexecutor)\\n class httpv2executor(object):\\n     def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):\\n         self._ui = ui\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._apiurl = apiurl\\n         self._descriptor = descriptor\\n         self._sent = False\\n         self._closed = False\\n         self._neededpermissions = set()\\n         self._calls = []\\n         self._futures = weakref.WeakSet()\\n         self._responseexecutor = None\\n         self._responsef = None\\n \\n     def __enter__(self):\\n         return self\\n \\n     def __exit__(self, exctype, excvalue, exctb):\\n         self.close()\\n \\n     def callcommand(self, command, args):\\n         if self._sent:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'commands are sent')\\n \\n         if self._closed:\\n             raise error.ProgrammingError('callcommand() cannot be used after '\\n                                          'close()')\\n \\n         # The service advertises which commands are available. So if we attempt\\n         # to call an unknown command or pass an unknown argument, we can screen\\n         # for this.\\n         if command not in self._descriptor['commands']:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s is not available' % command)\\n \\n         cmdinfo = self._descriptor['commands'][command]\\n         unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))\\n \\n         if unknownargs:\\n             raise error.ProgrammingError(\\n                 'wire protocol command %s does not accept argument: %s' % (\\n                     command, ', '.join(sorted(unknownargs))))\\n \\n         self._neededpermissions |= set(cmdinfo['permissions'])\\n \\n         # TODO we \\/could\\/ also validate types here, since the API descriptor\\n         # includes types...\\n \\n         f = pycompat.futures.Future()\\n \\n         # Monkeypatch it so result() triggers sendcommands(), otherwise result()\\n         # could deadlock.\\n         f.__class__ = queuedcommandfuture\\n         f._peerexecutor = self\\n \\n         self._futures.add(f)\\n         self._calls.append((command, args, f))\\n \\n         return f\\n \\n     def sendcommands(self):\\n         if self._sent:\\n             return\\n \\n         if not self._calls:\\n             return\\n \\n         self._sent = True\\n \\n         # Unhack any future types so caller sees a clean type and so we\\n         # break reference cycle.\\n         for f in self._futures:\\n             if isinstance(f, queuedcommandfuture):\\n                 f.__class__ = pycompat.futures.Future\\n                 f._peerexecutor = None\\n \\n         # Mark the future as running and filter out cancelled futures.\\n         calls = [(command, args, f)\\n                  for command, args, f in self._calls\\n                  if f.set_running_or_notify_cancel()]\\n \\n         # Clear out references, prevent improper object usage.\\n         self._calls = None\\n \\n         if not calls:\\n             return\\n \\n         permissions = set(self._neededpermissions)\\n \\n         if 'push' in permissions and 'pull' in permissions:\\n             permissions.remove('pull')\\n \\n         if len(permissions) \\u003e 1:\\n             raise error.RepoError(_('cannot make request requiring multiple '\\n  
                                   'permissions: %s') %\\n                                   _(', ').join(sorted(permissions)))\\n \\n         permission = {\\n             'push': 'rw',\\n             'pull': 'ro',\\n         }[permissions.pop()]\\n \\n         handler, resp = sendv2request(\\n             self._ui, self._opener, self._requestbuilder, self._apiurl,\\n             permission, calls)\\n \\n         # TODO we probably want to validate the HTTP code, media type, etc.\\n \\n         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)\\n         self._responsef = self._responseexecutor.submit(self._handleresponse,\\n                                                         handler, resp)\\n \\n     def close(self):\\n         if self._closed:\\n             return\\n \\n         self.sendcommands()\\n \\n         self._closed = True\\n \\n         if not self._responsef:\\n             return\\n \\n         # TODO ^C here may not result in immediate program termination.\\n \\n         try:\\n             self._responsef.result()\\n         finally:\\n             self._responseexecutor.shutdown(wait=True)\\n             self._responsef = None\\n             self._responseexecutor = None\\n \\n             # If any of our futures are still in progress, mark them as\\n             # errored, otherwise a result() could wait indefinitely.\\n             for f in self._futures:\\n                 if not f.done():\\n                     f.set_exception(error.ResponseError(\\n                         _('unfulfilled command response')))\\n \\n             self._futures = None\\n \\n     def _handleresponse(self, handler, resp):\\n         # Called in a thread to read the response.\\n \\n         while handler.readframe(resp):\\n             pass\\n \\n # TODO implement interface for version 2 peers\\n @interfaceutil.implementer(repository.ipeerconnection,\\n                            repository.ipeercapabilities,\\n                            repository.ipeerrequests)\\n class httpv2peer(object):\\n     def __init__(self, ui, repourl, apipath, opener, requestbuilder,\\n                  apidescriptor):\\n         self.ui = ui\\n \\n         if repourl.endswith('\\/'):\\n             repourl = repourl[:-1]\\n \\n         self._url = repourl\\n         self._apipath = apipath\\n         self._apiurl = '%s\\/%s' % (repourl, apipath)\\n         self._opener = opener\\n         self._requestbuilder = requestbuilder\\n         self._descriptor = apidescriptor\\n \\n     # Start of ipeerconnection.\\n \\n     def url(self):\\n         return self._url\\n \\n     def local(self):\\n         return None\\n \\n     def peer(self):\\n         return self\\n \\n     def canpush(self):\\n         # TODO change once implemented.\\n         return False\\n \\n     def close(self):\\n         pass\\n \\n     # End of ipeerconnection.\\n \\n     # Start of ipeercapabilities.\\n \\n     def capable(self, name):\\n         # The capabilities used internally historically map to capabilities\\n         # advertised from the \\\"capabilities\\\" wire protocol command. 
However,\\n         # version 2 of that command works differently.\\n \\n         # Maps to commands that are available.\\n         if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\n         if name.startswith('command-'):\\n             return name[len('command-'):] in self._descriptor['commands']\\n \\n         return False\\n \\n     def requirecap(self, name, purpose):\\n         if self.capable(name):\\n             return\\n \\n         raise error.CapabilityError(\\n             _('cannot %s; client or remote repository does not support the %r '\\n               'capability') % (purpose, name))\\n \\n     # End of ipeercapabilities.\\n \\n     def _call(self, name, **args):\\n         with self.commandexecutor() as e:\\n             return e.callcommand(name, args).result()\\n \\n     def commandexecutor(self):\\n         return httpv2executor(self.ui, self._opener, self._requestbuilder,\\n                               self._apiurl, self._descriptor)\\n \\n # Registry of API service names to metadata about peers that handle it.\\n #\\n # The following keys are meaningful:\\n #\\n # init\\n #    Callable receiving (ui, repourl, servicepath, opener, requestbuilder,\\n #                        apidescriptor) to create a peer.\\n #\\n # priority\\n #    Integer priority for the service. If we could choose from multiple\\n #    services, we choose the one with the highest priority.\\n API_PEERS = {\\n     wireprototypes.HTTP_WIREPROTO_V2: {\\n         'init': httpv2peer,\\n         'priority': 50,\\n     },\\n }\\n \\n def performhandshake(ui, url, opener, requestbuilder):\\n     # The handshake is a request to the capabilities command.\\n \\n     caps = None\\n     def capable(x):\\n         raise error.ProgrammingError('should not be called')\\n \\n     args = {}\\n \\n     # The client advertises support for newer protocols by adding an\\n     # X-HgUpgrade-* header with a list of supported APIs and an\\n     # X-HgProto-* header advertising which serializing formats it supports.\\n     # We only support the HTTP version 2 transport and CBOR responses for\\n     # now.\\n     advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')\\n \\n     if advertisev2:\\n         args['headers'] = {\\n             r'X-HgProto-1': r'cbor',\\n         }\\n \\n         args['headers'].update(\\n             encodevalueinheaders(' '.join(sorted(API_PEERS)),\\n                                  'X-HgUpgrade',\\n                                  # We don't know the header limit this early.\\n                                  # So make it small.\\n                                  1024))\\n \\n     req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                            capable, url, 'capabilities',\\n                                            args)\\n     resp = sendrequest(ui, opener, req)\\n \\n     # The server may redirect us to the repo root, stripping the\\n     # ?cmd=capabilities query string from the URL. 
The server would likely\\n     # return HTML in this case and ``parsev1commandresponse()`` would raise.\\n     # We catch this special case and re-issue the capabilities request against\\n     # the new URL.\\n     #\\n     # We should ideally not do this, as a redirect that drops the query\\n     # string from the URL is arguably a server bug. (Garbage in, garbage out).\\n     # However,  Mercurial clients for several years appeared to handle this\\n     # issue without behavior degradation. And according to issue 5860, it may\\n     # be a longstanding bug in some server implementations. So we allow a\\n     # redirect that drops the query string to \\\"just work.\\\"\\n     try:\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n     except RedirectedRepoError as e:\\n         req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,\\n                                                capable, e.respurl,\\n                                                'capabilities', args)\\n         resp = sendrequest(ui, opener, req)\\n         respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,\\n                                                    compressible=False,\\n                                                    allowcbor=advertisev2)\\n \\n     try:\\n         rawdata = resp.read()\\n     finally:\\n         resp.close()\\n \\n     if not ct.startswith('application\\/mercurial-'):\\n         raise error.ProgrammingError('unexpected content-type: %s' % ct)\\n \\n     if advertisev2:\\n         if ct == 'application\\/mercurial-cbor':\\n             try:\\n                 info = cborutil.decodeall(rawdata)[0]\\n             except cborutil.CBORDecodeError:\\n                 raise error.Abort(_('error decoding CBOR from remote server'),\\n                                   hint=_('try again and consider contacting '\\n                                          'the server operator'))\\n \\n         # We got a legacy response. 
That's fine.\\n         elif ct in ('application\\/mercurial-0.1', 'application\\/mercurial-0.2'):\\n             info = {\\n                 'v1capabilities': set(rawdata.split())\\n             }\\n \\n         else:\\n             raise error.RepoError(\\n                 _('unexpected response type from server: %s') % ct)\\n     else:\\n         info = {\\n             'v1capabilities': set(rawdata.split())\\n         }\\n \\n     return respurl, info\\n \\n def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):\\n     \\\"\\\"\\\"Construct an appropriate HTTP peer instance.\\n \\n     ``opener`` is an ``url.opener`` that should be used to establish\\n     connections, perform HTTP requests.\\n \\n     ``requestbuilder`` is the type used for constructing HTTP requests.\\n     It exists as an argument so extensions can override the default.\\n     \\\"\\\"\\\"\\n     u = util.url(path)\\n     if u.query or u.fragment:\\n         raise error.Abort(_('unsupported URL component: \\\"%s\\\"') %\\n                           (u.query or u.fragment))\\n \\n     # urllib cannot handle URLs with embedded user or passwd.\\n     url, authinfo = u.authinfo()\\n     ui.debug('using %s\\\\n' % url)\\n \\n     opener = opener or urlmod.opener(ui, authinfo)\\n \\n     respurl, info = performhandshake(ui, url, opener, requestbuilder)\\n \\n     # Given the intersection of APIs that both we and the server support,\\n     # sort by their advertised priority and pick the first one.\\n     #\\n     # TODO consider making this request-based and interface driven. For\\n     # example, the caller could say \\\"I want a peer that does X.\\\" It's quite\\n     # possible that not all peers would do that. Since we know the service\\n     # capabilities, we could filter out services not meeting the\\n     # requirements. Possibly by consulting the interfaces defined by the\\n     # peer type.\\n     apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())\\n \\n     preferredchoices = sorted(apipeerchoices,\\n                               key=lambda x: API_PEERS[x]['priority'],\\n                               reverse=True)\\n \\n     for service in preferredchoices:\\n         apipath = '%s\\/%s' % (info['apibase'].rstrip('\\/'), service)\\n \\n         return API_PEERS[service]['init'](ui, respurl, apipath, opener,\\n                                           requestbuilder,\\n                                           info['apis'][service])\\n \\n     # Failed to construct an API peer. 
Fall back to legacy.\\n     return httppeer(ui, path, respurl, opener, requestbuilder,\\n                     info['v1capabilities'])\\n \\n def instance(ui, path, create, intents=None, createopts=None):\\n     if create:\\n         raise error.Abort(_('cannot create new http repository'))\\n     try:\\n         if path.startswith('https:') and not urlmod.has_https:\\n             raise error.Abort(_('Python support for SSL and HTTPS '\\n                                 'is not installed'))\\n \\n         inst = makepeer(ui, path)\\n \\n         return inst\\n     except error.RepoError as httpexception:\\n         try:\\n             r = statichttprepo.instance(ui, \\\"static-\\\" + path, create)\\n             ui.note(_('(falling back to static-http)\\\\n'))\\n             return r\\n         except error.RepoError:\\n             raise httpexception # use the original http RepoError instead\\n\"}]},{\"id\":\"24368\",\"metadata\":{\"line:first\":1,\"copy:lines\":{\"4\":[\"mercurial\\/exchange.py\",4,\" \"],\"5\":[\"mercurial\\/exchange.py\",5,\" \"],\"6\":[\"mercurial\\/exchange.py\",6,\" \"],\"7\":[\"mercurial\\/exchange.py\",7,\" \"],\"8\":[\"mercurial\\/exchange.py\",8,\" \"],\"9\":[\"mercurial\\/exchange.py\",9,\" \"]}},\"oldPath\":null,\"currentPath\":\"mercurial\\/exchangev2.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"55\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"55\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+    nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. 
Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\n\"}]},{\"id\":\"24367\",\"metadata\":{\"line:first\":29,\"copy:lines\":{\"1514\":[\"\",1509,\"-\"],\"1515\":[\"\",1510,\"-\"],\"1516\":[\"\",1511,\"-\"],\"1517\":[\"\",1512,\"-\"],\"1518\":[\"\",1513,\"-\"],\"1519\":[\"\",1514,\"-\"],\"1520\":[\"\",1515,\"-\"],\"1521\":[\"\",1516,\"-\"],\"1522\":[\"\",1517,\"-\"],\"1523\":[\"\",1518,\"-\"],\"1524\":[\"\",1519,\"-\"],\"1525\":[\"\",1520,\" \"],\"1526\":[\"\",1521,\" \"],\"1527\":[\"\",1522,\" \"],\"1528\":[\"\",1523,\" \"],\"1529\":[\"\",1524,\" \"],\"1530\":[\"\",1525,\" \"],\"1531\":[\"\",1526,\" \"],\"1532\":[\"\",1527,\" \"],\"1533\":[\"\",1528,\" \"],\"1534\":[\"\",1529,\" \"],\"1535\":[\"\",1530,\" \"],\"1536\":[\"\",1531,\" \"],\"1537\":[\"\",1532,\" \"],\"1538\":[\"\",1533,\" \"],\"1539\":[\"\",1534,\" \"],\"1540\":[\"\",1535,\" \"],\"1541\":[\"\",1536,\" \"],\"1542\":[\"\",1537,\" \"],\"1543\":[\"\",1538,\" \"],\"1544\":[\"\",1539,\" \"],\"1545\":[\"\",1540,\" \"],\"1546\":[\"\",1541,\" \"],\"1547\":[\"\",1542,\" \"],\"1548\":[\"\",1543,\" \"],\"1549\":[\"\",1544,\" \"],\"1550\":[\"\",1545,\" \"],\"1551\":[\"\",1546,\" \"],\"1552\":[\"\",1547,\" \"],\"1553\":[\"\",1548,\" \"],\"1554\":[\"\",1549,\" \"],\"1555\":[\"\",1550,\" \"],\"1556\":[\"\",1551,\" \"],\"1557\":[\"\",1552,\" \"],\"1558\":[\"\",1553,\" \"],\"1559\":[\"\",1554,\" \"],\"1560\":[\"\",1555,\" \"],\"1561\":[\"\",1556,\" \"],\"1562\":[\"\",1557,\" \"],\"1563\":[\"\",1558,\" \"],\"1564\":[\"\",1559,\" \"],\"1565\":[\"\",1560,\" \"],\"1566\":[\"\",1561,\" \"],\"1567\":[\"\",1562,\" \"],\"1568\":[\"\",1563,\" \"],\"1569\":[\"\",1564,\" \"],\"1570\":[\"\",1565,\" \"],\"1571\":[\"\",1566,\" \"],\"1572\":[\"\",1567,\" \"],\"1573\":[\"\",1568,\" \"],\"1574\":[\"\",1569,\" \"],\"1575\":[\"\",1570,\" \"],\"1576\":[\"\",1571,\" \"],\"1577\":[\"\",1572,\" \"],\"1578\":[\"\",1573,\" \"],\"1579\":[\"\",1574,\" \"],\"1580\":[\"\",1575,\" \"],\"1581\":[\"\",1576,\" \"],\"1582\":[\"\",1577,\" \"],\"1583\":[\"\",1578,\" \"],\"1584\":[\"\",1579,\" \"],\"1585\":[\"\",1580,\" \"],\"1586\":[\"\",1581,\" \"],\"1587\":[\"\",1582,\" \"],\"1588\":[\"\",1583,\" \"],\"1589\":[\"\",1584,\" \"],\"1590\":[\"\",1585,\" \"],\"1591\":[\"\",1586,\" \"],\"1592\":[\"\",1587,\" \"],\"1593\":[\"\",1588,\" \"],\"1594\":[\"\",1589,\" \"],\"1595\":[\"\",1590,\" \"],\"1596\":[\"\",1591,\" \"],\"1597\":[\"\",1592,\" \"],\"1598\":[\"\",1593,\" \"],\"1599\":[\"\",1594,\" \"],\"1600\":[\"\",1595,\" \"],\"1601\":[\"\",1596,\" \"],\"1602\":[\"\",1597,\" \"],\"1603\":[\"\",1598,\" \"],\"1604\":[\"\",1599,\" \"],\"1605\":[\"\",1600,\" \"],\"1606\":[\"\",1601,\" \"],\"1607\":[\"\",1602,\" \"],\"1608\":[\"\",1603,\" \"],\"1609\":[\"\",1604,\" \"],\"1610\":[\"\",1605,\" \"],\"1611\":[\"\",1606,\" \"],\"1612\":[\"\",1607,\" \"],\"1613\":[\"\",1608,\" \"],\"1614\":[\"\",1609,\" 
\"],\"1615\":[\"\",1610,\" \"],\"1616\":[\"\",1611,\" \"],\"1617\":[\"\",1612,\" \"],\"1618\":[\"\",1613,\" \"],\"1619\":[\"\",1614,\" \"],\"1620\":[\"\",1615,\" \"],\"1621\":[\"\",1616,\" \"],\"1622\":[\"\",1617,\" \"],\"1623\":[\"\",1618,\" \"],\"1624\":[\"\",1619,\" \"],\"1625\":[\"\",1620,\" \"],\"1626\":[\"\",1621,\" \"],\"1627\":[\"\",1622,\" \"],\"1628\":[\"\",1623,\" \"],\"1629\":[\"\",1624,\" \"],\"1630\":[\"\",1625,\" \"],\"1631\":[\"\",1626,\" \"],\"1632\":[\"\",1627,\" \"],\"1633\":[\"\",1628,\" \"],\"1634\":[\"\",1629,\" \"],\"1635\":[\"\",1630,\" \"],\"1636\":[\"\",1631,\" \"],\"1637\":[\"\",1632,\" \"],\"1638\":[\"\",1633,\" \"],\"1639\":[\"\",1634,\" \"],\"1640\":[\"\",1635,\" \"],\"1641\":[\"\",1636,\" \"],\"1642\":[\"\",1637,\" \"],\"1643\":[\"\",1638,\" \"],\"1644\":[\"\",1639,\" \"],\"1645\":[\"\",1640,\" \"],\"1646\":[\"\",1641,\" \"],\"1647\":[\"\",1642,\" \"],\"1648\":[\"\",1643,\" \"],\"1649\":[\"\",1644,\" \"],\"1650\":[\"\",1645,\" \"],\"1651\":[\"\",1646,\" \"],\"1652\":[\"\",1647,\" \"],\"1653\":[\"\",1648,\" \"],\"1654\":[\"\",1649,\" \"],\"1655\":[\"\",1650,\" \"],\"1656\":[\"\",1651,\" \"],\"1657\":[\"\",1652,\" \"],\"1658\":[\"\",1653,\" \"],\"1659\":[\"\",1654,\" \"],\"1660\":[\"\",1655,\" \"],\"1661\":[\"\",1656,\" \"],\"1662\":[\"\",1657,\" \"],\"1663\":[\"\",1658,\" \"],\"1664\":[\"\",1659,\" \"],\"1665\":[\"\",1660,\" \"],\"1666\":[\"\",1661,\" \"],\"1667\":[\"\",1662,\" \"],\"1668\":[\"\",1663,\" \"],\"1669\":[\"\",1664,\" \"],\"1670\":[\"\",1665,\" \"],\"1671\":[\"\",1666,\" \"],\"1672\":[\"\",1667,\" \"],\"1673\":[\"\",1668,\" \"],\"1674\":[\"\",1669,\" \"],\"1675\":[\"\",1670,\" \"],\"1676\":[\"\",1671,\" \"],\"1677\":[\"\",1672,\" \"],\"1678\":[\"\",1673,\" \"],\"1679\":[\"\",1674,\" \"],\"1680\":[\"\",1675,\" \"],\"1681\":[\"\",1676,\" \"],\"1682\":[\"\",1677,\" \"],\"1683\":[\"\",1678,\" \"],\"1684\":[\"\",1679,\" \"],\"1685\":[\"\",1680,\" \"],\"1686\":[\"\",1681,\" \"],\"1687\":[\"\",1682,\" \"],\"1688\":[\"\",1683,\" \"],\"1689\":[\"\",1684,\" \"],\"1690\":[\"\",1685,\" \"],\"1691\":[\"\",1686,\" \"],\"1692\":[\"\",1687,\" \"],\"1693\":[\"\",1688,\" \"],\"1694\":[\"\",1689,\" \"],\"1695\":[\"\",1690,\" \"],\"1696\":[\"\",1691,\" \"],\"1697\":[\"\",1692,\" \"],\"1698\":[\"\",1693,\" \"],\"1699\":[\"\",1694,\" \"],\"1700\":[\"\",1695,\" \"],\"1701\":[\"\",1696,\" \"],\"1702\":[\"\",1697,\" \"],\"1703\":[\"\",1698,\" \"],\"1704\":[\"\",1699,\" \"],\"1705\":[\"\",1700,\" \"],\"1706\":[\"\",1701,\" \"],\"1707\":[\"\",1702,\" \"],\"1708\":[\"\",1703,\" \"],\"1709\":[\"\",1704,\" \"],\"1710\":[\"\",1705,\" \"],\"1711\":[\"\",1706,\" \"],\"1712\":[\"\",1707,\" \"],\"1713\":[\"\",1708,\" \"],\"1714\":[\"\",1709,\" \"],\"1715\":[\"\",1710,\" \"],\"1716\":[\"\",1711,\" \"],\"1717\":[\"\",1712,\" \"],\"1718\":[\"\",1713,\" \"],\"1719\":[\"\",1714,\" \"],\"1720\":[\"\",1715,\" \"],\"1721\":[\"\",1716,\" \"],\"1722\":[\"\",1717,\" \"],\"1723\":[\"\",1718,\" \"],\"1724\":[\"\",1719,\" \"],\"1725\":[\"\",1720,\" \"],\"1726\":[\"\",1721,\" \"],\"1727\":[\"\",1722,\" \"],\"1728\":[\"\",1723,\" \"],\"1729\":[\"\",1724,\" \"],\"1730\":[\"\",1725,\" \"],\"1731\":[\"\",1726,\" \"],\"1732\":[\"\",1727,\" \"],\"1733\":[\"\",1728,\" \"],\"1734\":[\"\",1729,\" \"],\"1735\":[\"\",1730,\" \"],\"1736\":[\"\",1731,\" \"],\"1737\":[\"\",1732,\" \"],\"1738\":[\"\",1733,\" \"],\"1739\":[\"\",1734,\" \"],\"1740\":[\"\",1735,\" \"],\"1741\":[\"\",1736,\" \"],\"1742\":[\"\",1737,\" \"],\"1743\":[\"\",1738,\" \"],\"1744\":[\"\",1739,\" \"],\"1745\":[\"\",1740,\" 
\"],\"1746\":[\"\",1741,\" \"],\"1747\":[\"\",1742,\" \"],\"1748\":[\"\",1743,\" \"],\"1749\":[\"\",1744,\" \"],\"1750\":[\"\",1745,\" \"],\"1751\":[\"\",1746,\" \"],\"1752\":[\"\",1747,\" \"],\"1753\":[\"\",1748,\" \"],\"1754\":[\"\",1749,\" \"],\"1755\":[\"\",1750,\" \"],\"1756\":[\"\",1751,\" \"],\"1757\":[\"\",1752,\" \"],\"1758\":[\"\",1753,\" \"],\"1759\":[\"\",1754,\" \"],\"1760\":[\"\",1755,\" \"],\"1761\":[\"\",1756,\" \"],\"1762\":[\"\",1757,\" \"],\"1763\":[\"\",1758,\" \"],\"1764\":[\"\",1759,\" \"],\"1765\":[\"\",1760,\" \"],\"1766\":[\"\",1761,\" \"],\"1767\":[\"\",1762,\" \"],\"1768\":[\"\",1763,\" \"],\"1769\":[\"\",1764,\" \"],\"1770\":[\"\",1765,\" \"],\"1771\":[\"\",1766,\" \"],\"1772\":[\"\",1767,\" \"],\"1773\":[\"\",1768,\" \"],\"1774\":[\"\",1769,\" \"],\"1775\":[\"\",1770,\" \"],\"1776\":[\"\",1771,\" \"],\"1777\":[\"\",1772,\" \"],\"1778\":[\"\",1773,\" \"],\"1779\":[\"\",1774,\" \"],\"1780\":[\"\",1775,\" \"],\"1781\":[\"\",1776,\" \"],\"1782\":[\"\",1777,\" \"],\"1783\":[\"\",1778,\" \"],\"1784\":[\"\",1779,\" \"],\"1785\":[\"\",1780,\" \"],\"1786\":[\"\",1781,\" \"],\"1787\":[\"\",1782,\" \"],\"1788\":[\"\",1783,\" \"],\"1789\":[\"\",1784,\" \"],\"1790\":[\"\",1785,\" \"],\"1791\":[\"\",1786,\" \"],\"1792\":[\"\",1787,\" \"],\"1793\":[\"\",1788,\" \"],\"1794\":[\"\",1789,\" \"],\"1795\":[\"\",1790,\" \"],\"1796\":[\"\",1791,\" \"],\"1797\":[\"\",1792,\" \"],\"1798\":[\"\",1793,\" \"],\"1799\":[\"\",1794,\" \"],\"1800\":[\"\",1795,\" \"],\"1801\":[\"\",1796,\" \"],\"1802\":[\"\",1797,\" \"],\"1803\":[\"\",1798,\" \"],\"1804\":[\"\",1799,\" \"],\"1805\":[\"\",1800,\" \"],\"1806\":[\"\",1801,\" \"],\"1807\":[\"\",1802,\" \"],\"1808\":[\"\",1803,\" \"],\"1809\":[\"\",1804,\" \"],\"1810\":[\"\",1805,\" \"],\"1811\":[\"\",1806,\" \"],\"1812\":[\"\",1807,\" \"],\"1813\":[\"\",1808,\" \"],\"1814\":[\"\",1809,\" \"],\"1815\":[\"\",1810,\" \"],\"1816\":[\"\",1811,\" \"],\"1817\":[\"\",1812,\" \"],\"1818\":[\"\",1813,\" \"],\"1819\":[\"\",1814,\" \"],\"1820\":[\"\",1815,\" \"],\"1821\":[\"\",1816,\" \"],\"1822\":[\"\",1817,\" \"],\"1823\":[\"\",1818,\" \"],\"1824\":[\"\",1819,\" \"],\"1825\":[\"\",1820,\" \"],\"1826\":[\"\",1821,\" \"],\"1827\":[\"\",1822,\" \"],\"1828\":[\"\",1823,\" \"],\"1829\":[\"\",1824,\" \"],\"1830\":[\"\",1825,\" \"],\"1831\":[\"\",1826,\" \"],\"1832\":[\"\",1827,\" \"],\"1833\":[\"\",1828,\" \"],\"1834\":[\"\",1829,\" \"],\"1835\":[\"\",1830,\" \"],\"1836\":[\"\",1831,\" \"],\"1837\":[\"\",1832,\" \"],\"1838\":[\"\",1833,\" \"],\"1839\":[\"\",1834,\" \"],\"1840\":[\"\",1835,\" \"],\"1841\":[\"\",1836,\" \"],\"1842\":[\"\",1837,\" \"],\"1843\":[\"\",1838,\" \"],\"1844\":[\"\",1839,\" \"],\"1845\":[\"\",1840,\" \"],\"1846\":[\"\",1841,\" \"],\"1847\":[\"\",1842,\" \"],\"1848\":[\"\",1843,\" \"],\"1849\":[\"\",1844,\" \"],\"1850\":[\"\",1845,\" \"],\"1851\":[\"\",1846,\" \"],\"1852\":[\"\",1847,\" \"],\"1853\":[\"\",1848,\" \"],\"1854\":[\"\",1849,\" \"],\"1855\":[\"\",1850,\" \"],\"1856\":[\"\",1851,\" \"],\"1857\":[\"\",1852,\" \"],\"1858\":[\"\",1853,\" \"],\"1859\":[\"\",1854,\" \"],\"1860\":[\"\",1855,\" \"],\"1861\":[\"\",1856,\" \"],\"1862\":[\"\",1857,\" \"],\"1863\":[\"\",1858,\" \"],\"1864\":[\"\",1859,\" \"],\"1865\":[\"\",1860,\" \"],\"1866\":[\"\",1861,\" \"],\"1867\":[\"\",1862,\" \"],\"1868\":[\"\",1863,\" \"],\"1869\":[\"\",1864,\" \"],\"1870\":[\"\",1865,\" \"],\"1871\":[\"\",1866,\" \"],\"1872\":[\"\",1867,\" \"],\"1873\":[\"\",1868,\" \"],\"1874\":[\"\",1869,\" \"],\"1875\":[\"\",1870,\" \"],\"1876\":[\"\",1871,\" 
\"],\"1877\":[\"\",1872,\" \"],\"1878\":[\"\",1873,\" \"],\"1879\":[\"\",1874,\" \"],\"1880\":[\"\",1875,\" \"],\"1881\":[\"\",1876,\" \"],\"1882\":[\"\",1877,\" \"],\"1883\":[\"\",1878,\" \"],\"1884\":[\"\",1879,\" \"],\"1885\":[\"\",1880,\" \"],\"1886\":[\"\",1881,\" \"],\"1887\":[\"\",1882,\" \"],\"1888\":[\"\",1883,\" \"],\"1889\":[\"\",1884,\" \"],\"1890\":[\"\",1885,\" \"],\"1891\":[\"\",1886,\" \"],\"1892\":[\"\",1887,\" \"],\"1893\":[\"\",1888,\" \"],\"1894\":[\"\",1889,\" \"],\"1895\":[\"\",1890,\" \"],\"1896\":[\"\",1891,\" \"],\"1897\":[\"\",1892,\" \"],\"1898\":[\"\",1893,\" \"],\"1899\":[\"\",1894,\" \"],\"1900\":[\"\",1895,\" \"],\"1901\":[\"\",1896,\" \"],\"1902\":[\"\",1897,\" \"],\"1903\":[\"\",1898,\" \"],\"1904\":[\"\",1899,\" \"],\"1905\":[\"\",1900,\" \"],\"1906\":[\"\",1901,\" \"],\"1907\":[\"\",1902,\" \"],\"1908\":[\"\",1903,\" \"],\"1909\":[\"\",1904,\" \"],\"1910\":[\"\",1905,\" \"],\"1911\":[\"\",1906,\" \"],\"1912\":[\"\",1907,\" \"],\"1913\":[\"\",1908,\" \"],\"1914\":[\"\",1909,\" \"],\"1915\":[\"\",1910,\" \"],\"1916\":[\"\",1911,\" \"],\"1917\":[\"\",1912,\" \"],\"1918\":[\"\",1913,\" \"],\"1919\":[\"\",1914,\" \"],\"1920\":[\"\",1915,\" \"],\"1921\":[\"\",1916,\" \"],\"1922\":[\"\",1917,\" \"],\"1923\":[\"\",1918,\" \"],\"1924\":[\"\",1919,\" \"],\"1925\":[\"\",1920,\" \"],\"1926\":[\"\",1921,\" \"],\"1927\":[\"\",1922,\" \"],\"1928\":[\"\",1923,\" \"],\"1929\":[\"\",1924,\" \"],\"1930\":[\"\",1925,\" \"],\"1931\":[\"\",1926,\" \"],\"1932\":[\"\",1927,\" \"],\"1933\":[\"\",1928,\" \"],\"1934\":[\"\",1929,\" \"],\"1935\":[\"\",1930,\" \"],\"1936\":[\"\",1931,\" \"],\"1937\":[\"\",1932,\" \"],\"1938\":[\"\",1933,\" \"],\"1939\":[\"\",1934,\" \"],\"1940\":[\"\",1935,\" \"],\"1941\":[\"\",1936,\" \"],\"1942\":[\"\",1937,\" \"],\"1943\":[\"\",1938,\" \"],\"1944\":[\"\",1939,\" \"],\"1945\":[\"\",1940,\" \"],\"1946\":[\"\",1941,\" \"],\"1947\":[\"\",1942,\" \"],\"1948\":[\"\",1943,\" \"],\"1949\":[\"\",1944,\" \"],\"1950\":[\"\",1945,\" \"],\"1951\":[\"\",1946,\" \"],\"1952\":[\"\",1947,\" \"],\"1953\":[\"\",1948,\" \"],\"1954\":[\"\",1949,\" \"],\"1955\":[\"\",1950,\" \"],\"1956\":[\"\",1951,\" \"],\"1957\":[\"\",1952,\" \"],\"1958\":[\"\",1953,\" \"],\"1959\":[\"\",1954,\" \"],\"1960\":[\"\",1955,\" \"],\"1961\":[\"\",1956,\" \"],\"1962\":[\"\",1957,\" \"],\"1963\":[\"\",1958,\" \"],\"1964\":[\"\",1959,\" \"],\"1965\":[\"\",1960,\" \"],\"1966\":[\"\",1961,\" \"],\"1967\":[\"\",1962,\" \"],\"1968\":[\"\",1963,\" \"],\"1969\":[\"\",1964,\" \"],\"1970\":[\"\",1965,\" \"],\"1971\":[\"\",1966,\" \"],\"1972\":[\"\",1967,\" \"],\"1973\":[\"\",1968,\" \"],\"1974\":[\"\",1969,\" \"],\"1975\":[\"\",1970,\" \"],\"1976\":[\"\",1971,\" \"],\"1977\":[\"\",1972,\" \"],\"1978\":[\"\",1973,\" \"],\"1979\":[\"\",1974,\" \"],\"1980\":[\"\",1975,\" \"],\"1981\":[\"\",1976,\" \"],\"1982\":[\"\",1977,\" \"],\"1983\":[\"\",1978,\" \"],\"1984\":[\"\",1979,\" \"],\"1985\":[\"\",1980,\" \"],\"1986\":[\"\",1981,\" \"],\"1987\":[\"\",1982,\" \"],\"1988\":[\"\",1983,\" \"],\"1989\":[\"\",1984,\" \"],\"1990\":[\"\",1985,\" \"],\"1991\":[\"\",1986,\" \"],\"1992\":[\"\",1987,\" \"],\"1993\":[\"\",1988,\" \"],\"1994\":[\"\",1989,\" \"],\"1995\":[\"\",1990,\" \"],\"1996\":[\"\",1991,\" \"],\"1997\":[\"\",1992,\" \"],\"1998\":[\"\",1993,\" \"],\"1999\":[\"\",1994,\" \"],\"2000\":[\"\",1995,\" \"],\"2001\":[\"\",1996,\" \"],\"2002\":[\"\",1997,\" \"],\"2003\":[\"\",1998,\" \"],\"2004\":[\"\",1999,\" \"],\"2005\":[\"\",2000,\" \"],\"2006\":[\"\",2001,\" \"],\"2007\":[\"\",2002,\" 
\"],\"2008\":[\"\",2003,\" \"],\"2009\":[\"\",2004,\" \"],\"2010\":[\"\",2005,\" \"],\"2011\":[\"\",2006,\" \"],\"2012\":[\"\",2007,\" \"],\"2013\":[\"\",2008,\" \"],\"2014\":[\"\",2009,\" \"],\"2015\":[\"\",2010,\" \"],\"2016\":[\"\",2011,\" \"],\"2017\":[\"\",2012,\" \"],\"2018\":[\"\",2013,\" \"],\"2019\":[\"\",2014,\" \"],\"2020\":[\"\",2015,\" \"],\"2021\":[\"\",2016,\" \"],\"2022\":[\"\",2017,\" \"],\"2023\":[\"\",2018,\" \"],\"2024\":[\"\",2019,\" \"],\"2025\":[\"\",2020,\" \"],\"2026\":[\"\",2021,\" \"],\"2027\":[\"\",2022,\" \"],\"2028\":[\"\",2023,\" \"],\"2029\":[\"\",2024,\" \"],\"2030\":[\"\",2025,\" \"],\"2031\":[\"\",2026,\" \"],\"2032\":[\"\",2027,\" \"],\"2033\":[\"\",2028,\" \"],\"2034\":[\"\",2029,\" \"],\"2035\":[\"\",2030,\" \"],\"2036\":[\"\",2031,\" \"],\"2037\":[\"\",2032,\" \"],\"2038\":[\"\",2033,\" \"],\"2039\":[\"\",2034,\" \"],\"2040\":[\"\",2035,\" \"],\"2041\":[\"\",2036,\" \"],\"2042\":[\"\",2037,\" \"],\"2043\":[\"\",2038,\" \"],\"2044\":[\"\",2039,\" \"],\"2045\":[\"\",2040,\" \"],\"2046\":[\"\",2041,\" \"],\"2047\":[\"\",2042,\" \"],\"2048\":[\"\",2043,\" \"],\"2049\":[\"\",2044,\" \"],\"2050\":[\"\",2045,\" \"],\"2051\":[\"\",2046,\" \"],\"2052\":[\"\",2047,\" \"],\"2053\":[\"\",2048,\" \"],\"2054\":[\"\",2049,\" \"],\"2055\":[\"\",2050,\" \"],\"2056\":[\"\",2051,\" \"],\"2057\":[\"\",2052,\" \"],\"2058\":[\"\",2053,\" \"],\"2059\":[\"\",2054,\" \"],\"2060\":[\"\",2055,\" \"],\"2061\":[\"\",2056,\" \"],\"2062\":[\"\",2057,\" \"],\"2063\":[\"\",2058,\" \"],\"2064\":[\"\",2059,\" \"],\"2065\":[\"\",2060,\" \"],\"2066\":[\"\",2061,\" \"],\"2067\":[\"\",2062,\" \"],\"2068\":[\"\",2063,\" \"],\"2069\":[\"\",2064,\" \"],\"2070\":[\"\",2065,\" \"],\"2071\":[\"\",2066,\" \"],\"2072\":[\"\",2067,\" \"],\"2073\":[\"\",2068,\" \"],\"2074\":[\"\",2069,\" \"],\"2075\":[\"\",2070,\" \"],\"2076\":[\"\",2071,\" \"],\"2077\":[\"\",2072,\" \"],\"2078\":[\"\",2073,\" \"],\"2079\":[\"\",2074,\" \"],\"2080\":[\"\",2075,\" \"],\"2081\":[\"\",2076,\" \"],\"2082\":[\"\",2077,\" \"],\"2083\":[\"\",2078,\" \"],\"2084\":[\"\",2079,\" \"],\"2085\":[\"\",2080,\" \"],\"2086\":[\"\",2081,\" \"],\"2087\":[\"\",2082,\" \"],\"2088\":[\"\",2083,\" \"],\"2089\":[\"\",2084,\" \"],\"2090\":[\"\",2085,\" \"],\"2091\":[\"\",2086,\" \"],\"2092\":[\"\",2087,\" \"],\"2093\":[\"\",2088,\" \"],\"2094\":[\"\",2089,\" \"],\"2095\":[\"\",2090,\" \"],\"2096\":[\"\",2091,\" \"],\"2097\":[\"\",2092,\" \"],\"2098\":[\"\",2093,\" \"],\"2099\":[\"\",2094,\" \"],\"2100\":[\"\",2095,\" \"],\"2101\":[\"\",2096,\" \"],\"2102\":[\"\",2097,\" \"],\"2103\":[\"\",2098,\" \"],\"2104\":[\"\",2099,\" \"],\"2105\":[\"\",2100,\" \"],\"2106\":[\"\",2101,\" \"],\"2107\":[\"\",2102,\" \"],\"2108\":[\"\",2103,\" \"],\"2109\":[\"\",2104,\" \"],\"2110\":[\"\",2105,\" \"],\"2111\":[\"\",2106,\" \"],\"2112\":[\"\",2107,\" \"],\"2113\":[\"\",2108,\" \"],\"2114\":[\"\",2109,\" \"],\"2115\":[\"\",2110,\" \"],\"2116\":[\"\",2111,\" \"],\"2117\":[\"\",2112,\" \"],\"2118\":[\"\",2113,\" \"],\"2119\":[\"\",2114,\" \"],\"2120\":[\"\",2115,\" \"],\"2121\":[\"\",2116,\" \"],\"2122\":[\"\",2117,\" \"],\"2123\":[\"\",2118,\" \"],\"2124\":[\"\",2119,\" \"],\"2125\":[\"\",2120,\" \"],\"2126\":[\"\",2121,\" \"],\"2127\":[\"\",2122,\" \"],\"2128\":[\"\",2123,\" \"],\"2129\":[\"\",2124,\" \"],\"2130\":[\"\",2125,\" \"],\"2131\":[\"\",2126,\" \"],\"2132\":[\"\",2127,\" \"],\"2133\":[\"\",2128,\" \"],\"2134\":[\"\",2129,\" \"],\"2135\":[\"\",2130,\" \"],\"2136\":[\"\",2131,\" \"],\"2137\":[\"\",2132,\" \"],\"2138\":[\"\",2133,\" 
\"],\"2139\":[\"\",2134,\" \"],\"2140\":[\"\",2135,\" \"],\"2141\":[\"\",2136,\" \"],\"2142\":[\"\",2137,\" \"],\"2143\":[\"\",2138,\" \"],\"2144\":[\"\",2139,\" \"],\"2145\":[\"\",2140,\" \"],\"2146\":[\"\",2141,\" \"],\"2147\":[\"\",2142,\" \"],\"2148\":[\"\",2143,\" \"],\"2149\":[\"\",2144,\" \"],\"2150\":[\"\",2145,\" \"],\"2151\":[\"\",2146,\" \"],\"2152\":[\"\",2147,\" \"],\"2153\":[\"\",2148,\" \"],\"2154\":[\"\",2149,\" \"],\"2155\":[\"\",2150,\" \"],\"2156\":[\"\",2151,\" \"],\"2157\":[\"\",2152,\" \"],\"2158\":[\"\",2153,\" \"],\"2159\":[\"\",2154,\" \"],\"2160\":[\"\",2155,\" \"],\"2161\":[\"\",2156,\" \"],\"2162\":[\"\",2157,\" \"],\"2163\":[\"\",2158,\" \"],\"2164\":[\"\",2159,\" \"],\"2165\":[\"\",2160,\" \"],\"2166\":[\"\",2161,\" \"],\"2167\":[\"\",2162,\" \"],\"2168\":[\"\",2163,\" \"],\"2169\":[\"\",2164,\" \"],\"2170\":[\"\",2165,\" \"],\"2171\":[\"\",2166,\" \"],\"2172\":[\"\",2167,\" \"],\"2173\":[\"\",2168,\" \"],\"2174\":[\"\",2169,\" \"],\"2175\":[\"\",2170,\" \"],\"2176\":[\"\",2171,\" \"],\"2177\":[\"\",2172,\" \"],\"2178\":[\"\",2173,\" \"],\"2179\":[\"\",2174,\" \"],\"2180\":[\"\",2175,\" \"],\"2181\":[\"\",2176,\" \"],\"2182\":[\"\",2177,\" \"],\"2183\":[\"\",2178,\" \"],\"2184\":[\"\",2179,\" \"],\"2185\":[\"\",2180,\" \"],\"2186\":[\"\",2181,\" \"],\"2187\":[\"\",2182,\" \"],\"2188\":[\"\",2183,\" \"],\"2189\":[\"\",2184,\" \"],\"2190\":[\"\",2185,\" \"],\"2191\":[\"\",2186,\" \"],\"2192\":[\"\",2187,\" \"],\"2193\":[\"\",2188,\" \"],\"2194\":[\"\",2189,\" \"],\"2195\":[\"\",2190,\" \"],\"2196\":[\"\",2191,\" \"],\"2197\":[\"\",2192,\" \"],\"2198\":[\"\",2193,\" \"],\"2199\":[\"\",2194,\" \"],\"2200\":[\"\",2195,\" \"],\"2201\":[\"\",2196,\" \"],\"2202\":[\"\",2197,\" \"],\"2203\":[\"\",2198,\" \"],\"2204\":[\"\",2199,\" \"],\"2205\":[\"\",2200,\" \"],\"2206\":[\"\",2201,\" \"],\"2207\":[\"\",2202,\" \"],\"2208\":[\"\",2203,\" \"],\"2209\":[\"\",2204,\" \"],\"2210\":[\"\",2205,\" \"],\"2211\":[\"\",2206,\" \"],\"2212\":[\"\",2207,\" \"],\"2213\":[\"\",2208,\" \"],\"2214\":[\"\",2209,\" \"],\"2215\":[\"\",2210,\" \"],\"2216\":[\"\",2211,\" \"],\"2217\":[\"\",2212,\" \"],\"2218\":[\"\",2213,\" \"],\"2219\":[\"\",2214,\" \"],\"2220\":[\"\",2215,\" \"],\"2221\":[\"\",2216,\" \"],\"2222\":[\"\",2217,\" \"],\"2223\":[\"\",2218,\" \"],\"2224\":[\"\",2219,\" \"],\"2225\":[\"\",2220,\" \"],\"2226\":[\"\",2221,\" \"],\"2227\":[\"\",2222,\" \"],\"2228\":[\"\",2223,\" \"],\"2229\":[\"\",2224,\" \"],\"2230\":[\"\",2225,\" \"],\"2231\":[\"\",2226,\" \"],\"2232\":[\"\",2227,\" \"],\"2233\":[\"\",2228,\" \"],\"2234\":[\"\",2229,\" \"],\"2235\":[\"\",2230,\" \"],\"2236\":[\"\",2231,\" \"],\"2237\":[\"\",2232,\" \"],\"2238\":[\"\",2233,\" \"],\"2239\":[\"\",2234,\" \"],\"2240\":[\"\",2235,\" \"],\"2241\":[\"\",2236,\" \"],\"2242\":[\"\",2237,\" \"],\"2243\":[\"\",2238,\" \"],\"2244\":[\"\",2239,\" \"],\"2245\":[\"\",2240,\" \"],\"2246\":[\"\",2241,\" \"],\"2247\":[\"\",2242,\" \"],\"2248\":[\"\",2243,\" \"],\"2249\":[\"\",2244,\" \"],\"2250\":[\"\",2245,\" \"],\"2251\":[\"\",2246,\" \"],\"2252\":[\"\",2247,\" \"],\"2253\":[\"\",2248,\" \"],\"2254\":[\"\",2249,\" \"],\"2255\":[\"\",2250,\" \"],\"2256\":[\"\",2251,\" \"],\"2257\":[\"\",2252,\" \"],\"2258\":[\"\",2253,\" \"],\"2259\":[\"\",2254,\" \"],\"2260\":[\"\",2255,\" \"],\"2261\":[\"\",2256,\" \"],\"2262\":[\"\",2257,\" \"],\"2263\":[\"\",2258,\" \"],\"2264\":[\"\",2259,\" \"],\"2265\":[\"\",2260,\" \"],\"2266\":[\"\",2261,\" \"],\"2267\":[\"\",2262,\" \"],\"2268\":[\"\",2263,\" \"],\"2269\":[\"\",2264,\" 
\"],\"2270\":[\"\",2265,\" \"],\"2271\":[\"\",2266,\" \"],\"2272\":[\"\",2267,\" \"],\"2273\":[\"\",2268,\" \"],\"2274\":[\"\",2269,\" \"],\"2275\":[\"\",2270,\" \"],\"2276\":[\"\",2271,\" \"],\"2277\":[\"\",2272,\" \"],\"2278\":[\"\",2273,\" \"],\"2279\":[\"\",2274,\" \"],\"2280\":[\"\",2275,\" \"],\"2281\":[\"\",2276,\" \"],\"2282\":[\"\",2277,\" \"],\"2283\":[\"\",2278,\" \"],\"2284\":[\"\",2279,\" \"],\"2285\":[\"\",2280,\" \"],\"2286\":[\"\",2281,\" \"],\"2287\":[\"\",2282,\" \"],\"2288\":[\"\",2283,\" \"],\"2289\":[\"\",2284,\" \"],\"2290\":[\"\",2285,\" \"],\"2291\":[\"\",2286,\" \"],\"2292\":[\"\",2287,\" \"],\"2293\":[\"\",2288,\" \"],\"2294\":[\"\",2289,\" \"],\"2295\":[\"\",2290,\" \"],\"2296\":[\"\",2291,\" \"],\"2297\":[\"\",2292,\" \"],\"2298\":[\"\",2293,\" \"],\"2299\":[\"\",2294,\" \"],\"2300\":[\"\",2295,\" \"],\"2301\":[\"\",2296,\" \"],\"2302\":[\"\",2297,\" \"],\"2303\":[\"\",2298,\" \"],\"2304\":[\"\",2299,\" \"],\"2305\":[\"\",2300,\" \"],\"2306\":[\"\",2301,\" \"],\"2307\":[\"\",2302,\" \"],\"2308\":[\"\",2303,\" \"],\"2309\":[\"\",2304,\" \"],\"2310\":[\"\",2305,\" \"],\"2311\":[\"\",2306,\" \"],\"2312\":[\"\",2307,\" \"],\"2313\":[\"\",2308,\" \"],\"2314\":[\"\",2309,\" \"],\"2315\":[\"\",2310,\" \"],\"2316\":[\"\",2311,\" \"],\"2317\":[\"\",2312,\" \"],\"2318\":[\"\",2313,\" \"],\"2319\":[\"\",2314,\" \"],\"2320\":[\"\",2315,\" \"],\"2321\":[\"\",2316,\" \"],\"2322\":[\"\",2317,\" \"],\"2323\":[\"\",2318,\" \"],\"2324\":[\"\",2319,\" \"],\"2325\":[\"\",2320,\" \"],\"2326\":[\"\",2321,\" \"],\"2327\":[\"\",2322,\" \"],\"2328\":[\"\",2323,\" \"],\"2329\":[\"\",2324,\" \"],\"2330\":[\"\",2325,\" \"],\"2331\":[\"\",2326,\" \"],\"2332\":[\"\",2327,\" \"],\"2333\":[\"\",2328,\" \"],\"2334\":[\"\",2329,\" \"],\"2335\":[\"\",2330,\" \"],\"2336\":[\"\",2331,\" \"],\"2337\":[\"\",2332,\" \"],\"2338\":[\"\",2333,\" \"],\"2339\":[\"\",2334,\" \"],\"2340\":[\"\",2335,\" \"],\"2341\":[\"\",2336,\" \"],\"2342\":[\"\",2337,\" \"],\"2343\":[\"\",2338,\" \"],\"2344\":[\"\",2339,\" \"],\"2345\":[\"\",2340,\" \"],\"2346\":[\"\",2341,\" \"],\"2347\":[\"\",2342,\" \"],\"2348\":[\"\",2343,\" \"],\"2349\":[\"\",2344,\" \"],\"2350\":[\"\",2345,\" \"],\"2351\":[\"\",2346,\" \"],\"2352\":[\"\",2347,\" \"],\"2353\":[\"\",2348,\" \"],\"2354\":[\"\",2349,\" \"],\"2355\":[\"\",2350,\" \"],\"2356\":[\"\",2351,\" \"],\"2357\":[\"\",2352,\" \"],\"2358\":[\"\",2353,\" \"],\"2359\":[\"\",2354,\" \"],\"2360\":[\"\",2355,\" \"],\"2361\":[\"\",2356,\" \"],\"2362\":[\"\",2357,\" \"],\"2363\":[\"\",2358,\" \"],\"2364\":[\"\",2359,\" \"],\"2365\":[\"\",2360,\" \"],\"2366\":[\"\",2361,\" \"],\"2367\":[\"\",2362,\" \"],\"2368\":[\"\",2363,\" \"],\"2369\":[\"\",2364,\" \"],\"2370\":[\"\",2365,\" \"],\"2371\":[\"\",2366,\" \"],\"2372\":[\"\",2367,\" \"],\"2373\":[\"\",2368,\" \"],\"2374\":[\"\",2369,\" \"],\"2375\":[\"\",2370,\" \"],\"2376\":[\"\",2371,\" \"],\"2377\":[\"\",2372,\" \"],\"2378\":[\"\",2373,\" \"],\"2379\":[\"\",2374,\" \"],\"2380\":[\"\",2375,\" \"],\"2381\":[\"\",2376,\" \"],\"2382\":[\"\",2377,\" \"],\"2383\":[\"\",2378,\" \"],\"2384\":[\"\",2379,\" \"],\"2385\":[\"\",2380,\" \"],\"2386\":[\"\",2381,\" \"],\"2387\":[\"\",2382,\" \"],\"2388\":[\"\",2383,\" \"],\"2389\":[\"\",2384,\" \"],\"2390\":[\"\",2385,\" \"],\"2391\":[\"\",2386,\" \"],\"2392\":[\"\",2387,\" \"],\"2393\":[\"\",2388,\" \"],\"2394\":[\"\",2389,\" \"],\"2395\":[\"\",2390,\" \"],\"2396\":[\"\",2391,\" \"],\"2397\":[\"\",2392,\" \"],\"2398\":[\"\",2393,\" \"],\"2399\":[\"\",2394,\" \"],\"2400\":[\"\",2395,\" 
\"],\"2401\":[\"\",2396,\" \"],\"2402\":[\"\",2397,\" \"],\"2403\":[\"\",2398,\" \"],\"2404\":[\"\",2399,\" \"],\"2405\":[\"\",2400,\" \"],\"2406\":[\"\",2401,\" \"],\"2407\":[\"\",2402,\" \"],\"2408\":[\"\",2403,\" \"],\"2409\":[\"\",2404,\" \"],\"2410\":[\"\",2405,\" \"],\"2411\":[\"\",2406,\" \"],\"2412\":[\"\",2407,\" \"],\"2413\":[\"\",2408,\" \"],\"2414\":[\"\",2409,\" \"],\"2415\":[\"\",2410,\" \"],\"2416\":[\"\",2411,\" \"],\"2417\":[\"\",2412,\" \"],\"2418\":[\"\",2413,\" \"],\"2419\":[\"\",2414,\" \"],\"2420\":[\"\",2415,\" \"],\"2421\":[\"\",2416,\" \"],\"2422\":[\"\",2417,\" \"],\"2423\":[\"\",2418,\" \"],\"2424\":[\"\",2419,\" \"],\"2425\":[\"\",2420,\" \"],\"2426\":[\"\",2421,\" \"],\"2427\":[\"\",2422,\" \"],\"2428\":[\"\",2423,\" \"],\"2429\":[\"\",2424,\" \"],\"2430\":[\"\",2425,\" \"],\"2431\":[\"\",2426,\" \"],\"2432\":[\"\",2427,\" \"],\"2433\":[\"\",2428,\" \"],\"2434\":[\"\",2429,\" \"],\"2435\":[\"\",2430,\" \"],\"2436\":[\"\",2431,\" \"],\"2437\":[\"\",2432,\" \"],\"2438\":[\"\",2433,\" \"],\"2439\":[\"\",2434,\" \"],\"2440\":[\"\",2435,\" \"],\"2441\":[\"\",2436,\" \"],\"2442\":[\"\",2437,\" \"],\"2443\":[\"\",2438,\" \"],\"2444\":[\"\",2439,\" \"],\"2445\":[\"\",2440,\" \"],\"2446\":[\"\",2441,\" \"],\"2447\":[\"\",2442,\" \"],\"2448\":[\"\",2443,\" \"],\"2449\":[\"\",2444,\" \"],\"2450\":[\"\",2445,\" \"],\"2451\":[\"\",2446,\" \"],\"2452\":[\"\",2447,\" \"],\"2453\":[\"\",2448,\" \"],\"2454\":[\"\",2449,\" \"],\"2455\":[\"\",2450,\" \"],\"2456\":[\"\",2451,\" \"],\"2457\":[\"\",2452,\" \"],\"2458\":[\"\",2453,\" \"],\"2459\":[\"\",2454,\" \"],\"2460\":[\"\",2455,\" \"],\"2461\":[\"\",2456,\" \"],\"2462\":[\"\",2457,\" \"],\"2463\":[\"\",2458,\" \"],\"2464\":[\"\",2459,\" \"],\"2465\":[\"\",2460,\" \"],\"2466\":[\"\",2461,\" \"],\"2467\":[\"\",2462,\" \"],\"2468\":[\"\",2463,\" \"],\"2469\":[\"\",2464,\" \"],\"2470\":[\"\",2465,\" \"],\"2471\":[\"\",2466,\" \"],\"2472\":[\"\",2467,\" \"],\"2473\":[\"\",2468,\" \"],\"2474\":[\"\",2469,\" \"],\"2475\":[\"\",2470,\" \"],\"2476\":[\"\",2471,\" \"],\"2477\":[\"\",2472,\" \"],\"2478\":[\"\",2473,\" \"],\"2479\":[\"\",2474,\" \"],\"2480\":[\"\",2475,\" \"],\"2481\":[\"\",2476,\" \"],\"2482\":[\"\",2477,\" \"],\"2483\":[\"\",2478,\" \"],\"2484\":[\"\",2479,\" \"],\"2485\":[\"\",2480,\" \"],\"2486\":[\"\",2481,\" \"],\"2487\":[\"\",2482,\" \"],\"2488\":[\"\",2483,\" \"],\"2489\":[\"\",2484,\" \"],\"2490\":[\"\",2485,\" \"],\"2491\":[\"\",2486,\" \"],\"2492\":[\"\",2487,\" \"],\"2493\":[\"\",2488,\" \"],\"2494\":[\"\",2489,\" \"],\"2495\":[\"\",2490,\" \"],\"2496\":[\"\",2491,\" \"],\"2497\":[\"\",2492,\" \"],\"2498\":[\"\",2493,\" \"],\"2499\":[\"\",2494,\" \"],\"2500\":[\"\",2495,\" \"],\"2501\":[\"\",2496,\" \"],\"2502\":[\"\",2497,\" \"],\"2503\":[\"\",2498,\" \"],\"2504\":[\"\",2499,\" \"],\"2505\":[\"\",2500,\" \"],\"2506\":[\"\",2501,\" \"],\"2507\":[\"\",2502,\" \"],\"2508\":[\"\",2503,\" \"],\"2509\":[\"\",2504,\" \"],\"2510\":[\"\",2505,\" \"],\"2511\":[\"\",2506,\" \"],\"2512\":[\"\",2507,\" \"],\"2513\":[\"\",2508,\" \"],\"2514\":[\"\",2509,\" \"],\"2515\":[\"\",2510,\" \"],\"2516\":[\"\",2511,\" \"],\"2517\":[\"\",2512,\" \"],\"2518\":[\"\",2513,\" \"],\"2519\":[\"\",2514,\" \"],\"2520\":[\"\",2515,\" \"],\"2521\":[\"\",2516,\" \"],\"2522\":[\"\",2517,\" \"],\"2523\":[\"\",2518,\" \"],\"2524\":[\"\",2519,\" \"],\"2525\":[\"\",2520,\" \"],\"2526\":[\"\",2521,\" \"],\"2527\":[\"\",2522,\" \"],\"2528\":[\"\",2523,\" \"],\"2529\":[\"\",2524,\" \"],\"2530\":[\"\",2525,\" \"],\"2531\":[\"\",2526,\" 
\"],\"2532\":[\"\",2527,\" \"],\"2533\":[\"\",2528,\" \"],\"2534\":[\"\",2529,\" \"],\"2535\":[\"\",2530,\" \"],\"2536\":[\"\",2531,\" \"],\"2537\":[\"\",2532,\" \"],\"2538\":[\"\",2533,\" \"],\"2539\":[\"\",2534,\" \"],\"2540\":[\"\",2535,\" \"],\"2541\":[\"\",2536,\" \"],\"2542\":[\"\",2537,\" \"],\"2543\":[\"\",2538,\" \"],\"2544\":[\"\",2539,\" \"],\"2545\":[\"\",2540,\" \"],\"2546\":[\"\",2541,\" \"],\"2547\":[\"\",2542,\" \"],\"2548\":[\"\",2543,\" \"],\"2549\":[\"\",2544,\" \"],\"2550\":[\"\",2545,\" \"],\"2551\":[\"\",2546,\" \"],\"2552\":[\"\",2547,\" \"],\"2553\":[\"\",2548,\" \"],\"2554\":[\"\",2549,\" \"],\"2555\":[\"\",2550,\" \"],\"2556\":[\"\",2551,\" \"],\"2557\":[\"\",2552,\" \"],\"2558\":[\"\",2553,\" \"],\"2559\":[\"\",2554,\" \"],\"2560\":[\"\",2555,\" \"],\"2561\":[\"\",2556,\" \"],\"2562\":[\"\",2557,\" \"],\"2563\":[\"\",2558,\" \"],\"2564\":[\"\",2559,\" \"],\"2565\":[\"\",2560,\" \"],\"2566\":[\"\",2561,\" \"],\"2567\":[\"\",2562,\" \"],\"2568\":[\"\",2563,\" \"],\"2569\":[\"\",2564,\" \"],\"2570\":[\"\",2565,\" \"],\"2571\":[\"\",2566,\" \"],\"2572\":[\"\",2567,\" \"],\"2573\":[\"\",2568,\" \"],\"2574\":[\"\",2569,\" \"],\"2575\":[\"\",2570,\" \"],\"2576\":[\"\",2571,\" \"],\"2577\":[\"\",2572,\" \"],\"2578\":[\"\",2573,\" \"],\"2579\":[\"\",2574,\" \"],\"2580\":[\"\",2575,\" \"],\"2581\":[\"\",2576,\" \"],\"2582\":[\"\",2577,\" \"],\"2583\":[\"\",2578,\" \"],\"2584\":[\"\",2579,\" \"],\"2585\":[\"\",2580,\" \"],\"2586\":[\"\",2581,\" \"],\"2587\":[\"\",2582,\" \"],\"2588\":[\"\",2583,\" \"],\"2589\":[\"\",2584,\" \"],\"2590\":[\"\",2585,\" \"],\"2591\":[\"\",2586,\" \"],\"2592\":[\"\",2587,\" \"],\"2593\":[\"\",2588,\" \"],\"2594\":[\"\",2589,\" \"],\"2595\":[\"\",2590,\" \"],\"2596\":[\"\",2591,\" \"],\"2597\":[\"\",2592,\" \"],\"2598\":[\"\",2593,\" \"],\"2599\":[\"\",2594,\" \"],\"2600\":[\"\",2595,\" \"],\"2601\":[\"\",2596,\" \"],\"2602\":[\"\",2597,\" \"],\"2603\":[\"\",2598,\" \"],\"2604\":[\"\",2599,\" \"],\"2605\":[\"\",2600,\" \"],\"2606\":[\"\",2601,\" \"],\"2607\":[\"\",2602,\" \"],\"2608\":[\"\",2603,\" \"],\"2609\":[\"\",2604,\" \"],\"2610\":[\"\",2605,\" \"],\"2611\":[\"\",2606,\" \"],\"2612\":[\"\",2607,\" \"],\"2613\":[\"\",2608,\" \"],\"2614\":[\"\",2609,\" \"],\"2615\":[\"\",2610,\" \"],\"2616\":[\"\",2611,\" \"],\"2617\":[\"\",2612,\" \"],\"2618\":[\"\",2613,\" \"],\"2619\":[\"\",2614,\" \"],\"2620\":[\"\",2615,\" \"],\"2621\":[\"\",2616,\" \"],\"2622\":[\"\",2617,\" \"],\"2623\":[\"\",2618,\" \"],\"2624\":[\"\",2619,\" \"],\"2625\":[\"\",2620,\" \"],\"2626\":[\"\",2621,\" \"],\"2627\":[\"\",2622,\" \"],\"2628\":[\"\",2623,\" \"],\"2629\":[\"\",2624,\" \"],\"2630\":[\"\",2625,\" \"],\"2631\":[\"\",2626,\" \"],\"2632\":[\"\",2627,\" \"],\"2633\":[\"\",2628,\" \"],\"2634\":[\"\",2629,\" \"],\"2635\":[\"\",2630,\" \"],\"2636\":[\"\",2631,\" \"],\"2637\":[\"\",2632,\" \"],\"2638\":[\"\",2633,\" \"],\"2639\":[\"\",2634,\" \"],\"2640\":[\"\",2635,\" \"],\"2641\":[\"\",2636,\" \"],\"2642\":[\"\",2637,\" \"],\"2643\":[\"\",2638,\" \"],\"2644\":[\"\",2639,\" \"],\"2645\":[\"\",2640,\" \"],\"2646\":[\"\",2641,\" \"],\"2647\":[\"\",2642,\" \"],\"2648\":[\"\",2643,\" \"],\"2649\":[\"\",2644,\" 
\"]}},\"oldPath\":\"mercurial\\/exchange.py\",\"currentPath\":\"mercurial\\/exchange.py\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":[],\"type\":\"2\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"16\",\"delLines\":\"11\",\"hunks\":[{\"oldOffset\":\"1\",\"newOffset\":\"1\",\"oldLength\":\"2644\",\"newLength\":\"2649\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\" # exchange.py - utility to exchange data between repos.\\n #\\n # Copyright 2005-2007 Matt Mackall \\u003cmpm@selenic.com\\u003e\\n #\\n # This software may be used and distributed according to the terms of the\\n # GNU General Public License version 2 or any later version.\\n \\n from __future__ import absolute_import\\n \\n import collections\\n import hashlib\\n \\n from .i18n import _\\n from .node import (\\n     bin,\\n     hex,\\n     nullid,\\n     nullrev,\\n )\\n from .thirdparty import (\\n     attr,\\n )\\n from . import (\\n     bookmarks as bookmod,\\n     bundle2,\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n     obsolete,\\n     phases,\\n     pushkey,\\n     pycompat,\\n     repository,\\n     scmutil,\\n     sslutil,\\n     streamclone,\\n     url as urlmod,\\n     util,\\n )\\n from .utils import (\\n     stringutil,\\n )\\n \\n urlerr = util.urlerr\\n urlreq = util.urlreq\\n \\n _NARROWACL_SECTION = 'narrowhgacl'\\n \\n # Maps bundle version human names to changegroup versions.\\n _bundlespeccgversions = {'v1': '01',\\n                          'v2': '02',\\n                          'packed1': 's1',\\n                          'bundle2': '02', #legacy\\n                         }\\n \\n # Maps bundle version with content opts to choose which part to bundle\\n _bundlespeccontentopts = {\\n     'v1': {\\n         'changegroup': True,\\n         'cg.version': '01',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': False,\\n         'revbranchcache': False\\n     },\\n     'v2': {\\n         'changegroup': True,\\n         'cg.version': '02',\\n         'obsolescence': False,\\n         'phases': False,\\n         'tagsfnodescache': True,\\n         'revbranchcache': True\\n     },\\n     'packed1' : {\\n         'cg.version': 's1'\\n     }\\n }\\n _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']\\n \\n _bundlespecvariants = {\\\"streamv2\\\": {\\\"changegroup\\\": False, \\\"streamv2\\\": True,\\n                                     \\\"tagsfnodescache\\\": False,\\n                                     \\\"revbranchcache\\\": False}}\\n \\n # Compression engines allowed in version 1. 
THIS SHOULD NEVER CHANGE.\\n _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}\\n \\n @attr.s\\n class bundlespec(object):\\n     compression = attr.ib()\\n     wirecompression = attr.ib()\\n     version = attr.ib()\\n     wireversion = attr.ib()\\n     params = attr.ib()\\n     contentopts = attr.ib()\\n \\n def parsebundlespec(repo, spec, strict=True):\\n     \\\"\\\"\\\"Parse a bundle string specification into parts.\\n \\n     Bundle specifications denote a well-defined bundle\\/exchange format.\\n     The content of a given specification should not change over time in\\n     order to ensure that bundles produced by a newer version of Mercurial are\\n     readable from an older version.\\n \\n     The string currently has the form:\\n \\n        \\u003ccompression\\u003e-\\u003ctype\\u003e[;\\u003cparameter0\\u003e[;\\u003cparameter1\\u003e]]\\n \\n     Where \\u003ccompression\\u003e is one of the supported compression formats\\n     and \\u003ctype\\u003e is (currently) a version string. A \\\";\\\" can follow the type and\\n     all text afterwards is interpreted as URI encoded, \\\";\\\" delimited key=value\\n     pairs.\\n \\n     If ``strict`` is True (the default) \\u003ccompression\\u003e is required. Otherwise,\\n     it is optional.\\n \\n     Returns a bundlespec object of (compression, version, parameters).\\n     Compression will be ``None`` if not in strict mode and a compression isn't\\n     defined.\\n \\n     An ``InvalidBundleSpecification`` is raised when the specification is\\n     not syntactically well formed.\\n \\n     An ``UnsupportedBundleSpecification`` is raised when the compression or\\n     bundle type\\/version is not recognized.\\n \\n     Note: this function will likely eventually return a more complex data\\n     structure, including bundle2 part information.\\n     \\\"\\\"\\\"\\n     def parseparams(s):\\n         if ';' not in s:\\n             return s, {}\\n \\n         params = {}\\n         version, paramstr = s.split(';', 1)\\n \\n         for p in paramstr.split(';'):\\n             if '=' not in p:\\n                 raise error.InvalidBundleSpecification(\\n                     _('invalid bundle specification: '\\n                       'missing \\\"=\\\" in parameter: %s') % p)\\n \\n             key, value = p.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             params[key] = value\\n \\n         return version, params\\n \\n \\n     if strict and '-' not in spec:\\n         raise error.InvalidBundleSpecification(\\n                 _('invalid bundle specification; '\\n                   'must be prefixed with compression: %s') % spec)\\n \\n     if '-' in spec:\\n         compression, version = spec.split('-', 1)\\n \\n         if compression not in util.compengines.supportedbundlenames:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s compression is not supported') % compression)\\n \\n         version, params = parseparams(version)\\n \\n         if version not in _bundlespeccgversions:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle version') % version)\\n     else:\\n         # Value could be just the compression or just the version, in which\\n         # case some defaults are assumed (but only when not in strict mode).\\n         assert not strict\\n \\n         spec, params = parseparams(spec)\\n \\n         if spec in util.compengines.supportedbundlenames:\\n     
        compression = spec\\n             version = 'v1'\\n             # Generaldelta repos require v2.\\n             if 'generaldelta' in repo.requirements:\\n                 version = 'v2'\\n             # Modern compression engines require v2.\\n             if compression not in _bundlespecv1compengines:\\n                 version = 'v2'\\n         elif spec in _bundlespeccgversions:\\n             if spec == 'packed1':\\n                 compression = 'none'\\n             else:\\n                 compression = 'bzip2'\\n             version = spec\\n         else:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('%s is not a recognized bundle specification') % spec)\\n \\n     # Bundle version 1 only supports a known set of compression engines.\\n     if version == 'v1' and compression not in _bundlespecv1compengines:\\n         raise error.UnsupportedBundleSpecification(\\n             _('compression engine %s is not supported on v1 bundles') %\\n             compression)\\n \\n     # The specification for packed1 can optionally declare the data formats\\n     # required to apply it. If we see this metadata, compare against what the\\n     # repo supports and error if the bundle isn't compatible.\\n     if version == 'packed1' and 'requirements' in params:\\n         requirements = set(params['requirements'].split(','))\\n         missingreqs = requirements - repo.supportedformats\\n         if missingreqs:\\n             raise error.UnsupportedBundleSpecification(\\n                     _('missing support for repository features: %s') %\\n                       ', '.join(sorted(missingreqs)))\\n \\n     # Compute contentopts based on the version\\n     contentopts = _bundlespeccontentopts.get(version, {}).copy()\\n \\n     # Process the variants\\n     if \\\"stream\\\" in params and params[\\\"stream\\\"] == \\\"v2\\\":\\n         variant = _bundlespecvariants[\\\"streamv2\\\"]\\n         contentopts.update(variant)\\n \\n     engine = util.compengines.forbundlename(compression)\\n     compression, wirecompression = engine.bundletype()\\n     wireversion = _bundlespeccgversions[version]\\n \\n     return bundlespec(compression, wirecompression, version, wireversion,\\n                       params, contentopts)\\n \\n def readbundle(ui, fh, fname, vfs=None):\\n     header = changegroup.readexactly(fh, 4)\\n \\n     alg = None\\n     if not fname:\\n         fname = \\\"stream\\\"\\n         if not header.startswith('HG') and header.startswith('\\\\0'):\\n             fh = changegroup.headerlessfixup(fh, header)\\n             header = \\\"HG10\\\"\\n             alg = 'UN'\\n     elif vfs:\\n         fname = vfs.join(fname)\\n \\n     magic, version = header[0:2], header[2:4]\\n \\n     if magic != 'HG':\\n         raise error.Abort(_('%s: not a Mercurial bundle') % fname)\\n     if version == '10':\\n         if alg is None:\\n             alg = changegroup.readexactly(fh, 2)\\n         return changegroup.cg1unpacker(fh, alg)\\n     elif version.startswith('2'):\\n         return bundle2.getunbundler(ui, fh, magicstring=magic + version)\\n     elif version == 'S1':\\n         return streamclone.streamcloneapplier(fh)\\n     else:\\n         raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))\\n \\n def getbundlespec(ui, fh):\\n     \\\"\\\"\\\"Infer the bundlespec from a bundle file handle.\\n \\n     The input file handle is seeked and the original seek position is not\\n     restored.\\n     \\\"\\\"\\\"\\n     def 
speccompression(alg):\\n         try:\\n             return util.compengines.forbundletype(alg).bundletype()[0]\\n         except KeyError:\\n             return None\\n \\n     b = readbundle(ui, fh, None)\\n     if isinstance(b, changegroup.cg1unpacker):\\n         alg = b._type\\n         if alg == '_truncatedBZ':\\n             alg = 'BZ'\\n         comp = speccompression(alg)\\n         if not comp:\\n             raise error.Abort(_('unknown compression algorithm: %s') % alg)\\n         return '%s-v1' % comp\\n     elif isinstance(b, bundle2.unbundle20):\\n         if 'Compression' in b.params:\\n             comp = speccompression(b.params['Compression'])\\n             if not comp:\\n                 raise error.Abort(_('unknown compression algorithm: %s') % comp)\\n         else:\\n             comp = 'none'\\n \\n         version = None\\n         for part in b.iterparts():\\n             if part.type == 'changegroup':\\n                 version = part.params['version']\\n                 if version in ('01', '02'):\\n                     version = 'v2'\\n                 else:\\n                     raise error.Abort(_('changegroup version %s does not have '\\n                                         'a known bundlespec') % version,\\n                                       hint=_('try upgrading your Mercurial '\\n                                               'client'))\\n             elif part.type == 'stream2' and version is None:\\n                 # A stream2 part requires to be part of a v2 bundle\\n                 version = \\\"v2\\\"\\n                 requirements = urlreq.unquote(part.params['requirements'])\\n                 splitted = requirements.split()\\n                 params = bundle2._formatrequirementsparams(splitted)\\n                 return 'none-v2;stream=v2;%s' % params\\n \\n         if not version:\\n             raise error.Abort(_('could not identify changegroup version in '\\n                                 'bundle'))\\n \\n         return '%s-%s' % (comp, version)\\n     elif isinstance(b, streamclone.streamcloneapplier):\\n         requirements = streamclone.readbundle1header(fh)[2]\\n         formatted = bundle2._formatrequirementsparams(requirements)\\n         return 'none-packed1;%s' % formatted\\n     else:\\n         raise error.Abort(_('unknown bundle type: %s') % b)\\n \\n def _computeoutgoing(repo, heads, common):\\n     \\\"\\\"\\\"Computes which revs are outgoing given a set of common\\n     and a set of heads.\\n \\n     This is a separate function so extensions can have access to\\n     the logic.\\n \\n     Returns a discovery.outgoing object.\\n     \\\"\\\"\\\"\\n     cl = repo.changelog\\n     if common:\\n         hasnode = cl.hasnode\\n         common = [n for n in common if hasnode(n)]\\n     else:\\n         common = [nullid]\\n     if not heads:\\n         heads = cl.heads()\\n     return discovery.outgoing(repo, common, heads)\\n \\n def _forcebundle1(op):\\n     \\\"\\\"\\\"return true if a pull\\/push must use bundle1\\n \\n     This function is used to allow testing of the older bundle version\\\"\\\"\\\"\\n     ui = op.repo.ui\\n     # The goal is this config is to allow developer to choose the bundle\\n     # version used during exchanged. 
This is especially handy during test.\\n     # Value is a list of bundle version to be picked from, highest version\\n     # should be used.\\n     #\\n     # developer config: devel.legacy.exchange\\n     exchange = ui.configlist('devel', 'legacy.exchange')\\n     forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange\\n     return forcebundle1 or not op.remote.capable('bundle2')\\n \\n class pushoperation(object):\\n     \\\"\\\"\\\"A object that represent a single push operation\\n \\n     Its purpose is to carry push related state and very common operations.\\n \\n     A new pushoperation should be created at the beginning of each push and\\n     discarded afterward.\\n     \\\"\\\"\\\"\\n \\n     def __init__(self, repo, remote, force=False, revs=None, newbranch=False,\\n                  bookmarks=(), pushvars=None):\\n         # repo we push from\\n         self.repo = repo\\n         self.ui = repo.ui\\n         # repo we push to\\n         self.remote = remote\\n         # force option provided\\n         self.force = force\\n         # revs to be pushed (None is \\\"all\\\")\\n         self.revs = revs\\n         # bookmark explicitly pushed\\n         self.bookmarks = bookmarks\\n         # allow push of new branch\\n         self.newbranch = newbranch\\n         # step already performed\\n         # (used to check what steps have been already performed through bundle2)\\n         self.stepsdone = set()\\n         # Integer version of the changegroup push result\\n         # - None means nothing to push\\n         # - 0 means HTTP error\\n         # - 1 means we pushed and remote head count is unchanged *or*\\n         #   we have outgoing changesets but refused to push\\n         # - other values as described by addchangegroup()\\n         self.cgresult = None\\n         # Boolean value for the bookmark push\\n         self.bkresult = None\\n         # discover.outgoing object (contains common and outgoing data)\\n         self.outgoing = None\\n         # all remote topological heads before the push\\n         self.remoteheads = None\\n         # Details of the remote branch pre and post push\\n         #\\n         # mapping: {'branch': ([remoteheads],\\n         #                      [newheads],\\n         #                      [unsyncedheads],\\n         #                      [discardedheads])}\\n         # - branch: the branch name\\n         # - remoteheads: the list of remote heads known locally\\n         #                None if the branch is new\\n         # - newheads: the new remote heads (known locally) with outgoing pushed\\n         # - unsyncedheads: the list of remote heads unknown locally.\\n         # - discardedheads: the list of remote heads made obsolete by the push\\n         self.pushbranchmap = None\\n         # testable as a boolean indicating if any nodes are missing locally.\\n         self.incoming = None\\n         # summary of the remote phase situation\\n         self.remotephases = None\\n         # phases changes that must be pushed along side the changesets\\n         self.outdatedphases = None\\n         # phases changes that must be pushed if changeset push fails\\n         self.fallbackoutdatedphases = None\\n         # outgoing obsmarkers\\n         self.outobsmarkers = set()\\n         # outgoing bookmarks\\n         self.outbookmarks = []\\n         # transaction manager\\n         self.trmanager = None\\n         # map { pushkey partid -\\u003e callback handling failure}\\n         # used to handle exception from 
mandatory pushkey part failure\\n         self.pkfailcb = {}\\n         # an iterable of pushvars or None\\n         self.pushvars = pushvars\\n \\n     @util.propertycache\\n     def futureheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push succeeds\\\"\\\"\\\"\\n         return self.outgoing.missingheads\\n \\n     @util.propertycache\\n     def fallbackheads(self):\\n         \\\"\\\"\\\"future remote heads if the changeset push fails\\\"\\\"\\\"\\n         if self.revs is None:\\n             # not target to push, all common are relevant\\n             return self.outgoing.commonheads\\n         unfi = self.repo.unfiltered()\\n         # I want cheads = heads(::missingheads and ::commonheads)\\n         # (missingheads is revs with secret changeset filtered out)\\n         #\\n         # This can be expressed as:\\n         #     cheads = ( (missingheads and ::commonheads)\\n         #              + (commonheads and ::missingheads))\\\"\\n         #              )\\n         #\\n         # while trying to push we already computed the following:\\n         #     common = (::commonheads)\\n         #     missing = ((commonheads::missingheads) - commonheads)\\n         #\\n         # We can pick:\\n         # * missingheads part of common (::commonheads)\\n         common = self.outgoing.common\\n         nm = self.repo.changelog.nodemap\\n         cheads = [node for node in self.revs if nm[node] in common]\\n         # and\\n         # * commonheads parents on missing\\n         revset = unfi.set('%ln and parents(roots(%ln))',\\n                          self.outgoing.commonheads,\\n                          self.outgoing.missing)\\n         cheads.extend(c.node() for c in revset)\\n         return cheads\\n \\n     @property\\n     def commonheads(self):\\n         \\\"\\\"\\\"set of all common heads after changeset bundle push\\\"\\\"\\\"\\n         if self.cgresult:\\n             return self.futureheads\\n         else:\\n             return self.fallbackheads\\n \\n # mapping of message used when pushing bookmark\\n bookmsgmap = {'update': (_(\\\"updating bookmark %s\\\\n\\\"),\\n                          _('updating bookmark %s failed!\\\\n')),\\n               'export': (_(\\\"exporting bookmark %s\\\\n\\\"),\\n                          _('exporting bookmark %s failed!\\\\n')),\\n               'delete': (_(\\\"deleting remote bookmark %s\\\\n\\\"),\\n                          _('deleting remote bookmark %s failed!\\\\n')),\\n               }\\n \\n \\n def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),\\n          opargs=None):\\n     '''Push outgoing changesets (limited by revs) from a local\\n     repository to remote. 
Return an integer:\\n       - None means nothing to push\\n       - 0 means HTTP error\\n       - 1 means we pushed and remote head count is unchanged *or*\\n         we have outgoing changesets but refused to push\\n       - other values as described by addchangegroup()\\n     '''\\n     if opargs is None:\\n         opargs = {}\\n     pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,\\n                            **pycompat.strkwargs(opargs))\\n     if pushop.remote.local():\\n         missing = (set(pushop.repo.requirements)\\n                    - pushop.remote.local().supported)\\n         if missing:\\n             msg = _(\\\"required features are not\\\"\\n                     \\\" supported in the destination:\\\"\\n                     \\\" %s\\\") % (', '.join(sorted(missing)))\\n             raise error.Abort(msg)\\n \\n     if not pushop.remote.canpush():\\n         raise error.Abort(_(\\\"destination does not support push\\\"))\\n \\n     if not pushop.remote.capable('unbundle'):\\n         raise error.Abort(_('cannot push: destination does not support the '\\n                             'unbundle wire protocol command'))\\n \\n     # get lock as we might write phase data\\n     wlock = lock = None\\n     try:\\n         # bundle2 push may receive a reply bundle touching bookmarks or other\\n         # things requiring the wlock. Take it now to ensure proper ordering.\\n         maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')\\n         if (not _forcebundle1(pushop)) and maypushback:\\n             wlock = pushop.repo.wlock()\\n         lock = pushop.repo.lock()\\n         pushop.trmanager = transactionmanager(pushop.repo,\\n                                               'push-response',\\n                                               pushop.remote.url())\\n     except error.LockUnavailable as err:\\n         # source repo cannot be locked.\\n         # We do not abort the push, but just disable the local phase\\n         # synchronisation.\\n         msg = 'cannot lock source repository: %s\\\\n' % err\\n         pushop.ui.debug(msg)\\n \\n     with wlock or util.nullcontextmanager(), \\\\\\n             lock or util.nullcontextmanager(), \\\\\\n             pushop.trmanager or util.nullcontextmanager():\\n         pushop.repo.checkpush(pushop)\\n         _pushdiscovery(pushop)\\n         if not _forcebundle1(pushop):\\n             _pushbundle2(pushop)\\n         _pushchangeset(pushop)\\n         _pushsyncphase(pushop)\\n         _pushobsolete(pushop)\\n         _pushbookmark(pushop)\\n \\n     if repo.ui.configbool('experimental', 'remotenames'):\\n         logexchange.pullremotenames(repo, remote)\\n \\n     return pushop\\n \\n # list of steps to perform discovery before push\\n pushdiscoveryorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n pushdiscoverymapping = {}\\n \\n def pushdiscovery(stepname):\\n     \\\"\\\"\\\"decorator for function performing discovery before push\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
Beware that decorated function will be added in order (this\\n     may matter).\\n \\n     You can only use this decorator for a new step, if you want to wrap a step\\n     from an extension, change the pushdiscovery dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in pushdiscoverymapping\\n         pushdiscoverymapping[stepname] = func\\n         pushdiscoveryorder.append(stepname)\\n         return func\\n     return dec\\n \\n def _pushdiscovery(pushop):\\n     \\\"\\\"\\\"Run all discovery steps\\\"\\\"\\\"\\n     for stepname in pushdiscoveryorder:\\n         step = pushdiscoverymapping[stepname]\\n         step(pushop)\\n \\n @pushdiscovery('changeset')\\n def _pushdiscoverychangeset(pushop):\\n     \\\"\\\"\\\"discover the changeset that need to be pushed\\\"\\\"\\\"\\n     fci = discovery.findcommonincoming\\n     if pushop.revs:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,\\n                         ancestorsof=pushop.revs)\\n     else:\\n         commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)\\n     common, inc, remoteheads = commoninc\\n     fco = discovery.findcommonoutgoing\\n     outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,\\n                    commoninc=commoninc, force=pushop.force)\\n     pushop.outgoing = outgoing\\n     pushop.remoteheads = remoteheads\\n     pushop.incoming = inc\\n \\n @pushdiscovery('phase')\\n def _pushdiscoveryphase(pushop):\\n     \\\"\\\"\\\"discover the phase that needs to be pushed\\n \\n     (computed for both success and failure case for changesets push)\\\"\\\"\\\"\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     remotephases = listkeys(pushop.remote, 'phases')\\n \\n     if (pushop.ui.configbool('ui', '_usedassubrepo')\\n         and remotephases    # server supports phases\\n         and not pushop.outgoing.missing # no changesets to be pushed\\n         and remotephases.get('publishing', False)):\\n         # When:\\n         # - this is a subrepo push\\n         # - and remote support phase\\n         # - and no changeset are to be pushed\\n         # - and remote is publishing\\n         # We may be in issue 3781 case!\\n         # We drop the possible phase synchronisation done by\\n         # courtesy to publish changesets possibly locally draft\\n         # on the remote.\\n         pushop.outdatedphases = []\\n         pushop.fallbackoutdatedphases = []\\n         return\\n \\n     pushop.remotephases = phases.remotephasessummary(pushop.repo,\\n                                                      pushop.fallbackheads,\\n                                                      remotephases)\\n     droots = pushop.remotephases.draftroots\\n \\n     extracond = ''\\n     if not pushop.remotephases.publishing:\\n         extracond = ' and public()'\\n     revset = 'heads((%%ln::%%ln) %s)' % extracond\\n     # Get the list of all revs draft on remote by public here.\\n     # XXX Beware that revset break if droots is not strictly\\n     # XXX root we may want to ensure it is but it is costly\\n     fallback = list(unfi.set(revset, droots, pushop.fallbackheads))\\n     if not outgoing.missing:\\n         future = fallback\\n     else:\\n         # adds changeset we are going to push as draft\\n         #\\n         # should not be necessary for publishing server, but because of an\\n         # issue fixed in xxxxx we have to do it anyway.\\n         fdroots = list(unfi.set('roots(%ln  + %ln::)',\\n           
             outgoing.missing, droots))\\n         fdroots = [f.node() for f in fdroots]\\n         future = list(unfi.set(revset, fdroots, pushop.futureheads))\\n     pushop.outdatedphases = future\\n     pushop.fallbackoutdatedphases = fallback\\n \\n @pushdiscovery('obsmarker')\\n def _pushdiscoveryobsmarkers(pushop):\\n     if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):\\n         return\\n \\n     if not pushop.repo.obsstore:\\n         return\\n \\n     if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):\\n         return\\n \\n     repo = pushop.repo\\n     # very naive computation, that can be quite expensive on big repo.\\n     # However: evolution is currently slow on them anyway.\\n     nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))\\n     pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)\\n \\n @pushdiscovery('bookmarks')\\n def _pushdiscoverybookmarks(pushop):\\n     ui = pushop.ui\\n     repo = pushop.repo.unfiltered()\\n     remote = pushop.remote\\n     ui.debug(\\\"checking for updated bookmarks\\\\n\\\")\\n     ancestors = ()\\n     if pushop.revs:\\n         revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)\\n         ancestors = repo.changelog.ancestors(revnums, inclusive=True)\\n \\n     remotebookmark = listkeys(remote, 'bookmarks')\\n \\n     explicit = set([repo._bookmarks.expandname(bookmark)\\n                     for bookmark in pushop.bookmarks])\\n \\n     remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)\\n     comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)\\n \\n     def safehex(x):\\n         if x is None:\\n             return x\\n         return hex(x)\\n \\n     def hexifycompbookmarks(bookmarks):\\n         return [(b, safehex(scid), safehex(dcid))\\n                 for (b, scid, dcid) in bookmarks]\\n \\n     comp = [hexifycompbookmarks(marks) for marks in comp]\\n     return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)\\n \\n def _processcompared(pushop, pushed, explicit, remotebms, comp):\\n     \\\"\\\"\\\"take decision on bookmark to pull from the remote bookmark\\n \\n     Exist to help extensions who want to alter this behavior.\\n     \\\"\\\"\\\"\\n     addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp\\n \\n     repo = pushop.repo\\n \\n     for b, scid, dcid in advsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n         if not pushed or repo[scid].rev() in pushed:\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search added bookmark\\n     for b, scid, dcid in addsrc:\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, '', scid))\\n     # search for overwritten bookmark\\n     for b, scid, dcid in list(advdst) + list(diverge) + list(differ):\\n         if b in explicit:\\n             explicit.remove(b)\\n             pushop.outbookmarks.append((b, dcid, scid))\\n     # search for bookmark to delete\\n     for b, scid, dcid in adddst:\\n         if b in explicit:\\n             explicit.remove(b)\\n             # treat as \\\"deleted locally\\\"\\n             pushop.outbookmarks.append((b, dcid, ''))\\n     # identical bookmarks shouldn't get reported\\n     for b, scid, dcid in same:\\n         if b in explicit:\\n             explicit.remove(b)\\n \\n     if explicit:\\n         explicit = sorted(explicit)\\n         # we should probably list all of them\\n         pushop.ui.warn(_('bookmark %s does 
not exist on the local '\\n                          'or remote repository!\\\\n') % explicit[0])\\n         pushop.bkresult = 2\\n \\n     pushop.outbookmarks.sort()\\n \\n def _pushcheckoutgoing(pushop):\\n     outgoing = pushop.outgoing\\n     unfi = pushop.repo.unfiltered()\\n     if not outgoing.missing:\\n         # nothing to push\\n         scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)\\n         return False\\n     # something to push\\n     if not pushop.force:\\n         # if repo.obsstore == False --\\u003e no obsolete\\n         # then, save the iteration\\n         if unfi.obsstore:\\n             # this message are here for 80 char limit reason\\n             mso = _(\\\"push includes obsolete changeset: %s!\\\")\\n             mspd = _(\\\"push includes phase-divergent changeset: %s!\\\")\\n             mscd = _(\\\"push includes content-divergent changeset: %s!\\\")\\n             mst = {\\\"orphan\\\": _(\\\"push includes orphan changeset: %s!\\\"),\\n                    \\\"phase-divergent\\\": mspd,\\n                    \\\"content-divergent\\\": mscd}\\n             # If we are to push if there is at least one\\n             # obsolete or unstable changeset in missing, at\\n             # least one of the missinghead will be obsolete or\\n             # unstable. So checking heads only is ok\\n             for node in outgoing.missingheads:\\n                 ctx = unfi[node]\\n                 if ctx.obsolete():\\n                     raise error.Abort(mso % ctx)\\n                 elif ctx.isunstable():\\n                     # TODO print more than one instability in the abort\\n                     # message\\n                     raise error.Abort(mst[ctx.instabilities()[0]] % ctx)\\n \\n         discovery.checkheads(pushop)\\n     return True\\n \\n # List of names of steps to perform for an outgoing bundle2, order matters.\\n b2partsgenorder = []\\n \\n # Mapping between step name and function\\n #\\n # This exists to help extensions wrap steps if necessary\\n b2partsgenmapping = {}\\n \\n def b2partsgenerator(stepname, idx=None):\\n     \\\"\\\"\\\"decorator for function generating bundle2 part\\n \\n     The function is added to the step -\\u003e function mapping and appended to the\\n     list of steps.  
Beware that decorated functions will be added in order\\n     (this may matter).\\n \\n     You can only use this decorator for new steps, if you want to wrap a step\\n     from an extension, attack the b2partsgenmapping dictionary directly.\\\"\\\"\\\"\\n     def dec(func):\\n         assert stepname not in b2partsgenmapping\\n         b2partsgenmapping[stepname] = func\\n         if idx is None:\\n             b2partsgenorder.append(stepname)\\n         else:\\n             b2partsgenorder.insert(idx, stepname)\\n         return func\\n     return dec\\n \\n def _pushb2ctxcheckheads(pushop, bundler):\\n     \\\"\\\"\\\"Generate race condition checking parts\\n \\n     Exists as an independent function to aid extensions\\n     \\\"\\\"\\\"\\n     # * 'force' do not check for push race,\\n     # * if we don't push anything, there are nothing to check.\\n     if not pushop.force and pushop.outgoing.missingheads:\\n         allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())\\n         emptyremote = pushop.pushbranchmap is None\\n         if not allowunrelated or emptyremote:\\n             bundler.newpart('check:heads', data=iter(pushop.remoteheads))\\n         else:\\n             affected = set()\\n             for branch, heads in pushop.pushbranchmap.iteritems():\\n                 remoteheads, newheads, unsyncedheads, discardedheads = heads\\n                 if remoteheads is not None:\\n                     remote = set(remoteheads)\\n                     affected |= set(discardedheads) & remote\\n                     affected |= remote - set(newheads)\\n             if affected:\\n                 data = iter(sorted(affected))\\n                 bundler.newpart('check:updated-heads', data=data)\\n \\n def _pushing(pushop):\\n     \\\"\\\"\\\"return True if we are pushing anything\\\"\\\"\\\"\\n     return bool(pushop.outgoing.missing\\n                 or pushop.outdatedphases\\n                 or pushop.outobsmarkers\\n                 or pushop.outbookmarks)\\n \\n @b2partsgenerator('check-bookmarks')\\n def _pushb2checkbookmarks(pushop, bundler):\\n     \\\"\\\"\\\"insert bookmark move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasbookmarkcheck = 'bookmarks' in b2caps\\n     if not (pushop.outbookmarks and hasbookmarkcheck):\\n         return\\n     data = []\\n     for book, old, new in pushop.outbookmarks:\\n         old = bin(old)\\n         data.append((book, old))\\n     checkdata = bookmod.binaryencode(data)\\n     bundler.newpart('check:bookmarks', data=checkdata)\\n \\n @b2partsgenerator('check-phases')\\n def _pushb2checkphases(pushop, bundler):\\n     \\\"\\\"\\\"insert phase move checking\\\"\\\"\\\"\\n     if not _pushing(pushop) or pushop.force:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     hasphaseheads = 'heads' in b2caps.get('phases', ())\\n     if pushop.remotephases is not None and hasphaseheads:\\n         # check that the remote phase has not changed\\n         checks = [[] for p in phases.allphases]\\n         checks[phases.public].extend(pushop.remotephases.publicheads)\\n         checks[phases.draft].extend(pushop.remotephases.draftroots)\\n         if any(checks):\\n             for nodes in checks:\\n                 nodes.sort()\\n             checkdata = phases.binaryencode(checks)\\n             bundler.newpart('check:phases', data=checkdata)\\n \\n @b2partsgenerator('changeset')\\n def 
_pushb2ctx(pushop, bundler):\\n     \\\"\\\"\\\"handle changegroup push through bundle2\\n \\n     addchangegroup result is stored in the ``pushop.cgresult`` attribute.\\n     \\\"\\\"\\\"\\n     if 'changesets' in pushop.stepsdone:\\n         return\\n     pushop.stepsdone.add('changesets')\\n     # Send known heads to the server for race detection.\\n     if not _pushcheckoutgoing(pushop):\\n         return\\n     pushop.repo.prepushoutgoinghooks(pushop)\\n \\n     _pushb2ctxcheckheads(pushop, bundler)\\n \\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     version = '01'\\n     cgversions = b2caps.get('changegroup')\\n     if cgversions:  # 3.1 and 3.2 ship with an empty value\\n         cgversions = [v for v in cgversions\\n                       if v in changegroup.supportedoutgoingversions(\\n                           pushop.repo)]\\n         if not cgversions:\\n             raise ValueError(_('no common changegroup version'))\\n         version = max(cgversions)\\n     cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,\\n                                       'push')\\n     cgpart = bundler.newpart('changegroup', data=cgstream)\\n     if cgversions:\\n         cgpart.addparam('version', version)\\n     if 'treemanifest' in pushop.repo.requirements:\\n         cgpart.addparam('treemanifest', '1')\\n     def handlereply(op):\\n         \\\"\\\"\\\"extract addchangegroup returns from server reply\\\"\\\"\\\"\\n         cgreplies = op.records.getreplies(cgpart.id)\\n         assert len(cgreplies['changegroup']) == 1\\n         pushop.cgresult = cgreplies['changegroup'][0]['return']\\n     return handlereply\\n \\n @b2partsgenerator('phase')\\n def _pushb2phases(pushop, bundler):\\n     \\\"\\\"\\\"handle phase push through bundle2\\\"\\\"\\\"\\n     if 'phases' in pushop.stepsdone:\\n         return\\n     b2caps = bundle2.bundle2caps(pushop.remote)\\n     ui = pushop.repo.ui\\n \\n     legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')\\n     haspushkey = 'pushkey' in b2caps\\n     hasphaseheads = 'heads' in b2caps.get('phases', ())\\n \\n     if hasphaseheads and not legacyphase:\\n         return _pushb2phaseheads(pushop, bundler)\\n     elif haspushkey:\\n         return _pushb2phasespushkey(pushop, bundler)\\n \\n def _pushb2phaseheads(pushop, bundler):\\n     \\\"\\\"\\\"push phase information through a bundle2 - binary part\\\"\\\"\\\"\\n     pushop.stepsdone.add('phases')\\n     if pushop.outdatedphases:\\n         updates = [[] for p in phases.allphases]\\n         updates[0].extend(h.node() for h in pushop.outdatedphases)\\n         phasedata = phases.binaryencode(updates)\\n         bundler.newpart('phase-heads', data=phasedata)\\n \\n def _pushb2phasespushkey(pushop, bundler):\\n     \\\"\\\"\\\"push phase information through a bundle2 - pushkey part\\\"\\\"\\\"\\n     pushop.stepsdone.add('phases')\\n     part2node = []\\n \\n     def handlefailure(pushop, exc):\\n         targetid = int(exc.partid)\\n         for partid, node in part2node:\\n             if partid == targetid:\\n                 raise error.Abort(_('updating %s to public failed') % node)\\n \\n     enc = pushkey.encode\\n     for newremotehead in pushop.outdatedphases:\\n         part = bundler.newpart('pushkey')\\n         part.addparam('namespace', enc('phases'))\\n         part.addparam('key', enc(newremotehead.hex()))\\n         part.addparam('old', enc('%d' % phases.draft))\\n         part.addparam('new', enc('%d' % phases.public))\\n         
 @b2partsgenerator('phase')
 def _pushb2phases(pushop, bundler):
     """handle phase push through bundle2"""
     if 'phases' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)
     ui = pushop.repo.ui

     legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
     haspushkey = 'pushkey' in b2caps
     hasphaseheads = 'heads' in b2caps.get('phases', ())

     if hasphaseheads and not legacyphase:
         return _pushb2phaseheads(pushop, bundler)
     elif haspushkey:
         return _pushb2phasespushkey(pushop, bundler)

 def _pushb2phaseheads(pushop, bundler):
     """push phase information through a bundle2 - binary part"""
     pushop.stepsdone.add('phases')
     if pushop.outdatedphases:
         updates = [[] for p in phases.allphases]
         updates[0].extend(h.node() for h in pushop.outdatedphases)
         phasedata = phases.binaryencode(updates)
         bundler.newpart('phase-heads', data=phasedata)

 def _pushb2phasespushkey(pushop, bundler):
     """push phase information through a bundle2 - pushkey part"""
     pushop.stepsdone.add('phases')
     part2node = []

     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, node in part2node:
             if partid == targetid:
                 raise error.Abort(_('updating %s to public failed') % node)

     enc = pushkey.encode
     for newremotehead in pushop.outdatedphases:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('phases'))
         part.addparam('key', enc(newremotehead.hex()))
         part.addparam('old', enc('%d' % phases.draft))
         part.addparam('new', enc('%d' % phases.public))
         part2node.append((part.id, newremotehead))
         pushop.pkfailcb[part.id] = handlefailure

     def handlereply(op):
         for partid, node in part2node:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             msg = None
             if not results:
                 msg = _('server ignored update of %s to public!\n') % node
             elif not int(results[0]['return']):
                 msg = _('updating %s to public failed!\n') % node
             if msg is not None:
                 pushop.ui.warn(msg)
     return handlereply

 @b2partsgenerator('obsmarkers')
 def _pushb2obsmarkers(pushop, bundler):
     if 'obsmarkers' in pushop.stepsdone:
         return
     remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
     if obsolete.commonversion(remoteversions) is None:
         return
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         markers = sorted(pushop.outobsmarkers)
         bundle2.buildobsmarkerspart(bundler, markers)

 @b2partsgenerator('bookmarks')
 def _pushb2bookmarks(pushop, bundler):
     """handle bookmark push through bundle2"""
     if 'bookmarks' in pushop.stepsdone:
         return
     b2caps = bundle2.bundle2caps(pushop.remote)

     legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
     legacybooks = 'bookmarks' in legacy

     if not legacybooks and 'bookmarks' in b2caps:
         return _pushb2bookmarkspart(pushop, bundler)
     elif 'pushkey' in b2caps:
         return _pushb2bookmarkspushkey(pushop, bundler)

 def _bmaction(old, new):
     """small utility for bookmark pushing"""
     if not old:
         return 'export'
     elif not new:
         return 'delete'
     return 'update'

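_bmaction maps the old/new node pair to one of three actions; a quick illustrative check, assuming an empty string stands for "no node on that side":

    assert _bmaction('', 'abc123') == 'export'       # no old node: new bookmark
    assert _bmaction('abc123', '') == 'delete'       # no new node: removal
    assert _bmaction('abc123', 'def456') == 'update' # both present: move
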
 def _pushb2bookmarkspart(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     if not pushop.outbookmarks:
         return

     allactions = []
     data = []
     for book, old, new in pushop.outbookmarks:
         new = bin(new)
         data.append((book, new))
         allactions.append((book, _bmaction(old, new)))
     checkdata = bookmod.binaryencode(data)
     bundler.newpart('bookmarks', data=checkdata)

     def handlereply(op):
         ui = pushop.ui
         # if success
         for book, action in allactions:
             ui.status(bookmsgmap[action][0] % book)

     return handlereply

 def _pushb2bookmarkspushkey(pushop, bundler):
     pushop.stepsdone.add('bookmarks')
     part2book = []
     enc = pushkey.encode

     def handlefailure(pushop, exc):
         targetid = int(exc.partid)
         for partid, book, action in part2book:
             if partid == targetid:
                 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
         # we should not be called for part we did not generated
         assert False

     for book, old, new in pushop.outbookmarks:
         part = bundler.newpart('pushkey')
         part.addparam('namespace', enc('bookmarks'))
         part.addparam('key', enc(book))
         part.addparam('old', enc(old))
         part.addparam('new', enc(new))
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'
         part2book.append((part.id, book, action))
         pushop.pkfailcb[part.id] = handlefailure

     def handlereply(op):
         ui = pushop.ui
         for partid, book, action in part2book:
             partrep = op.records.getreplies(partid)
             results = partrep['pushkey']
             assert len(results) <= 1
             if not results:
                 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
             else:
                 ret = int(results[0]['return'])
                 if ret:
                     ui.status(bookmsgmap[action][0] % book)
                 else:
                     ui.warn(bookmsgmap[action][1] % book)
                     if pushop.bkresult is not None:
                         pushop.bkresult = 1
     return handlereply

 @b2partsgenerator('pushvars', idx=0)
 def _getbundlesendvars(pushop, bundler):
     '''send shellvars via bundle2'''
     pushvars = pushop.pushvars
     if pushvars:
         shellvars = {}
         for raw in pushvars:
             if '=' not in raw:
                 msg = ("unable to parse variable '%s', should follow "
                         "'KEY=VALUE' or 'KEY=' format")
                 raise error.Abort(msg % raw)
             k, v = raw.split('=', 1)
             shellvars[k] = v

         part = bundler.newpart('pushvars')

         for key, value in shellvars.iteritems():
             part.addparam(key, value, mandatory=False)

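Push variables must follow the KEY=VALUE (or KEY=) shape; only the first '=' separates the key from the value, so values may themselves contain '='. A standalone sketch of the parsing rule used above:

    def parsepushvars(rawvars):
        """Parse 'KEY=VALUE' strings the way _getbundlesendvars does."""
        shellvars = {}
        for raw in rawvars:
            if '=' not in raw:
                raise ValueError(
                    "unable to parse variable %r, should follow "
                    "'KEY=VALUE' or 'KEY=' format" % raw)
            k, v = raw.split('=', 1)  # split on the first '=' only
            shellvars[k] = v
        return shellvars

    assert parsepushvars(['DEBUG=1', 'MSG=a=b']) == {'DEBUG': '1',
                                                     'MSG': 'a=b'}
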
 def _pushbundle2(pushop):
     """push data to the remote using bundle2

     The only currently supported type of data is changegroup but this will
     evolve in the future."""
     bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
     pushback = (pushop.trmanager
                 and pushop.ui.configbool('experimental', 'bundle2.pushback'))

     # create reply capability
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                       allowpushback=pushback,
                                                       role='client'))
     bundler.newpart('replycaps', data=capsblob)
     replyhandlers = []
     for partgenname in b2partsgenorder:
         partgen = b2partsgenmapping[partgenname]
         ret = partgen(pushop, bundler)
         if callable(ret):
             replyhandlers.append(ret)
     # do not push if nothing to push
     if bundler.nbparts <= 1:
         return
     stream = util.chunkbuffer(bundler.getchunks())
     try:
         try:
             with pushop.remote.commandexecutor() as e:
                 reply = e.callcommand('unbundle', {
                     'bundle': stream,
                     'heads': ['force'],
                     'url': pushop.remote.url(),
                 }).result()
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         try:
             trgetter = None
             if pushback:
                 trgetter = pushop.trmanager.transaction
             op = bundle2.processbundle(pushop.repo, reply, trgetter)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
             pushop.ui.status(_('remote: %s\n') % exc)
             if exc.hint is not None:
                 pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
             raise error.Abort(_('push failed on remote'))
     except error.PushkeyFailed as exc:
         partid = int(exc.partid)
         if partid not in pushop.pkfailcb:
             raise
         pushop.pkfailcb[partid](pushop, exc)
     for rephand in replyhandlers:
         rephand(op)

 def _pushchangeset(pushop):
     """Make the actual push of changeset bundle to remote repo"""
     if 'changesets' in pushop.stepsdone:
         return
     pushop.stepsdone.add('changesets')
     if not _pushcheckoutgoing(pushop):
         return

     # Should have verified this in push().
     assert pushop.remote.capable('unbundle')

     pushop.repo.prepushoutgoinghooks(pushop)
     outgoing = pushop.outgoing
     # TODO: get bundlecaps from remote
     bundlecaps = None
     # create a changegroup from local
     if pushop.revs is None and not (outgoing.excluded
                             or pushop.repo.changelog.filteredrevs):
         # push everything,
         # use the fast path, no race possible on push
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                 fastpath=True, bundlecaps=bundlecaps)
     else:
         cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

     # apply changegroup to remote
     # local repo finds heads on server, finds out what
     # revs it must push. once revs transferred, if server
     # finds it has different heads (someone else won
     # commit/push race), server aborts.
     if pushop.force:
         remoteheads = ['force']
     else:
         remoteheads = pushop.remoteheads
     # ssh: return remote's addchangegroup()
     # http: return remote's addchangegroup() or 0 for error
     pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                         pushop.repo.url())

 def _pushsyncphase(pushop):
     """synchronise phase information locally and remotely"""
     cheads = pushop.commonheads
     # even when we don't push, exchanging phase data is useful
     remotephases = listkeys(pushop.remote, 'phases')
     if (pushop.ui.configbool('ui', '_usedassubrepo')
         and remotephases    # server supports phases
         and pushop.cgresult is None # nothing was pushed
         and remotephases.get('publishing', False)):
         # When:
         # - this is a subrepo push
         # - and remote support phase
         # - and no changeset was pushed
         # - and remote is publishing
         # We may be in issue 3871 case!
         # We drop the possible phase synchronisation done by
         # courtesy to publish changesets possibly locally draft
         # on the remote.
         remotephases = {'publishing': 'True'}
     if not remotephases: # old server or public only reply from non-publishing
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
         ana = phases.analyzeremotephases(pushop.repo, cheads,
                                          remotephases)
         pheads, droots = ana
         ### Apply remote phase on local
         if remotephases.get('publishing', False):
             _localphasemove(pushop, cheads)
         else: # publish = False
             _localphasemove(pushop, pheads)
             _localphasemove(pushop, cheads, phases.draft)
         ### Apply local phase on remote

         if pushop.cgresult:
             if 'phases' in pushop.stepsdone:
                 # phases already pushed though bundle2
                 return
             outdated = pushop.outdatedphases
         else:
             outdated = pushop.fallbackoutdatedphases

         pushop.stepsdone.add('phases')

         # filter heads already turned public by the push
         outdated = [c for c in outdated if c.node() not in pheads]
         # fallback to independent pushkey command
         for newremotehead in outdated:
             with pushop.remote.commandexecutor() as e:
                 r = e.callcommand('pushkey', {
                     'namespace': 'phases',
                     'key': newremotehead.hex(),
                     'old': '%d' % phases.draft,
                     'new': '%d' % phases.public
                 }).result()

             if not r:
                 pushop.ui.warn(_('updating %s to public failed!\n')
                                % newremotehead)

 def _localphasemove(pushop, nodes, phase=phases.public):
     """move <nodes> to <phase> in the local source repo"""
     if pushop.trmanager:
         phases.advanceboundary(pushop.repo,
                                pushop.trmanager.transaction(),
                                phase,
                                nodes)
     else:
         # repo is not locked, do not change any phases!
         # Informs the user that phases should have been moved when
         # applicable.
         actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
         phasestr = phases.phasenames[phase]
         if actualmoves:
             pushop.ui.status(_('cannot lock source repo, skipping '
                                'local %s phase update\n') % phasestr)

 def _pushobsolete(pushop):
     """utility function to push obsolete markers to a remote"""
     if 'obsmarkers' in pushop.stepsdone:
         return
     repo = pushop.repo
     remote = pushop.remote
     pushop.stepsdone.add('obsmarkers')
     if pushop.outobsmarkers:
         pushop.ui.debug('try to push obsolete markers to remote\n')
         rslts = []
         remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
         for key in sorted(remotedata, reverse=True):
             # reverse sort to ensure we end with dump0
             data = remotedata[key]
             rslts.append(remote.pushkey('obsolete', key, '', data))
         if [r for r in rslts if not r]:
             msg = _('failed to push some obsolete markers!\n')
             repo.ui.warn(msg)

 def _pushbookmark(pushop):
     """Update bookmark position on remote"""
     if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
         return
     pushop.stepsdone.add('bookmarks')
     ui = pushop.ui
     remote = pushop.remote

     for b, old, new in pushop.outbookmarks:
         action = 'update'
         if not old:
             action = 'export'
         elif not new:
             action = 'delete'

         with remote.commandexecutor() as e:
             r = e.callcommand('pushkey', {
                 'namespace': 'bookmarks',
                 'key': b,
                 'old': old,
                 'new': new,
             }).result()

         if r:
             ui.status(bookmsgmap[action][0] % b)
         else:
             ui.warn(bookmsgmap[action][1] % b)
             # discovery can have set the value form invalid entry
             if pushop.bkresult is not None:
                 pushop.bkresult = 1

 class pulloperation(object):
     """A object that represent a single pull operation

     It purpose is to carry pull related state and very common operation.

     A new should be created at the beginning of each pull and discarded
     afterward.
     """

     def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                  remotebookmarks=None, streamclonerequested=None,
                  includepats=None, excludepats=None):
         # repo we pull into
         self.repo = repo
         # repo we pull from
         self.remote = remote
         # revision we try to pull (None is "all")
         self.heads = heads
         # bookmark pulled explicitly
         self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                   for bookmark in bookmarks]
         # do we force pull?
         self.force = force
         # whether a streaming clone was requested
         self.streamclonerequested = streamclonerequested
         # transaction manager
         self.trmanager = None
         # set of common changeset between local and remote before pull
         self.common = None
         # set of pulled head
         self.rheads = None
         # list of missing changeset to fetch remotely
         self.fetch = None
         # remote bookmarks data
         self.remotebookmarks = remotebookmarks
         # result of changegroup pulling (used as return code by pull)
         self.cgresult = None
         # list of step already done
         self.stepsdone = set()
         # Whether we attempted a clone from pre-generated bundles.
         self.clonebundleattempted = False
         # Set of file patterns to include.
         self.includepats = includepats
         # Set of file patterns to exclude.
         self.excludepats = excludepats

     @util.propertycache
     def pulledsubset(self):
         """heads of the set of changeset target by the pull"""
         # compute target subset
         if self.heads is None:
             # We pulled every thing possible
             # sync on everything common
             c = set(self.common)
             ret = list(self.common)
             for n in self.rheads:
                 if n not in c:
                     ret.append(n)
             return ret
         else:
             # We pulled a specific subset
             # sync on this subset
             return self.heads

     @util.propertycache
     def canusebundle2(self):
         return not _forcebundle1(self)

     @util.propertycache
     def remotebundle2caps(self):
         return bundle2.bundle2caps(self.remote)

     def gettransaction(self):
         # deprecated; talk to trmanager directly
         return self.trmanager.transaction()

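The ``stepsdone`` set carried by pulloperation (and its push counterpart) is what lets several transport mechanisms coexist: a step satisfied early, e.g. inside a bundle2 exchange, is silently skipped by the legacy fallback later. A minimal sketch of the idiom:

    stepsdone = set()

    def runstep(name, func):
        # Run ``func`` only if no earlier mechanism already covered ``name``.
        if name in stepsdone:
            return
        stepsdone.add(name)
        func()
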
 class transactionmanager(util.transactional):
     """An object to manage the life cycle of a transaction

     It creates the transaction on demand and calls the appropriate hooks when
     closing the transaction."""
     def __init__(self, repo, source, url):
         self.repo = repo
         self.source = source
         self.url = url
         self._tr = None

     def transaction(self):
         """Return an open transaction object, constructing if necessary"""
         if not self._tr:
             trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
             self._tr = self.repo.transaction(trname)
             self._tr.hookargs['source'] = self.source
             self._tr.hookargs['url'] = self.url
         return self._tr

     def close(self):
         """close transaction if created"""
         if self._tr is not None:
             self._tr.close()

     def release(self):
         """release transaction if created"""
         if self._tr is not None:
             self._tr.release()

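transactionmanager opens the transaction lazily, so a pull that transfers nothing never opens one; close() then commits and release() aborts only what was actually created. A standalone sketch of the same create-on-demand shape (hypothetical names, not the Mercurial API):

    class lazytxn(object):
        """Create an underlying resource on first use; close/release are
        no-ops if nothing was ever opened."""
        def __init__(self, make):
            self._make = make
            self._tr = None

        def transaction(self):
            if self._tr is None:
                self._tr = self._make()
            return self._tr

        def close(self):    # commit path
            if self._tr is not None:
                self._tr.close()

        def release(self):  # error/cleanup path
            if self._tr is not None:
                self._tr.release()
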
 def listkeys(remote, namespace):
     with remote.commandexecutor() as e:
         return e.callcommand('listkeys', {'namespace': namespace}).result()

 def _fullpullbundle2(repo, pullop):
     # The server may send a partial reply, i.e. when inlining
     # pre-computed bundles. In that case, update the common
     # set based on the results and pull another bundle.
     #
     # There are two indicators that the process is finished:
     # - no changeset has been added, or
     # - all remote heads are known locally.
     # The head check must use the unfiltered view as obsoletion
     # markers can hide heads.
     unfi = repo.unfiltered()
     unficl = unfi.changelog
     def headsofdiff(h1, h2):
         """Returns heads(h1 % h2)"""
         res = unfi.set('heads(%ln %% %ln)', h1, h2)
         return set(ctx.node() for ctx in res)
     def headsofunion(h1, h2):
         """Returns heads((h1 + h2) - null)"""
         res = unfi.set('heads((%ln + %ln - null))', h1, h2)
         return set(ctx.node() for ctx in res)
     while True:
         old_heads = unficl.heads()
         clstart = len(unficl)
         _pullbundle2(pullop)
         if repository.NARROW_REQUIREMENT in repo.requirements:
             # XXX narrow clones filter the heads on the server side during
             # XXX getbundle and result in partial replies as well.
             # XXX Disable pull bundles in this case as band aid to avoid
             # XXX extra round trips.
             break
         if clstart == len(unficl):
             break
         if all(unficl.hasnode(n) for n in pullop.rheads):
             break
         new_heads = headsofdiff(unficl.heads(), old_heads)
         pullop.common = headsofunion(new_heads, pullop.common)
         pullop.rheads = set(pullop.rheads) - pullop.common

 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
          streamclonerequested=None, includepats=None, excludepats=None):
     """Fetch repository data from a remote.

     This is the main function used to retrieve data from a remote repository.

     ``repo`` is the local repository to clone into.
     ``remote`` is a peer instance.
     ``heads`` is an iterable of revisions we want to pull. ``None`` (the
     default) means to pull everything from the remote.
     ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
     default, all remote bookmarks are pulled.
     ``opargs`` are additional keyword arguments to pass to ``pulloperation``
     initialization.
     ``streamclonerequested`` is a boolean indicating whether a "streaming
     clone" is requested. A "streaming clone" is essentially a raw file copy
     of revlogs from the server. This only works when the local repository is
     empty. The default value of ``None`` means to respect the server
     configuration for preferring stream clones.
     ``includepats`` and ``excludepats`` define explicit file patterns to
     include and exclude in storage, respectively. If not defined, narrow
     patterns from the repo instance are used, if available.

     Returns the ``pulloperation`` created for this pull.
     """
     if opargs is None:
         opargs = {}

     # We allow the narrow patterns to be passed in explicitly to provide more
     # flexibility for API consumers.
     if includepats or excludepats:
         includepats = includepats or set()
         excludepats = excludepats or set()
     else:
         includepats, excludepats = repo.narrowpats

     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)

     pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                            streamclonerequested=streamclonerequested,
                            includepats=includepats, excludepats=excludepats,
                            **pycompat.strkwargs(opargs))

     peerlocal = pullop.remote.local()
     if peerlocal:
         missing = set(peerlocal.requirements) - pullop.repo.supported
         if missing:
             msg = _("required features are not"
                     " supported in the destination:"
                     " %s") % (', '.join(sorted(missing)))
             raise error.Abort(msg)

     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
     with repo.wlock(), repo.lock(), pullop.trmanager:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+        # Use the modern wire protocol, if available.
+        if remote.capable('exchangev2'):
+            exchangev2.pull(pullop)
+        else:
+            # This should ideally be in _pullbundle2(). However, it needs to run
+            # before discovery to avoid extra work.
+            _maybeapplyclonebundle(pullop)
+            streamclone.maybeperformlegacystreamclone(pullop)
+            _pulldiscovery(pullop)
+            if pullop.canusebundle2:
+                _fullpullbundle2(repo, pullop)
+            _pullchangeset(pullop)
+            _pullphase(pullop)
+            _pullbookmarks(pullop)
+            _pullobsolete(pullop)

     # storing remotenames
     if repo.ui.configbool('experimental', 'remotenames'):
         logexchange.pullremotenames(repo, remote)

     return pullop

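The hunk above is the substantive change in this file for 4.8: pull() now branches on the 'exchangev2' peer capability before falling back to the existing bundle2/bundle1 path. The selection itself is a plain capability test; a sketch of the shape (peer API simplified):

    def choosepullpath(remote):
        # Mirrors the gate added above, nothing more.
        if remote.capable('exchangev2'):
            return 'exchangev2'          # modern wire protocol path
        return 'bundle2-or-bundle1'      # legacy path kept as fallback
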
 # list of steps to perform discovery before pull
 pulldiscoveryorder = []

 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 pulldiscoverymapping = {}

 def pulldiscovery(stepname):
     """decorator for function performing discovery before pull

     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated function will be added in order (this
     may matter).

     You can only use this decorator for a new step, if you want to wrap a step
     from an extension, change the pulldiscovery dictionary directly."""
     def dec(func):
         assert stepname not in pulldiscoverymapping
         pulldiscoverymapping[stepname] = func
         pulldiscoveryorder.append(stepname)
         return func
     return dec

 def _pulldiscovery(pullop):
     """Run all discovery steps"""
     for stepname in pulldiscoveryorder:
         step = pulldiscoverymapping[stepname]
         step(pullop)

 @pulldiscovery('b1:bookmarks')
 def _pullbookmarkbundle1(pullop):
     """fetch bookmark data in bundle1 case

     If not using bundle2, we have to fetch bookmarks before changeset
     discovery to reduce the chance and impact of race conditions."""
     if pullop.remotebookmarks is not None:
         return
     if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
         # all known bundle2 servers now support listkeys, but lets be nice with
         # new implementation.
         return
     books = listkeys(pullop.remote, 'bookmarks')
     pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


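An extension can register its own pre-pull discovery step with the same decorator; a hypothetical registration (the step name is invented), assuming the extension imports pulldiscovery from mercurial.exchange:

    @pulldiscovery('example:mystep')
    def _mydiscovery(pullop):
        # Runs alongside the built-in steps, in registration order.
        pullop.repo.ui.debug('example discovery step ran\n')
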
 @pulldiscovery('changegroup')
 def _pulldiscoverychangegroup(pullop):
     """discovery phase for the pull

     Current handle changeset discovery only, will change handle all discovery
     at some point."""
     tmp = discovery.findcommonincoming(pullop.repo,
                                        pullop.remote,
                                        heads=pullop.heads,
                                        force=pullop.force)
     common, fetch, rheads = tmp
     nm = pullop.repo.unfiltered().changelog.nodemap
     if fetch and rheads:
         # If a remote heads is filtered locally, put in back in common.
         #
         # This is a hackish solution to catch most of "common but locally
         # hidden situation".  We do not performs discovery on unfiltered
         # repository because it end up doing a pathological amount of round
         # trip for w huge amount of changeset we do not care about.
         #
         # If a set of such "common but filtered" changeset exist on the server
         # but are not including a remote heads, we'll not be able to detect it,
         scommon = set(common)
         for n in rheads:
             if n in nm:
                 if n not in scommon:
                     common.append(n)
         if set(rheads).issubset(set(common)):
             fetch = []
     pullop.common = common
     pullop.fetch = fetch
     pullop.rheads = rheads

 def _pullbundle2(pullop):
     """pull data using bundle2

     For now, the only supported data are changegroup."""
     kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

     # make ui easier to access
     ui = pullop.repo.ui

     # At the moment we don't do stream clones over bundle2. If that is
     # implemented then here's where the check for that will go.
     streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

     # declare pull perimeters
     kwargs['common'] = pullop.common
     kwargs['heads'] = pullop.heads or pullop.rheads

     if streaming:
         kwargs['cg'] = False
         kwargs['stream'] = True
         pullop.stepsdone.add('changegroup')
         pullop.stepsdone.add('phases')

     else:
         # pulling changegroup
         pullop.stepsdone.add('changegroup')

         kwargs['cg'] = pullop.fetch

         legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
         hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
         if (not legacyphase and hasbinaryphase):
             kwargs['phases'] = True
             pullop.stepsdone.add('phases')

         if 'listkeys' in pullop.remotebundle2caps:
             if 'phases' not in pullop.stepsdone:
                 kwargs['listkeys'] = ['phases']

     bookmarksrequested = False
     legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
     hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

     if pullop.remotebookmarks is not None:
         pullop.stepsdone.add('request-bookmarks')

     if ('request-bookmarks' not in pullop.stepsdone
         and pullop.remotebookmarks is None
         and not legacybookmark and hasbinarybook):
         kwargs['bookmarks'] = True
         bookmarksrequested = True

     if 'listkeys' in pullop.remotebundle2caps:
         if 'request-bookmarks' not in pullop.stepsdone:
             # make sure to always includes bookmark data when migrating
             # `hg incoming --bundle` to using this function.
             pullop.stepsdone.add('request-bookmarks')
             kwargs.setdefault('listkeys', []).append('bookmarks')

     # If this is a full pull / clone and the server supports the clone bundles
     # feature, tell the server whether we attempted a clone bundle. The
     # presence of this flag indicates the client supports clone bundles. This
     # will enable the server to treat clients that support clone bundles
     # differently from those that don't.
     if (pullop.remote.capable('clonebundles')
         and pullop.heads is None and list(pullop.common) == [nullid]):
         kwargs['cbattempted'] = pullop.clonebundleattempted

     if streaming:
         pullop.repo.ui.status(_('streaming all changes\n'))
     elif not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
     else:
         if pullop.heads is None and list(pullop.common) == [nullid]:
             pullop.repo.ui.status(_("requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
         if obsolete.commonversion(remoteversions) is not None:
             kwargs['obsmarkers'] = True
             pullop.stepsdone.add('obsmarkers')
     _pullbundle2extraprepare(pullop, kwargs)

     with pullop.remote.commandexecutor() as e:
         args = dict(kwargs)
         args['source'] = 'pull'
         bundle = e.callcommand('getbundle', args).result()

         try:
             op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                          source='pull')
             op.modes['bookmarks'] = 'records'
             bundle2.processbundle(pullop.repo, bundle, op=op)
         except bundle2.AbortFromPart as exc:
             pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
             raise error.Abort(_('pull failed on remote'), hint=exc.hint)
         except error.BundleValueError as exc:
             raise error.Abort(_('missing support for %s') % exc)

     if pullop.fetch:
         pullop.cgresult = bundle2.combinechangegroupresults(op)

     # processing phases change
     for namespace, value in op.records['listkeys']:
         if namespace == 'phases':
             _pullapplyphases(pullop, value)

     # processing bookmark update
     if bookmarksrequested:
         books = {}
         for record in op.records['bookmarks']:
             books[record['bookmark']] = record["node"]
         pullop.remotebookmarks = books
     else:
         for namespace, value in op.records['listkeys']:
             if namespace == 'bookmarks':
                 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

     # bookmark data were either already there or pulled in the bundle
     if pullop.remotebookmarks is not None:
         _pullbookmarks(pullop)

 def _pullbundle2extraprepare(pullop, kwargs):
     """hook function so that extensions can extend the getbundle call"""

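_pullbundle2extraprepare is deliberately empty so extensions can adjust the getbundle arguments. A hypothetical wrapper using the standard extensions.wrapfunction idiom (the extra argument name is invented):

    # In a hypothetical extension; exchange is mercurial.exchange.
    from mercurial import exchange, extensions

    def _extraprepare(orig, pullop, kwargs):
        kwargs['example-arg'] = True  # hypothetical extra getbundle argument
        return orig(pullop, kwargs)

    def extsetup(ui):
        extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
                                _extraprepare)
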
 def _pullchangeset(pullop):
     """pull changeset from unbundle into the local repo"""
     # We delay the open of the transaction as late as possible so we
     # don't open transaction for nothing or you break future useful
     # rollback call
     if 'changegroup' in pullop.stepsdone:
         return
     pullop.stepsdone.add('changegroup')
     if not pullop.fetch:
         pullop.repo.ui.status(_("no changes found\n"))
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
     if pullop.heads is None and list(pullop.common) == [nullid]:
         pullop.repo.ui.status(_("requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
         pullop.heads = pullop.rheads

     if pullop.remote.capable('getbundle'):
         # TODO: get bundlecaps from remote
         cg = pullop.remote.getbundle('pull', common=pullop.common,
                                      heads=pullop.heads or pullop.rheads)
     elif pullop.heads is None:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroup', {
                 'nodes': pullop.fetch,
                 'source': 'pull',
             }).result()

     elif not pullop.remote.capable('changegroupsubset'):
         raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
     else:
         with pullop.remote.commandexecutor() as e:
             cg = e.callcommand('changegroupsubset', {
                 'bases': pullop.fetch,
                 'heads': pullop.heads,
                 'source': 'pull',
             }).result()

     bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                    pullop.remote.url())
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

 def _pullphase(pullop):
     # Get remote phases data from remote
     if 'phases' in pullop.stepsdone:
         return
     remotephases = listkeys(pullop.remote, 'phases')
     _pullapplyphases(pullop, remotephases)

 def _pullapplyphases(pullop, remotephases):
     """apply phase movement from observed remote state"""
     if 'phases' in pullop.stepsdone:
         return
     pullop.stepsdone.add('phases')
     publishing = bool(remotephases.get('publishing', False))
     if remotephases and not publishing:
         # remote is new and non-publishing
         pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                  pullop.pulledsubset,
                                                  remotephases)
         dheads = pullop.pulledsubset
     else:
         # Remote is old or publishing all common changesets
         # should be seen as public
         pheads = pullop.pulledsubset
         dheads = []
     unfi = pullop.repo.unfiltered()
     phase = unfi._phasecache.phase
     rev = unfi.changelog.nodemap.get
     public = phases.public
     draft = phases.draft

     # exclude changesets already public locally and update the others
     pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
     if pheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, public, pheads)

     # exclude changesets already draft locally and update the others
     dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
     if dheads:
         tr = pullop.gettransaction()
         phases.advanceboundary(pullop.repo, tr, draft, dheads)

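Phases only ever advance toward "more public", which is why both loops above first drop nodes already at or past the target phase. Phase values compare as public < draft < secret, so the filter reduces to an integer comparison:

    # Phase constants as integers: public=0, draft=1, secret=2.
    public, draft, secret = 0, 1, 2

    localphase = draft
    targetphase = public
    # Mirrors the filters above: only strictly "less public" changesets
    # need their boundary advanced.
    needsmove = localphase > targetphase
    assert needsmove  # a draft changeset can still become public
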
 def _pullbookmarks(pullop):
     """process the remote bookmark information to update the local one"""
     if 'bookmarks' in pullop.stepsdone:
         return
     pullop.stepsdone.add('bookmarks')
     repo = pullop.repo
     remotebookmarks = pullop.remotebookmarks
     bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                              pullop.remote.url(),
                              pullop.gettransaction,
                              explicit=pullop.explicitbookmarks)

 def _pullobsolete(pullop):
     """utility function to pull obsolete markers from a remote

     The `gettransaction` is function that return the pull transaction, creating
     one if necessary. We return the transaction to inform the calling code that
     a new transaction have been created (when applicable).

     Exists mostly to allow overriding for experimentation purpose"""
     if 'obsmarkers' in pullop.stepsdone:
         return
     pullop.stepsdone.add('obsmarkers')
     tr = None
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         pullop.repo.ui.debug('fetching remote obsolete markers\n')
         remoteobs = listkeys(pullop.remote, 'obsolete')
         if 'dump0' in remoteobs:
             tr = pullop.gettransaction()
             markers = []
             for key in sorted(remoteobs, reverse=True):
                 if key.startswith('dump'):
                     data = util.b85decode(remoteobs[key])
                     version, newmarks = obsolete._readmarkers(data)
                     markers += newmarks
             if markers:
                 pullop.repo.obsstore.add(tr, markers)
             pullop.repo.invalidatevolatilesets()
     return tr

 def applynarrowacl(repo, kwargs):
     """Apply narrow fetch access control.

     This massages the named arguments for getbundle wire protocol commands
     so requested data is filtered through access control rules.
     """
     ui = repo.ui
     # TODO this assumes existence of HTTP and is a layering violation.
     username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
         ui.configlist(_NARROWACL_SECTION, 'default.includes'))
     user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
         ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))

     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]

     req_includes = set(kwargs.get(r'includepats', []))
     req_excludes = set(kwargs.get(r'excludepats', []))

     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)

     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))

     new_args = {}
     new_args.update(kwargs)
     new_args[r'narrow'] = True
     new_args[r'includepats'] = req_includes
     if req_excludes:
         new_args[r'excludepats'] = req_excludes

     return new_args

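applynarrowacl normalizes the configured ACL patterns before intersecting them with the client's request: a bare '*' grants the whole repository ('path:.'), anything else becomes a 'path:' prefix match. A standalone sketch of that normalization:

    def normalizeaclpatterns(pats):
        # Mirrors the user_includes/user_excludes handling above.
        return ['path:.' if p == '*' else 'path:' + p for p in pats]

    assert normalizeaclpatterns(['*']) == ['path:.']
    assert normalizeaclpatterns(['foo/bar']) == ['path:foo/bar']
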
 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.

     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
               May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
              most depth changesets away from one of heads.

     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

         visitnodes: The list of nodes (either full or ellipsis) which
                     need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
                  the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
                        narrowchangegroup to produce ellipsis nodes with the
                        correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog

     clrev = cl.rev

     commonrevs = {clrev(n) for n in common} | {nullrev}
     headsrevs = {clrev(n) for n in heads}

     if depth:
         revdepth = {h: 0 for h in headsrevs}

     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)

     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child  # Recurse in case we just added a 3rd root

     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)

     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                             'roots: %d %d %d') % (head, r1, r2, r3))

     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = [cl.node(m) for m in missing]
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = bool(curmf.diff(p1mf, match))
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = bool(curmf.diff(p2mf, match))
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(curmf.walk(match))

         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]

     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots

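As a toy illustration of the shape this computes (hypothetical, not derived from a real repository): with a linear history A-B-C-D where the narrowspec only matches a file modified in B and D, every node in the range is visited, but only B and D must be full on the client; the others are ellipsis candidates whose substitute parentage is recorded:

    # Toy result shape for A-B-C-D with narrow-relevant changes in B and D.
    visitnodes     = ['A', 'B', 'C', 'D']  # everything in the range
    relevant_nodes = {'B', 'D'}            # must be non-ellipsis nodes
    # ellipsisroots would record, e.g., that an elided 'C' leaves 'B' as
    # the recorded root/parent for 'D'; the addroot/splithead machinery
    # above exists to keep each ellipsis head at no more than two roots
    # even in merge-heavy graphs.
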
 def caps20to10(repo, role):
     """return a set with appropriate options to use bundle20 during getbundle"""
     caps = {'HG20'}
     capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
     caps.add('bundle2=' + urlreq.quote(capsblob))
     return caps

 # List of names of steps to perform for a bundle2 for getbundle, order matters.
 getbundle2partsorder = []

 # Mapping between step name and function
 #
 # This exists to help extensions wrap steps if necessary
 getbundle2partsmapping = {}

 def getbundle2partsgenerator(stepname, idx=None):
     """decorator for function generating bundle2 part for getbundle

     The function is added to the step -> function mapping and appended to the
     list of steps.  Beware that decorated functions will be added in order
     (this may matter).

     You can only use this decorator for new steps, if you want to wrap a step
     from an extension, attack the getbundle2partsmapping dictionary directly."""
     def dec(func):
         assert stepname not in getbundle2partsmapping
         getbundle2partsmapping[stepname] = func
         if idx is None:
             getbundle2partsorder.append(stepname)
         else:
             getbundle2partsorder.insert(idx, stepname)
         return func
     return dec

 def bundle2requested(bundlecaps):
     if bundlecaps is not None:
         return any(cap.startswith('HG2') for cap in bundlecaps)
     return False

 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                     **kwargs):
     """Return chunks constituting a bundle's raw data.

     Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
     passed.

     Returns a 2-tuple of a dict with metadata about the generated bundle
     and an iterator over raw chunks (of varying sizes).
     """
     kwargs = pycompat.byteskwargs(kwargs)
     info = {}
     usebundle2 = bundle2requested(bundlecaps)
     # bundle10 case
     if not usebundle2:
         if bundlecaps and not kwargs.get('cg', True):
             raise ValueError(_('request for bundle10 must include changegroup'))

         if kwargs:
             raise ValueError(_('unsupported getbundle arguments: %s')
                              % ', '.join(sorted(kwargs.keys())))
         outgoing = _computeoutgoing(repo, heads, common)
         info['bundleversion'] = 1
         return info, changegroup.makestream(repo, outgoing, '01', source,
                                             bundlecaps=bundlecaps)

     # bundle20 case
     info['bundleversion'] = 2
     b2caps = {}
     for bcaps in bundlecaps:
         if bcaps.startswith('bundle2='):
             blob = urlreq.unquote(bcaps[len('bundle2='):])
             b2caps.update(bundle2.decodecaps(blob))
     bundler = bundle2.bundle20(repo.ui, b2caps)

     kwargs['heads'] = heads
     kwargs['common'] = common

     for name in getbundle2partsorder:
         func = getbundle2partsmapping[name]
         func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
              **pycompat.strkwargs(kwargs))

     info['prefercompressed'] = bundler.prefercompressed

     return info, bundler.getchunks()

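Whether a getbundle request yields a bundle2 stream hinges on a prefix test over the client's advertised bundlecaps; caps20to10 above builds exactly the capabilities that make bundle2requested answer yes. A standalone restatement of that test:

    def wantsbundle2(bundlecaps):
        # Same prefix test as bundle2requested above: 'HG20' (and any
        # future 'HG2x') selects bundle2; 'HG10' or no caps selects bundle1.
        if bundlecaps is not None:
            return any(cap.startswith('HG2') for cap in bundlecaps)
        return False

    assert wantsbundle2({'HG20', 'bundle2=...'})
    assert not wantsbundle2({'HG10'})
    assert not wantsbundle2(None)
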
 @getbundle2partsgenerator('stream2')
 def _getbundlestream2(bundler, repo, *args, **kwargs):
     return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

 @getbundle2partsgenerator('changegroup')
 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, heads=None, common=None, **kwargs):
     """add a changegroup part to the requested bundle"""
     if not kwargs.get(r'cg', True):
         return

     version = '01'
     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)

     outgoing = _computeoutgoing(repo, heads, common)
     if not outgoing.missing:
         return

     if kwargs.get(r'narrow', False):
         include = sorted(filter(bool, kwargs.get(r'includepats', [])))
         exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
         filematcher = narrowspec.match(repo.root, include=include,
                                        exclude=exclude)
     else:
         filematcher = None

     cgstream = changegroup.makestream(repo, outgoing, version, source,
                                       bundlecaps=bundlecaps,
                                       filematcher=filematcher)

     part = bundler.newpart('changegroup', data=cgstream)
     if cgversions:
         part.addparam('version', version)

     part.addparam('nbchanges', '%d' % len(outgoing.missing),
                   mandatory=False)

     if 'treemanifest' in repo.requirements:
         part.addparam('treemanifest', '1')

     if kwargs.get(r'narrow', False) and (include or exclude):
         narrowspecpart = bundler.newpart('narrow:spec')
         if include:
             narrowspecpart.addparam(
                 'include', '\n'.join(include), mandatory=True)
         if exclude:
             narrowspecpart.addparam(
                 'exclude', '\n'.join(exclude), mandatory=True)

 @getbundle2partsgenerator('bookmarks')
 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                               b2caps=None, **kwargs):
     """add a bookmark part to the requested bundle"""
     if not kwargs.get(r'bookmarks', False):
         return
     if 'bookmarks' not in b2caps:
         raise ValueError(_('no common bookmarks exchange method'))
     books  = bookmod.listbinbookmarks(repo)
     data = bookmod.binaryencode(books)
     if data:
         bundler.newpart('bookmarks', data=data)

 @getbundle2partsgenerator('listkeys')
 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                             b2caps=None, **kwargs):
     """add parts containing listkeys namespaces to the requested bundle"""
     listkeys = kwargs.get(r'listkeys', ())
     for namespace in listkeys:
         part = bundler.newpart('listkeys')
         part.addparam('namespace', namespace)
         keys = repo.listkeys(namespace).items()
         part.data = pushkey.encodekeys(keys)

 @getbundle2partsgenerator('obsmarkers')
 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
     """add an obsolescence markers part to the requested bundle"""
     if kwargs.get(r'obsmarkers', False):
         if heads is None:
             heads = repo.heads()
         subset = [c.node() for c in repo.set('::%ln', heads)]
         markers = repo.obsstore.relevantmarkers(subset)
         markers = sorted(markers)
         bundle2.buildobsmarkerspart(bundler, markers)

 @getbundle2partsgenerator('phases')
 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, **kwargs):
     """add phase heads part to the requested bundle"""
     if kwargs.get(r'phases', False):
         if not 'heads' in b2caps.get('phases'):
             raise ValueError(_('no common phases exchange method'))
         if heads is None:
             heads = repo.heads()

         headsbyphase = collections.defaultdict(set)
         if repo.publishing():
             headsbyphase[phases.public] = heads
         else:
             # find the appropriate heads to move

             phase = repo._phasecache.phase
             node = repo.changelog.node
             rev = repo.changelog.rev
             for h in heads:
                 headsbyphase[phase(repo, rev(h))].add(h)
             seenphases = list(headsbyphase.keys())

             # We do not handle anything but public and draft phase for now)
             if seenphases:
                 assert max(seenphases) <= phases.draft

             # if client is pulling non-public changesets, we need to find
             # intermediate public heads.
             draftheads = headsbyphase.get(phases.draft, set())
             if draftheads:
                 publicheads = headsbyphase.get(phases.public, set())

                 revset = 'heads(only(%ln, %ln) and public())'
                 extraheads = repo.revs(revset, draftheads, publicheads)
                 for r in extraheads:
                     headsbyphase[phases.public].add(node(r))

         # transform data in a format used by the encoding function
         phasemapping = []
         for phase in phases.allphases:
             phasemapping.append(sorted(headsbyphase[phase]))

         # generate the actual part
         phasedata = phases.binaryencode(phasemapping)
         bundler.newpart('phase-heads', data=phasedata)

 @getbundle2partsgenerator('hgtagsfnodes')
 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                          b2caps=None, heads=None, common=None,
                          **kwargs):
     """Transfer the .hgtags filenodes mapping.

     Only values for heads in this bundle will be transferred.

     The part data consists of pairs of 20 byte changeset node and .hgtags
     filenodes raw values.
     """
     # Don't send unless:
     # - changeset are being exchanged,
     # - the client supports it.
     if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
         return

     outgoing = _computeoutgoing(repo, heads, common)
     bundle2.addparttagsfnodescache(repo, bundler, outgoing)

repo.ui.has_section(_NARROWACL_SECTION)):\\n         return\\n \\n     outgoing = _computeoutgoing(repo, heads, common)\\n     bundle2.addpartrevbranchcache(repo, bundler, outgoing)\\n \\n def check_heads(repo, their_heads, context):\\n     \\\"\\\"\\\"check if the heads of a repo have been modified\\n \\n     Used by peer for unbundling.\\n     \\\"\\\"\\\"\\n     heads = repo.heads()\\n     heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()\\n     if not (their_heads == ['force'] or their_heads == heads or\\n             their_heads == ['hashed', heads_hash]):\\n         # someone else committed\\/pushed\\/unbundled while we\\n         # were transferring data\\n         raise error.PushRaced('repository changed while %s - '\\n                               'please try again' % context)\\n \\n def unbundle(repo, cg, heads, source, url):\\n     \\\"\\\"\\\"Apply a bundle to a repo.\\n \\n     this function makes sure the repo is locked during the application and have\\n     mechanism to check that no push race occurred between the creation of the\\n     bundle and its application.\\n \\n     If the push was raced as PushRaced exception is raised.\\\"\\\"\\\"\\n     r = 0\\n     # need a transaction when processing a bundle2 stream\\n     # [wlock, lock, tr] - needs to be an array so nested functions can modify it\\n     lockandtr = [None, None, None]\\n     recordout = None\\n     # quick fix for output mismatch with bundle2 in 3.4\\n     captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')\\n     if url.startswith('remote:http:') or url.startswith('remote:https:'):\\n         captureoutput = True\\n     try:\\n         # note: outside bundle1, 'heads' is expected to be empty and this\\n         # 'check_heads' call wil be a no-op\\n         check_heads(repo, heads, 'uploading changes')\\n         # push can proceed\\n         if not isinstance(cg, bundle2.unbundle20):\\n             # legacy case: bundle1 (changegroup 01)\\n             txnname = \\\"\\\\n\\\".join([source, util.hidepassword(url)])\\n             with repo.lock(), repo.transaction(txnname) as tr:\\n                 op = bundle2.applybundle(repo, cg, tr, source, url)\\n                 r = bundle2.combinechangegroupresults(op)\\n         else:\\n             r = None\\n             try:\\n                 def gettransaction():\\n                     if not lockandtr[2]:\\n                         lockandtr[0] = repo.wlock()\\n                         lockandtr[1] = repo.lock()\\n                         lockandtr[2] = repo.transaction(source)\\n                         lockandtr[2].hookargs['source'] = source\\n                         lockandtr[2].hookargs['url'] = url\\n                         lockandtr[2].hookargs['bundle2'] = '1'\\n                     return lockandtr[2]\\n \\n                 # Do greedy locking by default until we're satisfied with lazy\\n                 # locking.\\n                 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):\\n                     gettransaction()\\n \\n                 op = bundle2.bundleoperation(repo, gettransaction,\\n                                              captureoutput=captureoutput,\\n                                              source='push')\\n                 try:\\n                     op = bundle2.processbundle(repo, cg, op=op)\\n                 finally:\\n                     r = op.reply\\n                     if captureoutput and r is not None:\\n                         repo.ui.pushbuffer(error=True, 
subproc=True)\\n                         def recordout(output):\\n                             r.newpart('output', data=output, mandatory=False)\\n                 if lockandtr[2] is not None:\\n                     lockandtr[2].close()\\n             except BaseException as exc:\\n                 exc.duringunbundle2 = True\\n                 if captureoutput and r is not None:\\n                     parts = exc._bundle2salvagedoutput = r.salvageoutput()\\n                     def recordout(output):\\n                         part = bundle2.bundlepart('output', data=output,\\n                                                   mandatory=False)\\n                         parts.append(part)\\n                 raise\\n     finally:\\n         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])\\n         if recordout is not None:\\n             recordout(repo.ui.popbuffer())\\n     return r\\n \\n def _maybeapplyclonebundle(pullop):\\n     \\\"\\\"\\\"Apply a clone bundle from a remote, if possible.\\\"\\\"\\\"\\n \\n     repo = pullop.repo\\n     remote = pullop.remote\\n \\n     if not repo.ui.configbool('ui', 'clonebundles'):\\n         return\\n \\n     # Only run if local repo is empty.\\n     if len(repo):\\n         return\\n \\n     if pullop.heads:\\n         return\\n \\n     if not remote.capable('clonebundles'):\\n         return\\n \\n     with remote.commandexecutor() as e:\\n         res = e.callcommand('clonebundles', {}).result()\\n \\n     # If we call the wire protocol command, that's good enough to record the\\n     # attempt.\\n     pullop.clonebundleattempted = True\\n \\n     entries = parseclonebundlesmanifest(repo, res)\\n     if not entries:\\n         repo.ui.note(_('no clone bundles available on remote; '\\n                        'falling back to regular clone\\\\n'))\\n         return\\n \\n     entries = filterclonebundleentries(\\n         repo, entries, streamclonerequested=pullop.streamclonerequested)\\n \\n     if not entries:\\n         # There is a thundering herd concern here. However, if a server\\n         # operator doesn't advertise bundles appropriate for its clients,\\n         # they deserve what's coming. 
Furthermore, from a client's\\n         # perspective, no automatic fallback would mean not being able to\\n         # clone!\\n         repo.ui.warn(_('no compatible clone bundles available on server; '\\n                        'falling back to regular clone\\\\n'))\\n         repo.ui.warn(_('(you may want to report this to the server '\\n                        'operator)\\\\n'))\\n         return\\n \\n     entries = sortclonebundleentries(repo.ui, entries)\\n \\n     url = entries[0]['URL']\\n     repo.ui.status(_('applying clone bundle from %s\\\\n') % url)\\n     if trypullbundlefromurl(repo.ui, repo, url):\\n         repo.ui.status(_('finished applying clone bundle\\\\n'))\\n     # Bundle failed.\\n     #\\n     # We abort by default to avoid the thundering herd of\\n     # clients flooding a server that was expecting expensive\\n     # clone load to be offloaded.\\n     elif repo.ui.configbool('ui', 'clonebundlefallback'):\\n         repo.ui.warn(_('falling back to normal clone\\\\n'))\\n     else:\\n         raise error.Abort(_('error applying bundle'),\\n                           hint=_('if this error persists, consider contacting '\\n                                  'the server operator or disable clone '\\n                                  'bundles via '\\n                                  '\\\"--config ui.clonebundles=false\\\"'))\\n \\n def parseclonebundlesmanifest(repo, s):\\n     \\\"\\\"\\\"Parses the raw text of a clone bundles manifest.\\n \\n     Returns a list of dicts. The dicts have a ``URL`` key corresponding\\n     to the URL and other keys are the attributes for the entry.\\n     \\\"\\\"\\\"\\n     m = []\\n     for line in s.splitlines():\\n         fields = line.split()\\n         if not fields:\\n             continue\\n         attrs = {'URL': fields[0]}\\n         for rawattr in fields[1:]:\\n             key, value = rawattr.split('=', 1)\\n             key = urlreq.unquote(key)\\n             value = urlreq.unquote(value)\\n             attrs[key] = value\\n \\n             # Parse BUNDLESPEC into components. 
This makes client-side\\n             # preferences easier to specify since you can prefer a single\\n             # component of the BUNDLESPEC.\\n             if key == 'BUNDLESPEC':\\n                 try:\\n                     bundlespec = parsebundlespec(repo, value)\\n                     attrs['COMPRESSION'] = bundlespec.compression\\n                     attrs['VERSION'] = bundlespec.version\\n                 except error.InvalidBundleSpecification:\\n                     pass\\n                 except error.UnsupportedBundleSpecification:\\n                     pass\\n \\n         m.append(attrs)\\n \\n     return m\\n \\n def isstreamclonespec(bundlespec):\\n     # Stream clone v1\\n     if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):\\n         return True\\n \\n     # Stream clone v2\\n     if (bundlespec.wirecompression == 'UN' and \\\\\\n         bundlespec.wireversion == '02' and \\\\\\n         bundlespec.contentopts.get('streamv2')):\\n         return True\\n \\n     return False\\n \\n def filterclonebundleentries(repo, entries, streamclonerequested=False):\\n     \\\"\\\"\\\"Remove incompatible clone bundle manifest entries.\\n \\n     Accepts a list of entries parsed with ``parseclonebundlesmanifest``\\n     and returns a new list consisting of only the entries that this client\\n     should be able to apply.\\n \\n     There is no guarantee we'll be able to apply all returned entries because\\n     the metadata we use to filter on may be missing or wrong.\\n     \\\"\\\"\\\"\\n     newentries = []\\n     for entry in entries:\\n         spec = entry.get('BUNDLESPEC')\\n         if spec:\\n             try:\\n                 bundlespec = parsebundlespec(repo, spec, strict=True)\\n \\n                 # If a stream clone was requested, filter out non-streamclone\\n                 # entries.\\n                 if streamclonerequested and not isstreamclonespec(bundlespec):\\n                     repo.ui.debug('filtering %s because not a stream clone\\\\n' %\\n                                   entry['URL'])\\n                     continue\\n \\n             except error.InvalidBundleSpecification as e:\\n                 repo.ui.debug(stringutil.forcebytestr(e) + '\\\\n')\\n                 continue\\n             except error.UnsupportedBundleSpecification as e:\\n                 repo.ui.debug('filtering %s because unsupported bundle '\\n                               'spec: %s\\\\n' % (\\n                                   entry['URL'], stringutil.forcebytestr(e)))\\n                 continue\\n         # If we don't have a spec and requested a stream clone, we don't know\\n         # what the entry is so don't attempt to apply it.\\n         elif streamclonerequested:\\n             repo.ui.debug('filtering %s because cannot determine if a stream '\\n                           'clone bundle\\\\n' % entry['URL'])\\n             continue\\n \\n         if 'REQUIRESNI' in entry and not sslutil.hassni:\\n             repo.ui.debug('filtering %s because SNI not supported\\\\n' %\\n                           entry['URL'])\\n             continue\\n \\n         newentries.append(entry)\\n \\n     return newentries\\n \\n class clonebundleentry(object):\\n     \\\"\\\"\\\"Represents an item in a clone bundles manifest.\\n \\n     This rich class is needed to support sorting since sorted() in Python 3\\n     doesn't support ``cmp`` and our comparison is complex enough that ``key=``\\n     won't work.\\n     \\\"\\\"\\\"\\n \\n     def 
__init__(self, value, prefers):\\n         self.value = value\\n         self.prefers = prefers\\n \\n     def _cmp(self, other):\\n         for prefkey, prefvalue in self.prefers:\\n             avalue = self.value.get(prefkey)\\n             bvalue = other.value.get(prefkey)\\n \\n             # Special case for b missing attribute and a matches exactly.\\n             if avalue is not None and bvalue is None and avalue == prefvalue:\\n                 return -1\\n \\n             # Special case for a missing attribute and b matches exactly.\\n             if bvalue is not None and avalue is None and bvalue == prefvalue:\\n                 return 1\\n \\n             # We can't compare unless attribute present on both.\\n             if avalue is None or bvalue is None:\\n                 continue\\n \\n             # Same values should fall back to next attribute.\\n             if avalue == bvalue:\\n                 continue\\n \\n             # Exact matches come first.\\n             if avalue == prefvalue:\\n                 return -1\\n             if bvalue == prefvalue:\\n                 return 1\\n \\n             # Fall back to next attribute.\\n             continue\\n \\n         # If we got here we couldn't sort by attributes and prefers. Fall\\n         # back to index order.\\n         return 0\\n \\n     def __lt__(self, other):\\n         return self._cmp(other) \\u003c 0\\n \\n     def __gt__(self, other):\\n         return self._cmp(other) \\u003e 0\\n \\n     def __eq__(self, other):\\n         return self._cmp(other) == 0\\n \\n     def __le__(self, other):\\n         return self._cmp(other) \\u003c= 0\\n \\n     def __ge__(self, other):\\n         return self._cmp(other) \\u003e= 0\\n \\n     def __ne__(self, other):\\n         return self._cmp(other) != 0\\n \\n def sortclonebundleentries(ui, entries):\\n     prefers = ui.configlist('ui', 'clonebundleprefers')\\n     if not prefers:\\n         return list(entries)\\n \\n     prefers = [p.split('=', 1) for p in prefers]\\n \\n     items = sorted(clonebundleentry(v, prefers) for v in entries)\\n     return [i.value for i in items]\\n \\n def trypullbundlefromurl(ui, repo, url):\\n     \\\"\\\"\\\"Attempt to apply a bundle from a URL.\\\"\\\"\\\"\\n     with repo.lock(), repo.transaction('bundleurl') as tr:\\n         try:\\n             fh = urlmod.open(ui, url)\\n             cg = readbundle(ui, fh, 'stream')\\n \\n             if isinstance(cg, streamclone.streamcloneapplier):\\n                 cg.apply(repo)\\n             else:\\n                 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)\\n             return True\\n         except urlerr.httperror as e:\\n             ui.warn(_('HTTP error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e))\\n         except urlerr.urlerror as e:\\n             ui.warn(_('error fetching bundle: %s\\\\n') %\\n                     stringutil.forcebytestr(e.reason))\\n \\n         return False\\n\"}]}],\"properties\":[]}},\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "59"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ]
+                }, 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "method": "POST", 
+                "body": "ids%5B0%5D=11058&api.token=cli-hahayouwish"
+            }
+        }, 
+        {
+            "response": {
+                "headers": {
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fsh6hsdu5dzfurm5gsiy2cmi6kqw33cqikoawcqz2; expires=Thu, 14-Sep-2023 04:15:58 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:15:58 GMT"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ]
+                }, 
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "body": {
+                    "string": "{\"result\":\"diff --git a\\/tests\\/wireprotohelpers.sh b\\/tests\\/wireprotohelpers.sh\\n--- a\\/tests\\/wireprotohelpers.sh\\n+++ b\\/tests\\/wireprotohelpers.sh\\n@@ -56,3 +56,10 @@\\n web.api.http-v2 = true\\n EOF\\n }\\n+\\n+enablehttpv2client() {\\n+  cat \\u003e\\u003e $HGRCPATH \\u003c\\u003c EOF\\n+[experimental]\\n+httppeer.advertise-v2 = true\\n+EOF\\n+}\\ndiff --git a\\/tests\\/test-wireproto-exchangev2.t b\\/tests\\/test-wireproto-exchangev2.t\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/tests\\/test-wireproto-exchangev2.t\\n@@ -0,0 +1,53 @@\\n+Tests for wire protocol version 2 exchange.\\n+Tests in this file should be folded into existing tests once protocol\\n+v2 has enough features that it can be enabled via #testcase in existing\\n+tests.\\n+\\n+  $ . $TESTDIR\\/wireprotohelpers.sh\\n+  $ enablehttpv2client\\n+\\n+  $ hg init server-simple\\n+  $ enablehttpv2 server-simple\\n+  $ cd server-simple\\n+  $ cat \\u003e\\u003e .hg\\/hgrc \\u003c\\u003c EOF\\n+  \\u003e [phases]\\n+  \\u003e publish = false\\n+  \\u003e EOF\\n+  $ echo a0 \\u003e a\\n+  $ echo b0 \\u003e b\\n+  $ hg -q commit -A -m 'commit 0'\\n+\\n+  $ echo a1 \\u003e a\\n+  $ hg commit -m 'commit 1'\\n+  $ hg phase --public -r .\\n+  $ echo a2 \\u003e a\\n+  $ hg commit -m 'commit 2'\\n+\\n+  $ hg -q up -r 0\\n+  $ echo b1 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 1'\\n+  $ echo b2 \\u003e b\\n+  $ hg -q commit -m 'head 2 commit 2'\\n+\\n+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log\\n+  $ cat hg.pid \\u003e $DAEMON_PIDS\\n+\\n+  $ cd ..\\n+\\n+Test basic clone\\n+\\n+  $ hg --debug clone -U http:\\/\\/localhost:$HGPORT client-simple\\n+  using http:\\/\\/localhost:$HGPORT\\/\\n+  sending capabilities command\\n+  query 1; heads\\n+  sending 2 commands\\n+  sending command heads: {}\\n+  sending command known: {\\n+    'nodes': []\\n+  }\\n+  received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)\\n+  received frame(size=43; request=1; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)\\n+  received frame(size=11; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=1; request=3; stream=2; streamflags=; type=command-response; flags=continuation)\\n+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)\\ndiff --git a\\/mercurial\\/httppeer.py b\\/mercurial\\/httppeer.py\\n--- a\\/mercurial\\/httppeer.py\\n+++ b\\/mercurial\\/httppeer.py\\n@@ -802,7 +802,8 @@\\n             return True\\n \\n         # Other concepts.\\n-        if name in ('bundle2',):\\n+        # TODO remove exchangev2 once we have a command implemented.\\n+        if name in ('bundle2', 'exchangev2'):\\n             return True\\n \\n         # Alias command-* to presence of command of that name.\\ndiff --git a\\/mercurial\\/exchangev2.py b\\/mercurial\\/exchangev2.py\\nnew file mode 100644\\n--- \\/dev\\/null\\n+++ b\\/mercurial\\/exchangev2.py\\n@@ -0,0 +1,55 @@\\n+# exchangev2.py - repository exchange for wire protocol version 2\\n+#\\n+# Copyright 2018 Gregory Szorc \\u003cgregory.szorc@gmail.com\\u003e\\n+#\\n+# This software may be used and distributed according to the terms of the\\n+# GNU General Public License version 2 or any later version.\\n+\\n+from __future__ import absolute_import\\n+\\n+from .node import (\\n+ 
   nullid,\\n+)\\n+from . import (\\n+    setdiscovery,\\n+)\\n+\\n+def pull(pullop):\\n+    \\\"\\\"\\\"Pull using wire protocol version 2.\\\"\\\"\\\"\\n+    repo = pullop.repo\\n+    remote = pullop.remote\\n+\\n+    # Figure out what needs to be fetched.\\n+    common, fetch, remoteheads = _pullchangesetdiscovery(\\n+        repo, remote, pullop.heads, abortwhenunrelated=pullop.force)\\n+\\n+def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):\\n+    \\\"\\\"\\\"Determine which changesets need to be pulled.\\\"\\\"\\\"\\n+\\n+    if heads:\\n+        knownnode = repo.changelog.hasnode\\n+        if all(knownnode(head) for head in heads):\\n+            return heads, False, heads\\n+\\n+    # TODO wire protocol version 2 is capable of more efficient discovery\\n+    # than setdiscovery. Consider implementing something better.\\n+    common, fetch, remoteheads = setdiscovery.findcommonheads(\\n+        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated)\\n+\\n+    common = set(common)\\n+    remoteheads = set(remoteheads)\\n+\\n+    # If a remote head is filtered locally, put it back in the common set.\\n+    # See the comment in exchange._pulldiscoverychangegroup() for more.\\n+\\n+    if fetch and remoteheads:\\n+        nodemap = repo.unfiltered().changelog.nodemap\\n+\\n+        common |= {head for head in remoteheads if head in nodemap}\\n+\\n+        if set(remoteheads).issubset(common):\\n+            fetch = []\\n+\\n+    common.discard(nullid)\\n+\\n+    return common, fetch, remoteheads\\ndiff --git a\\/mercurial\\/exchange.py b\\/mercurial\\/exchange.py\\n--- a\\/mercurial\\/exchange.py\\n+++ b\\/mercurial\\/exchange.py\\n@@ -26,6 +26,7 @@\\n     changegroup,\\n     discovery,\\n     error,\\n+    exchangev2,\\n     lock as lockmod,\\n     logexchange,\\n     narrowspec,\\n@@ -1506,17 +1507,21 @@\\n \\n     pullop.trmanager = transactionmanager(repo, 'pull', remote.url())\\n     with repo.wlock(), repo.lock(), pullop.trmanager:\\n-        # This should ideally be in _pullbundle2(). However, it needs to run\\n-        # before discovery to avoid extra work.\\n-        _maybeapplyclonebundle(pullop)\\n-        streamclone.maybeperformlegacystreamclone(pullop)\\n-        _pulldiscovery(pullop)\\n-        if pullop.canusebundle2:\\n-            _fullpullbundle2(repo, pullop)\\n-        _pullchangeset(pullop)\\n-        _pullphase(pullop)\\n-        _pullbookmarks(pullop)\\n-        _pullobsolete(pullop)\\n+        # Use the modern wire protocol, if available.\\n+        if remote.capable('exchangev2'):\\n+            exchangev2.pull(pullop)\\n+        else:\\n+            # This should ideally be in _pullbundle2(). However, it needs to run\\n+            # before discovery to avoid extra work.\\n+            _maybeapplyclonebundle(pullop)\\n+            streamclone.maybeperformlegacystreamclone(pullop)\\n+            _pulldiscovery(pullop)\\n+            if pullop.canusebundle2:\\n+                _fullpullbundle2(repo, pullop)\\n+            _pullchangeset(pullop)\\n+            _pullphase(pullop)\\n+            _pullbookmarks(pullop)\\n+            _pullobsolete(pullop)\\n \\n     # storing remotenames\\n     if repo.ui.configbool('experimental', 'remotenames'):\\n\\n\",\"error_code\":null,\"error_info\":null}"
+                }
+            }, 
+            "request": {
+                "headers": {
+                    "content-length": [
+                        "55"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ]
+                }, 
+                "uri": "https://phab.mercurial-scm.org//api/differential.getrawdiff", 
+                "method": "POST", 
+                "body": "diffID=11058&api.token=cli-hahayouwish"
+            }
+        }
+    ]
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabread-conduit-error.json	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,70 @@
+{
+    "interactions": [
+        {
+            "response": {
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":\"ERR-INVALID-AUTH\",\"error_info\":\"API token \\\"cli-notavalidtoken\\\" has the wrong length. API tokens should be 32 characters long.\"}"
+                }, 
+                "headers": {
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F6jvmizfvgaa6bkls264secsim5nlgid4vj55jpe6; expires=Thu, 14-Sep-2023 04:38:21 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:38:21 GMT"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ]
+                }, 
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }
+            }, 
+            "request": {
+                "body": "api.token=cli-notavalidtoken&ids%5B0%5D=4480", 
+                "headers": {
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "content-length": [
+                        "44"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+861-aa7e312375cf)"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ]
+                }, 
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query"
+            }
+        }
+    ], 
+    "version": 1
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-create-alpha.json	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,590 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F4wycgjx3wajuukr7ggfpqedpe7czucr7mvmaems3; expires=Thu, 14-Sep-2023 04:47:40 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:40 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "body": "constraints%5Bcallsigns%5D%5B0%5D=HG&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "79"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"id\":11072,\"phid\":\"PHID-DIFF-xm6cw76uivc6g56xiuv2\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/11072\\/\"},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fll65pt562b6d7ifhjva4jwqqzxh2oopj4tuc6lfa; expires=Thu, 14-Sep-2023 04:47:40 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:40 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff", 
+                "body": "repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3&diff=diff+--git+a%2Falpha+b%2Falpha%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Falpha%0A%40%40+-0%2C0+%2B1%2C1+%40%40%0A%2Balpha%0A&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "235"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F5ivszbehkvbetlnks7omsqmbsu7r5by3p3yqw3ep; expires=Thu, 14-Sep-2023 04:47:41 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:41 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "body": "data=%7B%22date%22%3A+%220+0%22%2C+%22node%22%3A+%225206a4fa1e6cd7dbc027640267c109e05a9d2341%22%2C+%22user%22%3A+%22test%22%2C+%22parent%22%3A+%220000000000000000000000000000000000000000%22%7D&name=hg%3Ameta&diff_id=11072&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "264"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fxvwxxrmwpjntx6dlohrstyox7yjssdbzufiwygcg; expires=Thu, 14-Sep-2023 04:47:41 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:41 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "body": "data=%7B%225206a4fa1e6cd7dbc027640267c109e05a9d2341%22%3A+%7B%22time%22%3A+0.0%2C+%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%7D%7D&name=local%3Acommits&diff_id=11072&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "227"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"}},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fy3s5iysh6h2javfdo2u7myspyjypv4mvojegqr6j; expires=Thu, 14-Sep-2023 04:47:42 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:42 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "body": "corpus=create+alpha+for+phabricator+test&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "83"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":4596,\"phid\":\"PHID-DREV-bntcdwe74cw3vwkzt6nq\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-mnqxquobbhdgttd\"},{\"phid\":\"PHID-XACT-DREV-nd34pqrjamxbhop\"},{\"phid\":\"PHID-XACT-DREV-4ka4rghn6b7xooc\"},{\"phid\":\"PHID-XACT-DREV-mfuvfyiijdqwpyg\"},{\"phid\":\"PHID-XACT-DREV-ckar54h6yenx24s\"}]},\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Foe7kd7hhldo25tzbegntkyfxm6wnztgdfmsfubo2; expires=Thu, 14-Sep-2023 04:47:42 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:42 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "body": "transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-xm6cw76uivc6g56xiuv2&transactions%5B0%5D%5Btype%5D=update&transactions%5B1%5D%5Bvalue%5D=create+alpha+for+phabricator+test&transactions%5B1%5D%5Btype%5D=title&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "242"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4596\",\"phid\":\"PHID-DREV-bntcdwe74cw3vwkzt6nq\",\"title\":\"create alpha for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4596\",\"dateCreated\":\"1536986862\",\"dateModified\":\"1536986862\",\"authorPHID\":\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-xm6cw76uivc6g56xiuv2\",\"diffs\":[\"11072\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F5d2bgafhoqhg5thqxeu6y4fngq7lqezf5h6eo5pd; expires=Thu, 14-Sep-2023 04:47:43 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:43 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "body": "api.token=cli-hahayouwish&ids%5B0%5D=4596", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "58"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F2cewrqifmvko6evm2sy2nvksvcvhk6hpsj36lcv2; expires=Thu, 14-Sep-2023 04:47:43 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:43 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "body": "data=%7B%22date%22%3A+%220+0%22%2C+%22node%22%3A+%22d8f232f7d799e1064d3da179df41a2b5d04334e9%22%2C+%22user%22%3A+%22test%22%2C+%22parent%22%3A+%220000000000000000000000000000000000000000%22%7D&name=hg%3Ameta&diff_id=11072&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "264"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }, 
+        {
+            "response": {
+                "status": {
+                    "message": "OK", 
+                    "code": 200
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }, 
+                "headers": {
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fped6v7jlldydnkfolkdmecyyjrkciqhkr7opvbt2; expires=Thu, 14-Sep-2023 04:47:44 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:47:44 GMT"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ]
+                }
+            }, 
+            "request": {
+                "method": "POST", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "body": "data=%7B%22d8f232f7d799e1064d3da179df41a2b5d04334e9%22%3A+%7B%22time%22%3A+0.0%2C+%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%7D%7D&name=local%3Acommits&diff_id=11072&api.token=cli-hahayouwish", 
+                "headers": {
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-length": [
+                        "227"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+866-5f07496726a1+20180915)"
+                    ]
+                }
+            }
+        }
+    ]
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-update-alpha-create-beta.json	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,915 @@
+{
+    "version": 1, 
+    "interactions": [
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&revisionIDs%5B0%5D=4596", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.querydiffs", 
+                "headers": {
+                    "content-length": [
+                        "66"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F5bjqjyefdbiq65cc3qepzxq7ncczgfqo2xxsybaf; expires=Thu, 14-Sep-2023 04:53:46 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:46 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"11073\":{\"id\":\"11073\",\"revisionID\":\"4596\",\"dateCreated\":\"1536986866\",\"dateModified\":\"1536986868\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"24417\",\"metadata\":{\"line:first\":1},\"oldPath\":null,\"currentPath\":\"alpha\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"2\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"2\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+alpha\\n+more\\n\"}]}],\"properties\":{\"hg:meta\":{\"parent\":\"0000000000000000000000000000000000000000\",\"node\":\"f70265671c65ab4b5416e611a6bd61887c013122\",\"user\":\"test\",\"date\":\"0 0\"},\"local:commits\":{\"f70265671c65ab4b5416e611a6bd61887c013122\":{\"time\":0,\"authorEmail\":\"test\",\"author\":\"test\"}}},\"authorName\":\"test\",\"authorEmail\":\"test\"},\"11072\":{\"id\":\"11072\",\"revisionID\":\"4596\",\"dateCreated\":\"1536986860\",\"dateModified\":\"1536986862\",\"sourceControlBaseRevision\":null,\"sourceControlPath\":null,\"sourceControlSystem\":null,\"branch\":null,\"bookmark\":null,\"creationMethod\":\"web\",\"description\":null,\"unitStatus\":\"4\",\"lintStatus\":\"4\",\"changes\":[{\"id\":\"24416\",\"metadata\":{\"line:first\":1},\"oldPath\":null,\"currentPath\":\"alpha\",\"awayPaths\":[],\"oldProperties\":[],\"newProperties\":{\"unix:filemode\":\"100644\"},\"type\":\"1\",\"fileType\":\"1\",\"commitHash\":null,\"addLines\":\"1\",\"delLines\":\"0\",\"hunks\":[{\"oldOffset\":\"0\",\"newOffset\":\"1\",\"oldLength\":\"0\",\"newLength\":\"1\",\"addLines\":null,\"delLines\":null,\"isMissingOldNewline\":null,\"isMissingNewNewline\":null,\"corpus\":\"+alpha\\n\"}]}],\"properties\":{\"hg:meta\":{\"date\":\"0 0\",\"node\":\"d8f232f7d799e1064d3da179df41a2b5d04334e9\",\"user\":\"test\",\"parent\":\"0000000000000000000000000000000000000000\"},\"local:commits\":{\"d8f232f7d799e1064d3da179df41a2b5d04334e9\":{\"time\":0,\"author\":\"test\",\"authorEmail\":\"test\"}}},\"authorName\":\"test\",\"authorEmail\":\"test\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11073&api.token=cli-hahayouwish&data=%7B%22parent%22%3A+%220000000000000000000000000000000000000000%22%2C+%22node%22%3A+%22f70265671c65ab4b5416e611a6bd61887c013122%22%2C+%22user%22%3A+%22test%22%2C+%22date%22%3A+%220+0%22%7D&name=hg%3Ameta", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "264"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Ff6o4ingm2wmr3ma4aht2kytfrrxvrkitj6ipkf5k; expires=Thu, 14-Sep-2023 04:53:46 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:46 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11073&api.token=cli-hahayouwish&data=%7B%22f70265671c65ab4b5416e611a6bd61887c013122%22%3A+%7B%22time%22%3A+0.0%2C+%22authorEmail%22%3A+%22test%22%2C+%22author%22%3A+%22test%22%7D%7D&name=local%3Acommits", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "227"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F4fitvy4kno46zkca6hq7npvuxvnh4dxlbvscmodb; expires=Thu, 14-Sep-2023 04:53:47 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:47 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&corpus=create+alpha+for+phabricator+test%0A%0ADifferential+Revision%3A+https%3A%2F%2Fphab.mercurial-scm.org%2FD4596", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-length": [
+                        "158"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F7u2j7nsrtq2dtxqws7pnsnjyaufsamwj44e45euz; expires=Thu, 14-Sep-2023 04:53:47 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:47 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create alpha for phabricator test\",\"revisionID\":4596},\"revisionIDFieldInfo\":{\"value\":4596,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&objectIdentifier=4596&transactions%5B0%5D%5Btype%5D=title&transactions%5B0%5D%5Bvalue%5D=create+alpha+for+phabricator+test", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-length": [
+                        "165"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F7ubtculubfazivfxjxbmnyt3wzjcgdxnfdn57t42; expires=Thu, 14-Sep-2023 04:53:48 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:47 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":\"4596\",\"phid\":\"PHID-DREV-bntcdwe74cw3vwkzt6nq\"},\"transactions\":[]},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&constraints%5Bcallsigns%5D%5B0%5D=HG", 
+                "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search", 
+                "headers": {
+                    "content-length": [
+                        "79"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fdpvy3rwephm5krs7posuadvjmkh7o7wbytgdhisv; expires=Thu, 14-Sep-2023 04:53:48 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:48 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&diff=diff+--git+a%2Fbeta+b%2Fbeta%0Anew+file+mode+100644%0A---+%2Fdev%2Fnull%0A%2B%2B%2B+b%2Fbeta%0A%40%40+-0%2C0+%2B1%2C1+%40%40%0A%2Bbeta%0A&repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff", 
+                "headers": {
+                    "content-length": [
+                        "231"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fafqgsnm7vbqi3vyfg5c7xgxyiv7fgi77vauw6wnv; expires=Thu, 14-Sep-2023 04:53:49 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:49 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"id\":11074,\"phid\":\"PHID-DIFF-sitmath22fwgsfsbdmne\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/11074\\/\"},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11074&api.token=cli-hahayouwish&data=%7B%22parent%22%3A+%22f70265671c65ab4b5416e611a6bd61887c013122%22%2C+%22node%22%3A+%221a5640df7bbfc26fc4f6ef38e4d1581d5b2a3122%22%2C+%22user%22%3A+%22test%22%2C+%22date%22%3A+%220+0%22%7D&name=hg%3Ameta", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "264"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Frvpld6nyjmtrq3qynmldbquhgwbrhcdhythbot6r; expires=Thu, 14-Sep-2023 04:53:49 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:49 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11074&api.token=cli-hahayouwish&data=%7B%221a5640df7bbfc26fc4f6ef38e4d1581d5b2a3122%22%3A+%7B%22time%22%3A+0.0%2C+%22authorEmail%22%3A+%22test%22%2C+%22author%22%3A+%22test%22%7D%7D&name=local%3Acommits", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "227"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Flpkv333zitgztqx2clpg2uibjy633myliembguf2; expires=Thu, 14-Sep-2023 04:53:50 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:49 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&corpus=create+beta+for+phabricator+test", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage", 
+                "headers": {
+                    "content-length": [
+                        "82"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fav6ovbqxoy3dijysouoabcz7jqescejugeedwspi; expires=Thu, 14-Sep-2023 04:53:50 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:50 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create beta for phabricator test\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"}},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-sitmath22fwgsfsbdmne&transactions%5B1%5D%5Btype%5D=summary&transactions%5B1%5D%5Bvalue%5D=Depends+on+D4596&transactions%5B2%5D%5Btype%5D=summary&transactions%5B2%5D%5Bvalue%5D=+&transactions%5B3%5D%5Btype%5D=title&transactions%5B3%5D%5Bvalue%5D=create+beta+for+phabricator+test", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit", 
+                "headers": {
+                    "content-length": [
+                        "398"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fywrdtdafcn5p267qiqfgfh7h4buaqxmnrgan6fh2; expires=Thu, 14-Sep-2023 04:53:50 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:50 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":{\"object\":{\"id\":4597,\"phid\":\"PHID-DREV-as7flhipq636gqvnyrsf\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-bwzosyyqmzlhe6g\"},{\"phid\":\"PHID-XACT-DREV-ina5ktuwp6eiwv6\"},{\"phid\":\"PHID-XACT-DREV-22bjztn3szeyicy\"},{\"phid\":\"PHID-XACT-DREV-kcv6zk2yboepbmo\"},{\"phid\":\"PHID-XACT-DREV-mnbp6f6sq54hzs2\"},{\"phid\":\"PHID-XACT-DREV-qlakltzsdzclpha\"},{\"phid\":\"PHID-XACT-DREV-a5347cobhvqnc22\"},{\"phid\":\"PHID-XACT-DREV-sciqq5cqfuqfh67\"}]},\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "api.token=cli-hahayouwish&ids%5B0%5D=4596&ids%5B1%5D=4597", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.query", 
+                "headers": {
+                    "content-length": [
+                        "74"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2F2iio6iugurtd7ml2tnwfwv24hkrfhs62yshvmouv; expires=Thu, 14-Sep-2023 04:53:51 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:51 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":[{\"id\":\"4597\",\"phid\":\"PHID-DREV-as7flhipq636gqvnyrsf\",\"title\":\"create beta for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4597\",\"dateCreated\":\"1536987231\",\"dateModified\":\"1536987231\",\"authorPHID\":\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\" \",\"testPlan\":\"\",\"lineCount\":\"1\",\"activeDiffPHID\":\"PHID-DIFF-sitmath22fwgsfsbdmne\",\"diffs\":[\"11074\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-bntcdwe74cw3vwkzt6nq\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null},{\"id\":\"4596\",\"phid\":\"PHID-DREV-bntcdwe74cw3vwkzt6nq\",\"title\":\"create alpha for phabricator test\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D4596\",\"dateCreated\":\"1536986862\",\"dateModified\":\"1536987231\",\"authorPHID\":\"PHID-USER-cgcdlc6c3gpxapbmkwa2\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-vwre7kpjdq52wbt56ftl\",\"diffs\":[\"11073\",\"11072\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11074&api.token=cli-hahayouwish&data=%7B%22parent%22%3A+%22f70265671c65ab4b5416e611a6bd61887c013122%22%2C+%22node%22%3A+%22c2b605ada280b38c38031b5d31622869c72b0d8d%22%2C+%22user%22%3A+%22test%22%2C+%22date%22%3A+%220+0%22%7D&name=hg%3Ameta", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "264"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fvwsd2gtkeg64gticvthsxnpufne42t4eqityra25; expires=Thu, 14-Sep-2023 04:53:52 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:52 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }, 
+        {
+            "request": {
+                "body": "diff_id=11074&api.token=cli-hahayouwish&data=%7B%22c2b605ada280b38c38031b5d31622869c72b0d8d%22%3A+%7B%22time%22%3A+0.0%2C+%22authorEmail%22%3A+%22test%22%2C+%22author%22%3A+%22test%22%7D%7D&name=local%3Acommits", 
+                "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty", 
+                "headers": {
+                    "content-length": [
+                        "227"
+                    ], 
+                    "host": [
+                        "phab.mercurial-scm.org"
+                    ], 
+                    "content-type": [
+                        "application/x-www-form-urlencoded"
+                    ], 
+                    "accept": [
+                        "application/mercurial-0.1"
+                    ], 
+                    "user-agent": [
+                        "mercurial/proto-1.0 (Mercurial 4.7.1+867-34bcd3af7109+20180915)"
+                    ]
+                }, 
+                "method": "POST"
+            }, 
+            "response": {
+                "status": {
+                    "code": 200, 
+                    "message": "OK"
+                }, 
+                "headers": {
+                    "server": [
+                        "Apache/2.4.10 (Debian)"
+                    ], 
+                    "strict-transport-security": [
+                        "max-age=0; includeSubdomains; preload"
+                    ], 
+                    "x-frame-options": [
+                        "Deny"
+                    ], 
+                    "x-content-type-options": [
+                        "nosniff"
+                    ], 
+                    "expires": [
+                        "Sat, 01 Jan 2000 00:00:00 GMT"
+                    ], 
+                    "set-cookie": [
+                        "phsid=A%2Fflxjbmx24qcq7qhggolo6b7iue7utwp7kyoazduk; expires=Thu, 14-Sep-2023 04:53:52 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+                    ], 
+                    "x-xss-protection": [
+                        "1; mode=block"
+                    ], 
+                    "content-type": [
+                        "application/json"
+                    ], 
+                    "cache-control": [
+                        "no-store"
+                    ], 
+                    "date": [
+                        "Sat, 15 Sep 2018 04:53:52 GMT"
+                    ]
+                }, 
+                "body": {
+                    "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+                }
+            }
+        }
+    ]
+}
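The recording above is a VCR-style cassette: each interaction pairs a recorded request with the canned response the test replays instead of contacting phab.mercurial-scm.org. As a rough illustration (assuming the cassette is vcrpy-format JSON whose interaction list sits under an "interactions" key; the filename here is hypothetical), the Conduit endpoints a cassette exercises can be listed with nothing more than the json module:

    import json

    # List which Conduit API endpoints a recorded cassette exercises.
    with open('phabsend-create-alpha.json') as f:
        cassette = json.load(f)
    for interaction in cassette.get('interactions', []):
        req = interaction['request']
        print(req['method'], req['uri'])

During the real tests, vcrpy itself intercepts the HTTP layer and answers from these recordings, so no network access is needed.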
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/printrevset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,41 @@
+from __future__ import absolute_import
+from mercurial import (
+  cmdutil,
+  commands,
+  extensions,
+  logcmdutil,
+  revsetlang,
+  smartset,
+)
+
+from mercurial.utils import (
+  stringutil,
+)
+
+def logrevset(repo, pats, opts):
+    revs = logcmdutil._initialrevs(repo, opts)
+    if not revs:
+        return None
+    match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
+    return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
+
+def uisetup(ui):
+    def printrevset(orig, repo, pats, opts):
+        revs, filematcher = orig(repo, pats, opts)
+        if opts.get(b'print_revset'):
+            expr = logrevset(repo, pats, opts)
+            if expr:
+                tree = revsetlang.parse(expr)
+                tree = revsetlang.analyze(tree)
+            else:
+                tree = []
+            ui = repo.ui
+            ui.write(b'%s\n' % stringutil.pprint(opts.get(b'rev', [])))
+            ui.write(revsetlang.prettyformat(tree) + b'\n')
+            ui.write(stringutil.prettyrepr(revs) + b'\n')
+            revs = smartset.baseset()  # display no revisions
+        return revs, filematcher
+    extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
+    aliases, entry = cmdutil.findcmd(b'log', commands.table)
+    entry[1].append((b'', b'print-revset', False,
+                     b'print generated revset and exit (DEPRECATED)'))
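The extension above wires a --print-revset flag into hg log: when set, it parses and analyzes the revset log would have used, pretty-prints it, and then displays no revisions. A minimal sketch of the same helpers it calls, runnable under the Python 2 of this era (the revset string is arbitrary):

    from mercurial import revsetlang

    # Parse and analyze an arbitrary revset, then dump the tree the way
    # printrevset.py does.
    tree = revsetlang.analyze(revsetlang.parse(b'ancestors(tip) and public()'))
    print(revsetlang.prettyformat(tree))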
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/pullext.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,50 @@
+# pullext.py - Simple extension to test pulling
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+    commands,
+    error,
+    extensions,
+    localrepo,
+    repository,
+)
+
+def clonecommand(orig, ui, repo, *args, **kwargs):
+    if kwargs.get(r'include') or kwargs.get(r'exclude'):
+        kwargs[r'narrow'] = True
+
+    if kwargs.get(r'depth'):
+        try:
+            kwargs[r'depth'] = int(kwargs[r'depth'])
+        except ValueError:
+            raise error.Abort(_('--depth must be an integer'))
+
+    return orig(ui, repo, *args, **kwargs)
+
+def featuresetup(ui, features):
+    features.add(repository.NARROW_REQUIREMENT)
+
+def extsetup(ui):
+    entry = extensions.wrapcommand(commands.table, 'clone', clonecommand)
+
+    hasinclude = any(x[1] == 'include' for x in entry[1])
+    hasdepth = any(x[1] == 'depth' for x in entry[1])
+
+    if not hasinclude:
+        entry[1].append(('', 'include', [],
+                         _('pattern of file/directory to clone')))
+        entry[1].append(('', 'exclude', [],
+                         _('pattern of file/directory to not clone')))
+
+    if not hasdepth:
+        entry[1].append(('', 'depth', '',
+                         _('ancestry depth of changesets to fetch')))
+
+    localrepo.featuresetupfuncs.add(featuresetup)
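Tests would typically enable this via --config extensions.pullext=$TESTDIR/pullext.py, after which clone accepts the extra --include/--exclude/--depth flags. The wrapping idiom itself, stripped to its core (the wrapper name below is illustrative, not part of the extension):

    from mercurial import commands, extensions

    def loggingclone(orig, ui, repo, *args, **kwargs):
        # Massage kwargs as needed, then defer to the wrapped clone.
        ui.status('clone invoked through a wrapper\n')
        return orig(ui, repo, *args, **kwargs)

    def extsetup(ui):
        extensions.wrapcommand(commands.table, 'clone', loggingclone)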
--- a/tests/run-tests.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/run-tests.py	Mon Oct 22 14:46:06 2018 -0400
@@ -51,6 +51,7 @@
 import distutils.version as version
 import errno
 import json
+import multiprocessing
 import os
 import random
 import re
@@ -64,6 +65,7 @@
 import threading
 import time
 import unittest
+import uuid
 import xml.dom.minidom as minidom
 
 try:
@@ -85,8 +87,6 @@
     except NameError:
         pass
 
-origenviron = os.environ.copy()
-osenvironb = getattr(os, 'environb', os.environ)
 processlock = threading.Lock()
 
 pygmentspresent = False
@@ -140,6 +140,8 @@
     runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
     runnerlexer = TestRunnerLexer()
 
+origenviron = os.environ.copy()
+
 if sys.version_info > (3, 5, 0):
     PYTHON3 = True
     xrange = range # we use xrange in one place, and we'd rather not use range
@@ -153,6 +155,40 @@
             return p
         return p.decode('utf-8')
 
+    osenvironb = getattr(os, 'environb', None)
+    if osenvironb is None:
+        # Windows lacks os.environb, for instance.  A proxy over the real thing
+        # instead of a copy allows the environment to be updated via bytes on
+        # all platforms.
+        class environbytes(object):
+            def __init__(self, strenv):
+                self.__len__ = strenv.__len__
+                self.clear = strenv.clear
+                self._strenv = strenv
+            def __getitem__(self, k):
+                v = self._strenv.__getitem__(_strpath(k))
+                return _bytespath(v)
+            def __setitem__(self, k, v):
+                self._strenv.__setitem__(_strpath(k), _strpath(v))
+            def __delitem__(self, k):
+                self._strenv.__delitem__(_strpath(k))
+            def __contains__(self, k):
+                return self._strenv.__contains__(_strpath(k))
+            def __iter__(self):
+                return iter([_bytespath(k) for k in iter(self._strenv)])
+            def get(self, k, default=None):
+                v = self._strenv.get(_strpath(k), _strpath(default))
+                return _bytespath(v)
+            def pop(self, k, default=None):
+                v = self._strenv.pop(_strpath(k), _strpath(default))
+                return _bytespath(v)
+
+        osenvironb = environbytes(os.environ)
+
+    getcwdb = getattr(os, 'getcwdb')
+    if not getcwdb or os.name == 'nt':
+        getcwdb = lambda: _bytespath(os.getcwd())
+
 elif sys.version_info >= (3, 0, 0):
     print('%s is only supported on Python 3.5+ and 2.7, not %s' %
           (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
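A quick illustration of the environbytes contract defined in the hunk above (usable inside run-tests' Python 3 branch; the variable name is made up): bytes on the caller's side, with the str-keyed os.environ doing the actual storage, so updates are visible through both views.

    env = environbytes(os.environ)
    env[b'HGTEST_DEMO'] = b'1'          # stored as the str '1'
    assert os.environ['HGTEST_DEMO'] == '1'
    assert env[b'HGTEST_DEMO'] == b'1'
    del env[b'HGTEST_DEMO']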
@@ -168,6 +204,8 @@
         return p
 
     _strpath = _bytespath
+    osenvironb = os.environ
+    getcwdb = os.getcwd
 
 # For Windows support
 wifexited = getattr(os, "WIFEXITED", lambda x: False)
@@ -220,7 +258,8 @@
 closefds = os.name == 'posix'
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
-    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
+    p = subprocess.Popen(_strpath(cmd), shell=True, bufsize=-1,
+                         cwd=_strpath(wd), env=env,
                          close_fds=closefds,
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
@@ -249,7 +288,7 @@
     IMPL_PATH = b'JYTHONPATH'
 
 defaults = {
-    'jobs': ('HGTEST_JOBS', 1),
+    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
     'timeout': ('HGTEST_TIMEOUT', 180),
     'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
     'port': ('HGTEST_PORT', 20059),
@@ -285,12 +324,12 @@
 
     If path does not exist, return an empty set.
     """
-    cases = set()
+    cases = []
     try:
         with open(path, 'rb') as f:
             for l in f:
                 if l.startswith(b'#testcases '):
-                    cases.update(l[11:].split())
+                    cases.append(sorted(l[11:].split()))
     except IOError as ex:
         if ex.errno != errno.ENOENT:
             raise
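Each #testcases line now yields its own sorted group rather than being merged into one flat set, so a test file can declare independent case dimensions; later hunks combine one entry from each group into names like test-foo.t#case1#case2. A self-contained sketch of the new parse result (the file contents are hypothetical):

    # Mirror parsettestcases' loop over two hypothetical #testcases lines.
    cases = []
    for l in [b'#testcases flat tree\n', b'#testcases normal safe\n']:
        if l.startswith(b'#testcases '):
            cases.append(sorted(l[11:].split()))
    assert cases == [[b'flat', b'tree'], [b'normal', b'safe']]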
@@ -386,7 +425,7 @@
         help="prefer IPv6 to IPv4 for network related tests")
     hgconf.add_argument("--pure", action="store_true",
         help="use pure Python code instead of C extensions")
-    hgconf.add_argument("-3", "--py3k-warnings", action="store_true",
+    hgconf.add_argument("-3", "--py3-warnings", action="store_true",
         help="enable Py3k warnings on Python 2.7+")
     hgconf.add_argument("--with-chg", metavar="CHG",
         help="use specified chg wrapper in place of hg")
@@ -394,11 +433,6 @@
         metavar="HG",
         help="test using specified hg script rather than a "
              "temporary installation")
-    # This option should be deleted once test-check-py3-compat.t and other
-    # Python 3 tests run with Python 3.
-    hgconf.add_argument("--with-python3", metavar="PYTHON3",
-        help="Python 3 interpreter (if running under Python 2)"
-             " (TEMPORARY)")
 
     reporting = parser.add_argument_group('Results Reporting')
     reporting.add_argument("-C", "--annotate", action="store_true",
@@ -528,31 +562,10 @@
                 'warning: --slowtimeout option ignored with --debug\n')
         options.timeout = 0
         options.slowtimeout = 0
-    if options.py3k_warnings:
+    if options.py3_warnings:
         if PYTHON3:
             parser.error(
-                '--py3k-warnings can only be used on Python 2.7')
-    if options.with_python3:
-        if PYTHON3:
-            parser.error('--with-python3 cannot be used when executing with '
-                         'Python 3')
-
-        options.with_python3 = canonpath(options.with_python3)
-        # Verify Python3 executable is acceptable.
-        proc = subprocess.Popen([options.with_python3, b'--version'],
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        out, _err = proc.communicate()
-        ret = proc.wait()
-        if ret != 0:
-            parser.error('could not determine version of python 3')
-        if not out.startswith('Python '):
-            parser.error('unexpected output from python3 --version: %s' %
-                         out)
-        vers = version.LooseVersion(out[len('Python '):])
-        if vers < version.LooseVersion('3.5.0'):
-            parser.error('--with-python3 version must be 3.5.0 or greater; '
-                         'got %s' % out)
+                '--py3-warnings can only be used on Python 2.7')
 
     if options.blacklist:
         options.blacklist = parselistfiles(options.blacklist, 'blacklist')
@@ -676,7 +689,7 @@
                  first=False,
                  timeout=None,
                  startport=None, extraconfigopts=None,
-                 py3kwarnings=False, shell=None, hgcommand=None,
+                 py3warnings=False, shell=None, hgcommand=None,
                  slowtimeout=None, usechg=False,
                  useipv6=False):
         """Create a test from parameters.
@@ -705,7 +718,7 @@
         must have the form "key=value" (something understood by hgrc). Values
         of the form "foo.key=value" will result in "[foo] key=value".
 
-        py3kwarnings enables Py3k warnings.
+        py3warnings enables Py3k warnings.
 
         shell is the shell to execute tests in.
         """
@@ -731,7 +744,7 @@
         self._slowtimeout = slowtimeout
         self._startport = startport
         self._extraconfigopts = extraconfigopts or []
-        self._py3kwarnings = py3kwarnings
+        self._py3warnings = py3warnings
         self._shell = _bytespath(shell)
         self._hgcommand = hgcommand or b'hg'
         self._usechg = usechg
@@ -1005,7 +1018,7 @@
             return (
                 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                     c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
-                    for c in p))
+                    for c in [p[i:i + 1] for i in range(len(p))]))
             )
         else:
             return re.escape(p)
@@ -1021,7 +1034,7 @@
         environment."""
         # Put the restoreenv script inside self._threadtmp
         scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
-        testenv['HGTEST_RESTOREENV'] = scriptpath
+        testenv['HGTEST_RESTOREENV'] = _strpath(scriptpath)
 
         # Only restore environment variable names that the shell allows
         # us to export.
@@ -1053,22 +1066,26 @@
         env = os.environ.copy()
         env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
         env['HGEMITWARNINGS'] = '1'
-        env['TESTTMP'] = self._testtmp
+        env['TESTTMP'] = _strpath(self._testtmp)
         env['TESTNAME'] = self.name
-        env['HOME'] = self._testtmp
+        env['HOME'] = _strpath(self._testtmp)
         # This number should match portneeded in _getport
         for port in xrange(3):
             # This list should be parallel to _portmap in _getreplacements
             defineport(port)
-        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
-        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
+        env["HGRCPATH"] = _strpath(os.path.join(self._threadtmp, b'.hgrc'))
+        env["DAEMON_PIDS"] = _strpath(os.path.join(self._threadtmp,
+                                                   b'daemon.pids'))
         env["HGEDITOR"] = ('"' + sys.executable + '"'
                            + ' -c "import sys; sys.exit(0)"')
         env["HGMERGE"] = "internal:merge"
         env["HGUSER"]   = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
+        env["HGHOSTNAME"] = "test-hostname"
         env['HGIPV6'] = str(int(self._useipv6))
+        if 'HGCATAPULTSERVERPIPE' not in env:
+            env['HGCATAPULTSERVERPIPE'] = os.devnull
 
         extraextensions = []
         for opt in self._extraconfigopts:
@@ -1083,7 +1100,7 @@
 
         # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
         # IP addresses.
-        env['LOCALIP'] = self._localip()
+        env['LOCALIP'] = _strpath(self._localip())
 
         # Reset some environment variables to well-known values so that
         # the tests produce repeatable output.
@@ -1150,7 +1167,8 @@
         Return a tuple (exitcode, output). output is None in debug mode.
         """
         if self._debug:
-            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
+            proc = subprocess.Popen(_strpath(cmd), shell=True,
+                                    cwd=_strpath(self._testtmp),
                                     env=env)
             ret = proc.wait()
             return (ret, None)
@@ -1164,7 +1182,7 @@
             killdaemons(env['DAEMON_PIDS'])
             return ret
 
-        output = ''
+        output = b''
         proc.tochild.close()
 
         try:
@@ -1188,7 +1206,7 @@
             output = re.sub(s, r, output)
 
         if normalizenewlines:
-            output = output.replace('\r\n', '\n')
+            output = output.replace(b'\r\n', b'\n')
 
         return ret, output.splitlines(True)
 
@@ -1200,8 +1218,9 @@
         return os.path.join(self._testdir, b'%s.out' % self.bname)
 
     def _run(self, env):
-        py3kswitch = self._py3kwarnings and b' -3' or b''
-        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
+        py3switch = self._py3warnings and b' -3' or b''
+        # Quote the python(3) executable for Windows
+        cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path)
         vlog("# Running", cmd)
         normalizenewlines = os.name == 'nt'
         result = self._runcommand(cmd, env,
@@ -1242,14 +1261,15 @@
 
     def __init__(self, path, *args, **kwds):
         # accept an extra "case" parameter
-        case = kwds.pop('case', None)
+        case = kwds.pop('case', [])
         self._case = case
-        self._allcases = parsettestcases(path)
+        self._allcases = {x for y in parsettestcases(path) for x in y}
         super(TTest, self).__init__(path, *args, **kwds)
         if case:
-            self.name = '%s#%s' % (self.name, _strpath(case))
-            self.errpath = b'%s.%s.err' % (self.errpath[:-4], case)
-            self._tmpname += b'-%s' % case
+            casepath = b'#'.join(case)
+            self.name = '%s#%s' % (self.name, _strpath(casepath))
+            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
+            self._tmpname += b'-%s' % casepath
         self._have = {}
 
     @property
@@ -1323,10 +1343,10 @@
         reqs = []
         for arg in args:
             if arg.startswith(b'no-') and arg[3:] in self._allcases:
-                if arg[3:] == self._case:
+                if arg[3:] in self._case:
                     return False
             elif arg in self._allcases:
-                if arg != self._case:
+                if arg not in self._case:
                     return False
             else:
                 reqs.append(arg)
@@ -1342,6 +1362,24 @@
                 script.append(b'%s %d 0\n' % (salt, line))
             else:
                 script.append(b'echo %s %d $?\n' % (salt, line))
+        active = []
+        session = str(uuid.uuid4())
+        if PYTHON3:
+            session = session.encode('ascii')
+        def toggletrace(cmd):
+            if isinstance(cmd, str):
+                quoted = shellquote(cmd.strip())
+            else:
+                quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
+            quoted = quoted.replace(b'\\', b'\\\\')
+            if active:
+                script.append(
+                    b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                        session, active[0]))
+            script.append(
+                b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
+                    session, quoted))
+            active[0:] = [quoted]
 
         script = []
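The lines echoed to $HGCATAPULTSERVERPIPE form a simple framing: START <session> <label> and END <session> <label>, one event per line. contrib/catapipe.py is the shipped consumer; purely to show the framing, a stand-in reader might look like this (not the real tool):

    import sys
    import time

    def consume(fh):
        # Pair START/END lines by (session, label) and report durations.
        starts = {}
        for line in fh:
            parts = line.rstrip('\n').split(' ', 2)
            if len(parts) != 3:
                continue
            verb, session, label = parts
            key = (session, label)
            if verb == 'START':
                starts[key] = time.time()
            elif verb == 'END' and key in starts:
                sys.stdout.write('%s: %.3fs\n'
                                 % (label, time.time() - starts.pop(key)))

    if __name__ == '__main__':
        with open(sys.argv[1]) as pipe:
            consume(pipe)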
 
@@ -1369,11 +1407,36 @@
             script.append(b'alias hg="%s"\n' % self._hgcommand)
         if os.getenv('MSYSTEM'):
             script.append(b'alias pwd="pwd -W"\n')
+
+        hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
+        if hgcatapult and hgcatapult != os.devnull:
+            # Kludge: use a while loop to keep the pipe from getting
+            # closed by our echo commands. The still-running file gets
+            # reaped at the end of the script, which causes the while
+            # loop to exit and closes the pipe. Sigh.
+            script.append(
+                b'rtendtracing() {\n'
+                b'  echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                b'  rm -f "$TESTTMP/.still-running"\n'
+                b'}\n'
+                b'trap "rtendtracing" 0\n'
+                b'touch "$TESTTMP/.still-running"\n'
+                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
+                b'> $HGCATAPULTSERVERPIPE &\n'
+                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
+                b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+                % {
+                    'name': self.name,
+                    'session': session,
+                }
+            )
+
         if self._case:
+            casestr = b'#'.join(self._case)
             if isinstance(self._case, str):
-                quoted = shellquote(self._case)
+                quoted = shellquote(casestr)
             else:
-                quoted = shellquote(self._case.decode('utf8')).encode('utf8')
+                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
             script.append(b'TESTCASE=%s\n' % quoted)
             script.append(b'export TESTCASE\n')
 
@@ -1419,7 +1482,7 @@
                     # We've just entered a Python block. Add the header.
                     inpython = True
                     addsalt(prepos, False) # Make sure we report the exit code.
-                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
+                    script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
                 addsalt(n, True)
                 script.append(l[2:])
             elif l.startswith(b'  ... '): # python inlines
@@ -1433,10 +1496,12 @@
                 prepos = pos
                 pos = n
                 addsalt(n, False)
-                cmd = l[4:].split()
+                rawcmd = l[4:]
+                cmd = rawcmd.split()
+                toggletrace(rawcmd)
                 if len(cmd) == 2 and cmd[0] == b'cd':
                     l = b'  $ cd %s || exit 1\n' % cmd[1]
-                script.append(l[4:])
+                script.append(rawcmd)
             elif l.startswith(b'  > '): # continuations
                 after.setdefault(prepos, []).append(l)
                 script.append(l[4:])
@@ -1455,7 +1520,6 @@
         if skipping is not None:
             after.setdefault(pos, []).append('  !!! missing #endif\n')
         addsalt(n + 1, False)
-
         return salt, script, after, expected
 
     def _processoutput(self, exitcode, output, salt, after, expected):
@@ -1785,10 +1849,8 @@
                 pass
             elif self._options.view:
                 v = self._options.view
-                if PYTHON3:
-                    v = _bytespath(v)
-                os.system(b"%s %s %s" %
-                          (v, test.refpath, test.errpath))
+                os.system(r"%s %s %s" %
+                          (v, _strpath(test.refpath), _strpath(test.errpath)))
             else:
                 servefail, lines = getdiff(expected, got,
                                            test.refpath, test.errpath)
@@ -1815,6 +1877,7 @@
                         'changes)')
                 else:
                     self.stream.write('Accept this change? [n] ')
+                    self.stream.flush()
                     answer = sys.stdin.readline().strip()
                     if answer.lower() in ('y', 'yes'):
                         if test.path.endswith(b'.t'):
@@ -2464,8 +2527,7 @@
             os.umask(oldmask)
 
     def _run(self, testdescs):
-        self._testdir = osenvironb[b'TESTDIR'] = getattr(
-            os, 'getcwdb', os.getcwd)()
+        self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
         # assume all tests in same folder for now
         if testdescs:
             pathname = os.path.dirname(testdescs[0]['path'])
@@ -2562,9 +2624,6 @@
         osenvironb[b"BINDIR"] = self._bindir
         osenvironb[b"PYTHON"] = PYTHON
 
-        if self.options.with_python3:
-            osenvironb[b'PYTHON3'] = self.options.with_python3
-
         fileb = _bytespath(__file__)
         runtestdir = os.path.abspath(os.path.dirname(fileb))
         osenvironb[b'RUNTESTDIR'] = runtestdir
@@ -2666,31 +2725,42 @@
                 expanded_args.append(arg)
         args = expanded_args
 
-        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))')
+        testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
         tests = []
         for t in args:
-            case = None
+            case = []
 
             if not (os.path.basename(t).startswith(b'test-')
                     and (t.endswith(b'.py') or t.endswith(b'.t'))):
 
                 m = testcasepattern.match(t)
                 if m is not None:
-                    t, _, case = m.groups()
+                    t, _, casestr = m.groups()
+                    if casestr:
+                        case = casestr.split(b'#')
                 else:
                     continue
 
             if t.endswith(b'.t'):
                 # .t file may contain multiple test cases
-                cases = sorted(parsettestcases(t))
-                if cases:
-                    if case is not None and case in cases:
-                        tests += [{'path': t, 'case': case}]
-                    elif case is not None and case not in cases:
+                casedimensions = parsettestcases(t)
+                if casedimensions:
+                    cases = []
+                    def addcases(case, casedimensions):
+                        if not casedimensions:
+                            cases.append(case)
+                        else:
+                            for c in casedimensions[0]:
+                                addcases(case + [c], casedimensions[1:])
+                    addcases([], casedimensions)
+                    if case and case in cases:
+                        cases = [case]
+                    elif case:
                         # Ignore invalid cases
-                        pass
+                        cases = []
                     else:
-                        tests += [{'path': t, 'case': c} for c in sorted(cases)]
+                        pass
+                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                 else:
                     tests.append({'path': t})
             else:
@@ -2701,7 +2771,7 @@
         def _reloadtest(test, i):
             # convert a test back to its description dict
             desc = {'path': test.path}
-            case = getattr(test, '_case', None)
+            case = getattr(test, '_case', [])
             if case:
                 desc['case'] = case
             return self._gettest(desc, i)
@@ -2713,7 +2783,8 @@
                     desc = testdescs[0]
                     # desc['path'] is a relative path
                     if 'case' in desc:
-                        errpath = b'%s.%s.err' % (desc['path'], desc['case'])
+                        casestr = b'#'.join(desc['case'])
+                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                     else:
                         errpath = b'%s.err' % desc['path']
                     errpath = os.path.join(self._outputdir, errpath)
@@ -2726,13 +2797,15 @@
 
             tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
 
+            jobs = min(len(tests), self.options.jobs)
+
             failed = False
             kws = self.options.keywords
             if kws is not None and PYTHON3:
                 kws = kws.encode('utf-8')
 
             suite = TestSuite(self._testdir,
-                              jobs=self.options.jobs,
+                              jobs=jobs,
                               whitelist=self.options.whitelisted,
                               blacklist=self.options.blacklist,
                               retest=self.options.retest,
@@ -2760,6 +2833,9 @@
                     assert self._installdir
                     self._installchg()
 
+                log('running %d tests using %d parallel processes' % (
+                    len(tests), jobs))
+
                 result = runner.run(suite)
 
             if result.failures:
@@ -2822,7 +2898,7 @@
                     timeout=self.options.timeout,
                     startport=self._getport(count),
                     extraconfigopts=self.options.extra_config_opt,
-                    py3kwarnings=self.options.py3k_warnings,
+                    py3warnings=self.options.py3_warnings,
                     shell=self.options.shell,
                     hgcommand=self._hgcommand,
                     usechg=bool(self.options.with_chg or self.options.chg),
@@ -2847,7 +2923,10 @@
         """Configure the environment to use the appropriate Python in tests."""
         # Tests must use the same interpreter as us or bad things will happen.
         pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
-        if getattr(os, 'symlink', None):
+
+        # os.symlink() is a thing with py3 on Windows, but it requires
+        # Administrator rights.
+        if getattr(os, 'symlink', None) and os.name != 'nt':
             vlog("# Making python executable in test path a symlink to '%s'" %
                  sys.executable)
             mypython = os.path.join(self._tmpbindir, pyexename)
@@ -2932,7 +3011,7 @@
         makedirs(self._bindir)
 
         vlog("# Running", cmd)
-        if os.system(cmd) == 0:
+        if os.system(_strpath(cmd)) == 0:
             if not self.options.verbose:
                 try:
                     os.remove(installerrs)
@@ -2951,7 +3030,7 @@
 
         self._usecorrectpython()
 
-        if self.options.py3k_warnings and not self.options.anycoverage:
+        if self.options.py3_warnings and not self.options.anycoverage:
             vlog("# Updating hg command to enable Py3k Warnings switch")
             with open(os.path.join(self._bindir, 'hg'), 'rb') as f:
                 lines = [line.rstrip() for line in f]
@@ -3086,8 +3165,8 @@
     def _checktools(self):
         """Ensure tools required to run tests are present."""
         for p in self.REQUIREDTOOLS:
-            if os.name == 'nt' and not p.endswith('.exe'):
-                p += '.exe'
+            if os.name == 'nt' and not p.endswith(b'.exe'):
+                p += b'.exe'
             found = self._findprogram(p)
             if found:
                 vlog("# Found prerequisite", p, "at", found)
--- a/tests/simplestorerepo.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/simplestorerepo.py	Mon Oct 22 14:46:06 2018 -0400
@@ -22,6 +22,7 @@
     nullrev,
 )
 from mercurial.thirdparty import (
+    attr,
     cbor,
 )
 from mercurial import (
@@ -39,6 +40,7 @@
 )
 from mercurial.utils import (
     interfaceutil,
+    storageutil,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -60,6 +62,22 @@
     if not isinstance(rev, int):
         raise ValueError('expected int')
 
+class simplestoreerror(error.StorageError):
+    pass
+
+@interfaceutil.implementer(repository.irevisiondelta)
+@attr.s(slots=True, frozen=True)
+class simplestorerevisiondelta(object):
+    node = attr.ib()
+    p1node = attr.ib()
+    p2node = attr.ib()
+    basenode = attr.ib()
+    linknode = attr.ib()
+    flags = attr.ib()
+    baserevisionsize = attr.ib()
+    revision = attr.ib()
+    delta = attr.ib()
+
 @interfaceutil.implementer(repository.ifilestorage)
 class filestorage(object):
     """Implements storage for a tracked path.
@@ -86,19 +104,13 @@
         self._indexdata = indexdata or []
         self._indexbynode = {}
         self._indexbyrev = {}
-        self.index = []
+        self._index = []
         self._refreshindex()
 
-        # This is used by changegroup code :/
-        self._generaldelta = True
-        self.storedeltachains = False
-
-        self.version = 1
-
     def _refreshindex(self):
         self._indexbynode.clear()
         self._indexbyrev.clear()
-        self.index = []
+        self._index = []
 
         for i, entry in enumerate(self._indexdata):
             self._indexbynode[entry[b'node']] = entry
@@ -124,10 +136,10 @@
             p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
 
             # start, length, rawsize, chainbase, linkrev, p1, p2, node
-            self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
-                               entry[b'node']))
+            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
+                                entry[b'node']))
 
-        self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
 
     def __len__(self):
         return len(self._indexdata)
@@ -217,39 +229,28 @@
 
         return self._indexbyrev[rev][b'linkrev']
 
-    def flags(self, rev):
+    def _flags(self, rev):
         validaterev(rev)
 
         return self._indexbyrev[rev][b'flags']
 
-    def deltaparent(self, rev):
-        validaterev(rev)
-
-        p1node = self.parents(self.node(rev))[0]
-        return self.rev(p1node)
-
-    def candelta(self, baserev, rev):
+    def _candelta(self, baserev, rev):
         validaterev(baserev)
         validaterev(rev)
 
-        if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
-            or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
+        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
+            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
             return False
 
         return True
 
-    def rawsize(self, rev):
-        validaterev(rev)
-        node = self.node(rev)
-        return len(self.revision(node, raw=True))
-
     def _processflags(self, text, flags, operation, raw=False):
         if flags == 0:
             return text, True
 
         if flags & ~revlog.REVIDX_KNOWN_FLAGS:
-            raise error.RevlogError(_("incompatible revision flag '%#x'") %
-                                    (flags & ~revlog.REVIDX_KNOWN_FLAGS))
+            raise simplestoreerror(_("incompatible revision flag '%#x'") %
+                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))
 
         validatehash = True
         # Depending on the operation (read or write), the order might be
@@ -266,7 +267,7 @@
 
                 if flag not in revlog._flagprocessors:
                     message = _("missing processor for flag '%#x'") % (flag)
-                    raise revlog.RevlogError(message)
+                    raise simplestoreerror(message)
 
                 processor = revlog._flagprocessors[flag]
                 if processor is not None:
@@ -285,8 +286,8 @@
     def checkhash(self, text, node, p1=None, p2=None, rev=None):
         if p1 is None and p2 is None:
             p1, p2 = self.parents(node)
-        if node != revlog.hash(text, p1, p2):
-            raise error.RevlogError(_("integrity check failed on %s") %
+        if node != storageutil.hashrevisionsha1(text, p1, p2):
+            raise simplestoreerror(_("integrity check failed on %s") %
                 self._path)
 
     def revision(self, node, raw=False):
@@ -296,7 +297,7 @@
             return b''
 
         rev = self.rev(node)
-        flags = self.flags(rev)
+        flags = self._flags(rev)
 
         path = b'/'.join([self._storepath, hex(node)])
         rawtext = self._svfs.read(path)
@@ -325,7 +326,7 @@
             return False
 
         fulltext = self.revision(node)
-        m = revlog.parsemeta(fulltext)[0]
+        m = storageutil.parsemeta(fulltext)[0]
 
         if m and 'copy' in m:
             return m['copy'], bin(m['copyrev'])
@@ -342,7 +343,7 @@
 
         p1, p2 = self.parents(node)
 
-        if revlog.hash(t, p1, p2) == node:
+        if storageutil.hashrevisionsha1(t, p1, p2) == node:
             return False
 
         if self.iscensored(self.rev(node)):
@@ -370,7 +371,7 @@
     def iscensored(self, rev):
         validaterev(rev)
 
-        return self.flags(rev) & revlog.REVIDX_ISCENSORED
+        return self._flags(rev) & repository.REVISION_FLAG_CENSORED
 
     def commonancestorsheads(self, a, b):
         validatenode(a)
@@ -408,13 +409,9 @@
 
         return [b'/'.join((self._storepath, f)) for f in entries]
 
-    # Required by verify.
-    def checksize(self):
-        return 0, 0
-
     def add(self, text, meta, transaction, linkrev, p1, p2):
         if meta or text.startswith(b'\1\n'):
-            text = revlog.packmeta(meta, text)
+            text = storageutil.packmeta(meta, text)
 
         return self.addrevision(text, transaction, linkrev, p1, p2)
 
@@ -424,11 +421,11 @@
         validatenode(p2)
 
         if flags:
-            node = node or revlog.hash(text, p1, p2)
+            node = node or storageutil.hashrevisionsha1(text, p1, p2)
 
         rawtext, validatehash = self._processflags(text, flags, 'write')
 
-        node = node or revlog.hash(text, p1, p2)
+        node = node or storageutil.hashrevisionsha1(text, p1, p2)
 
         if node in self._indexbynode:
             return node
@@ -462,7 +459,12 @@
         self._refreshindex()
         self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
 
-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        if maybemissingparents:
+            raise error.Abort(_('simple store does not support missing parents '
+                                'write mode'))
+
         nodes = []
 
         transaction.addbackup(self._indexpath)
@@ -490,28 +492,6 @@
 
         return nodes
 
-    def revdiff(self, rev1, rev2):
-        validaterev(rev1)
-        validaterev(rev2)
-
-        node1 = self.node(rev1)
-        node2 = self.node(rev2)
-
-        return mdiff.textdiff(self.revision(node1, raw=True),
-                              self.revision(node2, raw=True))
-
-    def headrevs(self):
-        # Assume all revisions are heads by default.
-        revishead = {rev: True for rev in self._indexbyrev}
-
-        for rev, entry in self._indexbyrev.items():
-            # Unset head flag for all seen parents.
-            revishead[self.rev(entry[b'p1'])] = False
-            revishead[self.rev(entry[b'p2'])] = False
-
-        return [rev for rev, ishead in sorted(revishead.items())
-                if ishead]
-
     def heads(self, start=None, stop=None):
         # This is copied from revlog.py.
         if start is None and stop is None:
@@ -564,8 +544,8 @@
 
         heads = {}
         futurelargelinkrevs = set()
-        for head in self.headrevs():
-            headlinkrev = self.linkrev(head)
+        for head in self.heads():
+            headlinkrev = self.linkrev(self.rev(head))
             heads[head] = headlinkrev
             if headlinkrev >= minlink:
                 futurelargelinkrevs.add(headlinkrev)
@@ -651,9 +631,9 @@
 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)
 
-def newreporequirements(orig, repo):
+def newreporequirements(orig, ui):
     """Modifies default requirements for new repos to use the simple store."""
-    requirements = orig(repo)
+    requirements = orig(ui)
 
     # These requirements are only used to affect creation of the store
     # object. We have our own store. So we can remove them.
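
For context on the storageutil.hashrevisionsha1() calls that replace
revlog.hash above: Mercurial's revision node is the SHA-1 of the two parent
nodes (sorted, so parent order does not matter) followed by the revision
text. A minimal standalone restatement, not code from this changeset:

import binascii
import hashlib

def hashrevisionsha1(text, p1, p2):
    s = hashlib.sha1(min(p1, p2))  # sort parents for order independence
    s.update(max(p1, p2))
    s.update(text)
    return s.digest()

nullid = b'\0' * 20
print(binascii.hexlify(hashrevisionsha1(b'data\n', nullid, nullid)))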
--- a/tests/svn-safe-append.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/svn-safe-append.py	Mon Oct 22 14:46:06 2018 -0400
@@ -9,14 +9,18 @@
 import stat
 import sys
 
-text = sys.argv[1]
-fname = sys.argv[2]
+if sys.version_info[0] >= 3:
+    text = os.fsencode(sys.argv[1])
+    fname = os.fsencode(sys.argv[2])
+else:
+    text = sys.argv[1]
+    fname = sys.argv[2]
 
 f = open(fname, "ab")
 try:
     before = os.fstat(f.fileno())[stat.ST_MTIME]
     f.write(text)
-    f.write("\n")
+    f.write(b"\n")
 finally:
     f.close()
 inc = 1
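
The change above follows the usual Python 3 porting pattern for
byte-oriented scripts; a hedged standalone sketch of the same idea
(hypothetical script, not part of this changeset):

import os
import sys

if sys.version_info[0] >= 3:
    # sys.argv holds str on Python 3; fsencode() recovers the raw bytes.
    args = [os.fsencode(arg) for arg in sys.argv[1:]]
else:
    args = sys.argv[1:]  # already bytes on Python 2
print(args)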
--- a/tests/svnxml.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/svnxml.py	Mon Oct 22 14:46:06 2018 -0400
@@ -35,7 +35,10 @@
     return entries
 
 def printentries(entries):
-    fp = sys.stdout
+    try:
+        fp = sys.stdout.buffer
+    except AttributeError:
+        fp = sys.stdout
     for e in entries:
         for k in ('revision', 'author', 'msg'):
             fp.write(('%s: %s\n' % (k, e[k])).encode('utf-8'))
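
The svnxml.py hunk uses the standard trick for writing bytes on either
major Python version; a minimal sketch under the same assumption
(sys.stdout.buffer exists only on Python 3):

import sys

def binarystdout():
    try:
        return sys.stdout.buffer  # Python 3: text stream wraps a byte buffer
    except AttributeError:
        return sys.stdout         # Python 2: stdout already accepts bytes

binarystdout().write(u'caf\u00e9\n'.encode('utf-8'))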
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-edit-lines.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,61 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ hg init repo1
+  $ cd repo1
+
+Make some commits:
+
+  $ for i in 1 2 3; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+absorb --edit-lines will run the editor if a filename is provided:
+
+  $ hg absorb --edit-lines --apply-changes
+  nothing applied
+  [1]
+  $ HGEDITOR=cat hg absorb --edit-lines --apply-changes a
+  HG: editing a
+  HG: "y" means the line to the right exists in the changeset to the top
+  HG:
+  HG: /---- 4ec16f85269a commit 1
+  HG: |/--- 5c5f95224a50 commit 2
+  HG: ||/-- 43f0a75bede7 commit 3
+  HG: |||
+      yyy : 1
+       yy : 2
+        y : 3
+  nothing applied
+  [1]
+
+Edit the file using --edit-lines:
+
+  $ cat > editortext << EOF
+  >       y : a
+  >      yy :  b
+  >      y  : c
+  >     yy  : d  
+  >     y y : e
+  >     y   : f
+  >     yyy : g
+  > EOF
+  $ HGEDITOR='cat editortext >' hg absorb -q --edit-lines --apply-changes a
+  $ hg cat -r 0 a
+  d  
+  e
+  f
+  g
+  $ hg cat -r 1 a
+   b
+  c
+  d  
+  g
+  $ hg cat -r 2 a
+  a
+   b
+  e
+  g
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-filefixupstate.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,208 @@
+from __future__ import absolute_import, print_function
+
+import itertools
+from mercurial import pycompat
+from hgext import absorb
+
+class simplefctx(object):
+    def __init__(self, content):
+        self.content = content
+
+    def data(self):
+        return self.content
+
+def insertreturns(x):
+    # insert "\n"s after each single char
+    if isinstance(x, bytes):
+        return b''.join(ch + b'\n' for ch in pycompat.bytestr(x))
+    else:
+        return pycompat.maplist(insertreturns, x)
+
+def removereturns(x):
+    # the reverse of "insertreturns"
+    if isinstance(x, bytes):
+        return x.replace(b'\n', b'')
+    else:
+        return pycompat.maplist(removereturns, x)
+
+def assertlistequal(lhs, rhs, decorator=lambda x: x):
+    if lhs != rhs:
+        raise RuntimeError('mismatch:\n actual:   %r\n expected: %r'
+                           % tuple(map(decorator, [lhs, rhs])))
+
+def testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None):
+    """([str], str, [str], [(rev, a1, a2, b1, b2)]?) -> None
+
+    workingcopy is a string, of which every character denotes a single line.
+
+    oldcontents, expectedcontents are lists of strings, every character of
+    every string denotes a single line.
+
+    if fixups is not None, it's the expected fixups list and will be checked.
+    """
+    expectedcontents = insertreturns(expectedcontents)
+    oldcontents = insertreturns(oldcontents)
+    workingcopy = insertreturns(workingcopy)
+    state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents),
+                                  'path')
+    state.diffwith(simplefctx(workingcopy))
+    if fixups is not None:
+        assertlistequal(state.fixups, fixups)
+    state.apply()
+    assertlistequal(state.finalcontents, expectedcontents, removereturns)
+
+def buildcontents(linesrevs):
+    # linesrevs: [(linecontent : str, revs : [int])]
+    revs = set(itertools.chain(*[revs for line, revs in linesrevs]))
+    return [b''] + [
+        b''.join([l for l, rs in linesrevs if r in rs])
+        for r in sorted(revs)
+    ]
+
+# input case 0: one single commit
+case0 = [b'', b'11']
+
+# replace a single chunk
+testfilefixup(case0, b'', [b'', b''])
+testfilefixup(case0, b'2', [b'', b'2'])
+testfilefixup(case0, b'22', [b'', b'22'])
+testfilefixup(case0, b'222', [b'', b'222'])
+
+# input case 1: 3 lines, each commit adds one line
+case1 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [      3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case1, b'123', case1)
+testfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c'])
+testfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3'])
+testfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc'])
+testfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23'])
+testfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c'])
+testfilefixup(case1, b'ab3', [b'', b'a', b'ab', b'ab3'])
+testfilefixup(case1, b'abc', [b'', b'a', b'ab', b'abc'])
+
+# non 1:1 edits
+testfilefixup(case1, b'abcd', case1)
+testfilefixup(case1, b'ab', case1)
+
+# deletion
+testfilefixup(case1, b'',   [b'', b'', b'', b''])
+testfilefixup(case1, b'1',  [b'', b'1', b'1', b'1'])
+testfilefixup(case1, b'2',  [b'', b'', b'2', b'2'])
+testfilefixup(case1, b'3',  [b'', b'', b'', b'3'])
+testfilefixup(case1, b'13', [b'', b'1', b'1', b'13'])
+
+# replaces
+testfilefixup(case1, b'1bb3', [b'', b'1', b'1bb', b'1bb3'])
+
+# (confusing) replaces
+testfilefixup(case1, b'1bbb', case1)
+testfilefixup(case1, b'bbbb', case1)
+testfilefixup(case1, b'bbb3', case1)
+testfilefixup(case1, b'1b', case1)
+testfilefixup(case1, b'bb', case1)
+testfilefixup(case1, b'b3', case1)
+
+# insertions at the beginning and the end
+testfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c'])
+testfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123'])
+
+# (confusing) insertions
+testfilefixup(case1, b'1a23', case1)
+testfilefixup(case1, b'12b3', case1)
+
+# input case 2: delete in the middle
+case2 = buildcontents([
+    (b'11', [1, 2]),
+    (b'22', [1   ]),
+    (b'33', [1, 2]),
+])
+
+# deletion (the optimization code should make it 2 chunks)
+testfilefixup(case2, b'', [b'', b'22', b''],
+              fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)])
+
+# 1:1 line mapping
+testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa'])
+
+# non 1:1 edits
+# note: unlike case0, the chunk is not "continuous" and no edits are allowed
+testfilefixup(case2, b'aaa', case2)
+
+# input case 3: rev 3 reverts rev 2
+case3 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2   ]),
+    (b'3', [1, 2, 3]),
+])
+
+# 1:1 line mapping
+testfilefixup(case3, b'13', case3)
+testfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b'])
+testfilefixup(case3, b'a3', [b'', b'a3', b'a23', b'a3'])
+testfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab'])
+
+# non 1:1 edits
+testfilefixup(case3, b'a', case3)
+testfilefixup(case3, b'abc', case3)
+
+# deletion
+testfilefixup(case3, b'', [b'', b'', b'2', b''])
+
+# insertion
+testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c'])
+
+# input case 4: a slightly complex case
+case4 = buildcontents([
+    (b'1', [1, 2, 3]),
+    (b'2', [   2, 3]),
+    (b'3', [1, 2,  ]),
+    (b'4', [1,    3]),
+    (b'5', [      3]),
+    (b'6', [   2, 3]),
+    (b'7', [   2   ]),
+    (b'8', [   2, 3]),
+    (b'9', [      3]),
+])
+
+testfilefixup(case4, b'1245689', case4)
+testfilefixup(case4, b'1a2456bbb', case4)
+testfilefixup(case4, b'1abc5689', case4)
+testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689'])
+testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee'])
+testfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee'])
+testfilefixup(case4, b'aaaaaa', case4)
+testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b'])
+testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689'])
+testfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689'])
+testfilefixup(case4, b'28', [b'', b'34', b'2378', b'28'])
+testfilefixup(case4, b'', [b'', b'34', b'37', b''])
+
+# input case 5: replace a small chunk which is near a deleted line
+case5 = buildcontents([
+    (b'12', [1, 2]),
+    (b'3',  [1]),
+    (b'4',  [1, 2]),
+])
+
+testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4'])
+
+# input case 6: base "changeset" is immutable
+case6 = [b'1357', b'0125678']
+
+testfilefixup(case6, b'0125678', case6)
+testfilefixup(case6, b'0a25678', case6)
+testfilefixup(case6, b'0a256b8', case6)
+testfilefixup(case6, b'abcdefg', [b'1357', b'a1c5e7g'])
+testfilefixup(case6, b'abcdef', case6)
+testfilefixup(case6, b'', [b'1357', b'157'])
+testfilefixup(case6, b'0123456789', [b'1357', b'0123456789'])
+
+# input case 7: change an empty file
+case7 = [b'']
+
+testfilefixup(case7, b'1', case7)
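
As a standalone illustration of the harness's encoding (not part of the
patch): every character models one line, index 0 of each contents list is
the empty pre-history state, and buildcontents() assembles one string per
revision from a per-line list of the revisions containing that line.

import itertools

def buildcontents(linesrevs):
    revs = set(itertools.chain(*[rs for line, rs in linesrevs]))
    return [b''] + [b''.join([l for l, rs in linesrevs if r in rs])
                    for r in sorted(revs)]

# case1 above: rev 1 adds line "1", rev 2 adds "2", rev 3 adds "3".
print(buildcontents([(b'1', [1, 2, 3]), (b'2', [2, 3]), (b'3', [3])]))
# -> [b'', b'1', b'12', b'123']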
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-phase.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,30 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$TESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag <<'EOS'
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOS
+
+  $ hg phase -r A --public -q
+  $ hg phase -r C --secret --force -q
+
+  $ hg update C -q
+  $ printf B1 > B
+
+  $ hg absorb -aq
+
+  $ hg log -G -T '{desc} {phase}'
+  @  C secret
+  |
+  o  B draft
+  |
+  o  A public
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-rename.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,387 @@
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+rename a to b, then b to a
+
+  $ hg init repo1
+  $ cd repo1
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg mv a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+  $ hg mv b a
+  $ echo 3 >> a
+  $ hg ci -m 3
+
+  $ hg annotate -ncf a
+  0 eff892de26ec a: 1
+  1 bf56e1f4f857 b: 2
+  2 0b888b00216c a: 3
+
+  $ sedi 's/$/a/' a
+  $ hg absorb -apq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+  eff892d -1
+  bf56e1f -2
+  0b888b0 -3
+  eff892d +1a
+  bf56e1f +2a
+  0b888b0 +3a
+  
+  3 changesets affected
+  0b888b0 3
+  bf56e1f 2
+  eff892d 1
+
+  $ hg status
+
+  $ hg annotate -ncf a
+  0 5d1c5620e6f2 a: 1a
+  1 9a14ffe67ae9 b: 2a
+  2 9191d121a268 a: 3a
+
+when the first changeset is public
+
+  $ hg phase --public -r 0
+
+  $ sedi 's/a/A/' a
+
+  $ hg absorb -apq
+  showing changes for a
+          @@ -0,3 +0,3 @@
+          -1a
+  9a14ffe -2a
+  9191d12 -3a
+          +1A
+  9a14ffe +2A
+  9191d12 +3A
+  
+  2 changesets affected
+  9191d12 3
+  9a14ffe 2
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,3 +1,3 @@
+  -1a
+  +1A
+   2A
+   3A
+
+copy a to b
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo 1 > a
+  $ hg ci -A a -m 1
+  $ hg cp a b
+  $ echo 2 >> b
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:17b72129ab68 2
+  0:eff892de26ec 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/b/' b
+
+  $ hg absorb -apq
+  showing changes for a
+          @@ -0,1 +0,1 @@
+  eff892d -1
+  eff892d +1a
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -1
+  17b7212 -2
+          +1b
+  17b7212 +2b
+  
+  2 changesets affected
+  17b7212 2
+  eff892d 1
+
+  $ hg diff
+  diff --git a/b b/b
+  --- a/b
+  +++ b/b
+  @@ -1,2 +1,2 @@
+  -1
+  +1b
+   2b
+
+copy b to a
+
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ echo 2 >> a
+  $ hg ci -m 2
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:e62c256d8b24 2
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/a/' b
+
+  $ hg absorb -apq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -1
+  e62c256 -2
+          +1a
+  e62c256 +2a
+  showing changes for b
+          @@ -0,1 +0,1 @@
+  55105f9 -1
+  55105f9 +1a
+  
+  2 changesets affected
+  e62c256 2
+  55105f9 1
+
+  $ hg diff
+  diff --git a/a b/a
+  --- a/a
+  +++ b/a
+  @@ -1,2 +1,2 @@
+  -1
+  +1a
+   2a
+
+"move" b to both a and c, follow a - sorted alphabetically
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ hg commit -m cp
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:366daad8e679 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/a/' a
+  $ sedi 's/$/c/' c
+
+  $ hg absorb -apq
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  366daad -2
+  55105f9 +1a
+  366daad +2a
+  showing changes for c
+          @@ -0,2 +0,2 @@
+          -1
+  366daad -3
+          +1c
+  366daad +3c
+  
+  2 changesets affected
+  366daad cp
+  55105f9 1
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:70606019f91b cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+running absorb again would apply the change to c
+
+  $ hg absorb -apq
+  showing changes for c
+          @@ -0,1 +0,1 @@
+  7060601 -1
+  7060601 +1c
+  
+  1 changesets affected
+  7060601 cp
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  1:8bd536cce368 cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |   1a
+  |  +2a
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1a
+  |  +1c
+  |  +3c
+  |
+  o  0:bfb67c3539c1 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1a
+  
+"move" b to a, c and d, follow d if a gets renamed to e, and c is deleted
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+
+  $ echo 1 > b
+  $ hg ci -A b -m 1
+  $ hg cp b a
+  $ hg cp b c
+  $ hg cp b d
+  $ hg rm b
+  $ echo 2 >> a
+  $ echo 3 >> c
+  $ echo 4 >> d
+  $ hg commit -m cp
+  $ hg mv a e
+  $ hg rm c
+  $ hg commit -m mv
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  2:49911557c471 mv
+  1:7bc3d43ede83 cp
+  0:55105f940d5c 1
+
+  $ sedi 's/$/e/' e
+  $ sedi 's/$/d/' d
+
+  $ hg absorb -apq
+  showing changes for d
+          @@ -0,2 +0,2 @@
+  55105f9 -1
+  7bc3d43 -4
+  55105f9 +1d
+  7bc3d43 +4d
+  showing changes for e
+          @@ -0,2 +0,2 @@
+          -1
+  7bc3d43 -2
+          +1e
+  7bc3d43 +2e
+  
+  2 changesets affected
+  7bc3d43 cp
+  55105f9 1
+
+  $ hg diff
+  diff --git a/e b/e
+  --- a/e
+  +++ b/e
+  @@ -1,2 +1,2 @@
+  -1
+  +1e
+   2e
+
+  $ hg log -G -p -T '{rev}:{node|short} {desc}\n'
+  @  2:34be9b0c786e mv
+  |  diff --git a/c b/c
+  |  deleted file mode 100644
+  |  --- a/c
+  |  +++ /dev/null
+  |  @@ -1,2 +0,0 @@
+  |  -1
+  |  -3
+  |  diff --git a/a b/e
+  |  rename from a
+  |  rename to e
+  |
+  o  1:13e56db5948d cp
+  |  diff --git a/b b/a
+  |  rename from b
+  |  rename to a
+  |  --- a/b
+  |  +++ b/a
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +2e
+  |  diff --git a/b b/c
+  |  copy from b
+  |  copy to c
+  |  --- a/b
+  |  +++ b/c
+  |  @@ -1,1 +1,2 @@
+  |  -1d
+  |  +1
+  |  +3
+  |  diff --git a/b b/d
+  |  copy from b
+  |  copy to d
+  |  --- a/b
+  |  +++ b/d
+  |  @@ -1,1 +1,2 @@
+  |   1d
+  |  +4d
+  |
+  o  0:0037613a5dc6 1
+     diff --git a/b b/b
+     new file mode 100644
+     --- /dev/null
+     +++ b/b
+     @@ -0,0 +1,1 @@
+     +1d
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-strip.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,45 @@
+Do not strip innocent children. See https://bitbucket.org/facebook/hg-experimental/issues/6/hg-absorb-merges-diverged-commits
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$TESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag << EOF
+  > E
+  > |
+  > D F
+  > |/
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOF
+
+  $ hg up E -q
+  $ echo 1 >> B
+  $ echo 2 >> D
+  $ hg absorb -a
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+
+  $ hg log -G -T '{desc}'
+  @  E
+  |
+  o  D
+  |
+  o  C
+  |
+  o  B
+  |
+  | o  F
+  | |
+  | o  C
+  | |
+  | o  B
+  |/
+  o  A
+  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,489 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > EOF
+
+  $ sedi() { # workaround check-code
+  > pattern="$1"
+  > shift
+  > for i in "$@"; do
+  >     sed "$pattern" "$i" > "$i".tmp
+  >     mv "$i".tmp "$i"
+  > done
+  > }
+
+  $ hg init repo1
+  $ cd repo1
+
+Do not crash with an empty repo:
+
+  $ hg absorb
+  abort: no mutable changeset to change
+  [255]
+
+Make some commits:
+
+  $ for i in 1 2 3 4 5; do
+  >   echo $i >> a
+  >   hg commit -A a -m "commit $i" -q
+  > done
+
+  $ hg annotate a
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+Change a few lines:
+
+  $ cat > a <<EOF
+  > 1a
+  > 2b
+  > 3
+  > 4d
+  > 5e
+  > EOF
+
+Preview absorb changes:
+
+  $ hg absorb --print-changes --dry-run
+  showing changes for a
+          @@ -0,2 +0,2 @@
+  4ec16f8 -1
+  5c5f952 -2
+  4ec16f8 +1a
+  5c5f952 +2b
+          @@ -3,2 +3,2 @@
+  ad8b8b7 -4
+  4f55fa6 -5
+  ad8b8b7 +4d
+  4f55fa6 +5e
+  
+  4 changesets affected
+  4f55fa6 commit 5
+  ad8b8b7 commit 4
+  5c5f952 commit 2
+  4ec16f8 commit 1
+
+Run absorb:
+
+  $ hg absorb --apply-changes
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg annotate a
+  0: 1a
+  1: 2b
+  2: 3
+  3: 4d
+  4: 5e
+
+Delete a few lines; related commits will be removed if they become empty:
+
+  $ cat > a <<EOF
+  > 2b
+  > 4d
+  > EOF
+  $ echo y | hg absorb --config ui.interactive=1
+  showing changes for a
+          @@ -0,1 +0,0 @@
+  f548282 -1a
+          @@ -2,1 +1,0 @@
+  ff5d556 -3
+          @@ -4,1 +2,0 @@
+  84e5416 -5e
+  
+  3 changesets affected
+  84e5416 commit 5
+  ff5d556 commit 3
+  f548282 commit 1
+  apply changes (yn)?  y
+  saved backup bundle to * (glob)
+  3 of 3 chunk(s) applied
+  $ hg annotate a
+  1: 2b
+  2: 4d
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  2 commit 4
+  |  diff -r 1cae118c7ed8 -r 58a62bade1c6 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -1,1 +1,2 @@
+  |   2b
+  |  +4d
+  |
+  o  1 commit 2
+  |  diff -r 84add69aeac0 -r 1cae118c7ed8 a
+  |  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  |  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  |  @@ -0,0 +1,1 @@
+  |  +2b
+  |
+  o  0 commit 1
+  
+
+Non 1:1 map changes will be ignored:
+
+  $ echo 1 > a
+  $ hg absorb --apply-changes
+  nothing applied
+  [1]
+
+Insertions:
+
+  $ cat > a << EOF
+  > insert before 2b
+  > 2b
+  > 4d
+  > insert aftert 4d
+  > EOF
+  $ hg absorb -q --apply-changes
+  $ hg status
+  $ hg annotate a
+  1: insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Bookmarks are moved:
+
+  $ hg bookmark -r 1 b1
+  $ hg bookmark -r 2 b2
+  $ hg bookmark ba
+  $ hg bookmarks
+     b1                        1:b35060a57a50
+     b2                        2:946e4bc87915
+   * ba                        2:946e4bc87915
+  $ sedi 's/insert/INSERT/' a
+  $ hg absorb -q --apply-changes
+  $ hg status
+  $ hg bookmarks
+     b1                        1:a4183e9b3d31
+     b2                        2:c9b20c925790
+   * ba                        2:c9b20c925790
+
+Unmodified files are ignored:
+
+  $ touch b
+  $ hg commit -A b -m b
+  $ touch c
+  $ hg add c
+  $ hg rm b
+  $ hg absorb --apply-changes
+  nothing applied
+  [1]
+  $ sedi 's/INSERT/Insert/' a
+  $ hg absorb --apply-changes
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+  $ hg status
+  A c
+  R b
+
+Public commits will not be changed:
+
+  $ hg phase -p 1
+  $ sedi 's/Insert/insert/' a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -0,1 +0,1 @@
+          -Insert before 2b
+          +insert before 2b
+          @@ -3,1 +3,1 @@
+  85b4e0e -Insert aftert 4d
+  85b4e0e +insert aftert 4d
+  
+  1 changesets affected
+  85b4e0e commit 4
+  $ hg absorb --apply-changes
+  saved backup bundle to * (glob)
+  1 of 2 chunk(s) applied
+  $ hg diff -U 0
+  diff -r 1c8eadede62a a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	* (glob)
+  @@ -1,1 +1,1 @@
+  -Insert before 2b
+  +insert before 2b
+  $ hg annotate a
+  1: Insert before 2b
+  1: 2b
+  2: 4d
+  2: insert aftert 4d
+
+Make working copy clean:
+
+  $ hg revert -q -C a b
+  $ hg forget c
+  $ rm c
+  $ hg status
+
+Merge commit will not be changed:
+
+  $ echo 1 > m1
+  $ hg commit -A m1 -m m1
+  $ hg bookmark -q -i m1
+  $ hg update -q '.^'
+  $ echo 2 > m2
+  $ hg commit -q -A m2 -m m2
+  $ hg merge -q m1
+  $ hg commit -m merge
+  $ hg bookmark -d m1
+  $ hg log -G -T '{rev} {desc} {phase}\n'
+  @    6 merge draft
+  |\
+  | o  5 m2 draft
+  | |
+  o |  4 m1 draft
+  |/
+  o  3 b draft
+  |
+  o  2 commit 4 draft
+  |
+  o  1 commit 2 public
+  |
+  o  0 commit 1 public
+  
+  $ echo 2 >> m1
+  $ echo 2 >> m2
+  $ hg absorb --apply-changes
+  abort: no mutable changeset to change
+  [255]
+  $ hg revert -q -C m1 m2
+
+Use a new repo:
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+Make some commits to multiple files:
+
+  $ for f in a b; do
+  >   for i in 1 2; do
+  >     echo $f line $i >> $f
+  >     hg commit -A $f -m "commit $f $i" -q
+  >   done
+  > done
+
+Use a pattern to select files to be fixed up:
+
+  $ sedi 's/line/Line/' a b
+  $ hg status
+  M a
+  M b
+  $ hg absorb --apply-changes a
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  M b
+  $ hg absorb --apply-changes --exclude b
+  nothing applied
+  [1]
+  $ hg absorb --apply-changes b
+  saved backup bundle to * (glob)
+  1 of 1 chunk(s) applied
+  $ hg status
+  $ cat a b
+  a Line 1
+  a Line 2
+  b Line 1
+  b Line 2
+
+Test config option absorb.max-stack-size:
+
+  $ sedi 's/Line/line/' a b
+  $ hg log -T '{rev}:{node} {desc}\n'
+  3:712d16a8f445834e36145408eabc1d29df05ec09 commit b 2
+  2:74cfa6294160149d60adbf7582b99ce37a4597ec commit b 1
+  1:28f10dcf96158f84985358a2e5d5b3505ca69c22 commit a 2
+  0:f9a81da8dc53380ed91902e5b82c1b36255a4bd0 commit a 1
+  $ hg --config absorb.max-stack-size=1 absorb -pn
+  absorb: only the recent 1 changesets will be analysed
+  showing changes for a
+          @@ -0,2 +0,2 @@
+          -a Line 1
+          -a Line 2
+          +a line 1
+          +a line 2
+  showing changes for b
+          @@ -0,2 +0,2 @@
+          -b Line 1
+  712d16a -b Line 2
+          +b line 1
+  712d16a +b line 2
+  
+  1 changesets affected
+  712d16a commit b 2
+
+Test obsolete markers creation:
+
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=createmarkers
+  > [absorb]
+  > add-noise=1
+  > EOF
+
+  $ hg --config absorb.max-stack-size=3 absorb -a
+  absorb: only the recent 3 changesets will be analysed
+  2 of 2 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  6:3dfde4199b46 commit b 2 712d16a8f445834e36145408eabc1d29df05ec09
+  5:99cfab7da5ff commit b 1 74cfa6294160149d60adbf7582b99ce37a4597ec
+  4:fec2b3bd9e08 commit a 2 28f10dcf96158f84985358a2e5d5b3505ca69c22
+  0:f9a81da8dc53 commit a 1 
+  $ hg absorb --apply-changes
+  1 of 1 chunk(s) applied
+  $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n'
+  10:e1c8c1e030a4 commit b 2 3dfde4199b4610ea6e3c6fa9f5bdad8939d69524
+  9:816c30955758 commit b 1 99cfab7da5ffdaf3b9fc6643b14333e194d87f46
+  8:5867d584106b commit a 2 fec2b3bd9e0834b7cb6a564348a0058171aed811
+  7:8c76602baf10 commit a 1 f9a81da8dc53380ed91902e5b82c1b36255a4bd0
+
+Executable files:
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=True
+  > EOF
+  $ cd ..
+  $ hg init repo3
+  $ cd repo3
+
+#if execbit
+  $ echo > foo.py
+  $ chmod +x foo.py
+  $ hg add foo.py
+  $ hg commit -mfoo
+#else
+  $ hg import -q --bypass - <<EOF
+  > # HG changeset patch
+  > foo
+  > 
+  > diff --git a/foo.py b/foo.py
+  > new file mode 100755
+  > --- /dev/null
+  > +++ b/foo.py
+  > @@ -0,0 +1,1 @@
+  > +
+  > EOF
+  $ hg up -q
+#endif
+
+  $ echo bla > foo.py
+  $ hg absorb --dry-run --print-changes
+  showing changes for foo.py
+          @@ -0,1 +0,1 @@
+  99b4ae7 -
+  99b4ae7 +bla
+  
+  1 changesets affected
+  99b4ae7 foo
+  $ hg absorb --apply-changes
+  1 of 1 chunk(s) applied
+  $ hg diff -c .
+  diff --git a/foo.py b/foo.py
+  new file mode 100755
+  --- /dev/null
+  +++ b/foo.py
+  @@ -0,0 +1,1 @@
+  +bla
+  $ hg diff
+
+Removing lines may delete changesets:
+
+  $ cd ..
+  $ hg init repo4
+  $ cd repo4
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ cat > b <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m b12 -A b
+  $ echo 3 >> b
+  $ hg commit -m b3
+  $ echo 4 >> b
+  $ hg commit -m b4
+  $ echo 1 > b
+  $ echo 3 >> a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,0 +2,1 @@
+  bfafb49 +3
+  showing changes for b
+          @@ -1,3 +1,0 @@
+  1154859 -2
+  30970db -3
+  a393a58 -4
+  
+  4 changesets affected
+  a393a58 b4
+  30970db b3
+  1154859 b12
+  bfafb49 a12
+  $ hg absorb -av | grep became
+  0:bfafb49242db: 1 file(s) changed, became 4:1a2de97fc652
+  1:115485984805: 2 file(s) changed, became 5:0c930dfab74c
+  2:30970dbf7b40: became empty and was dropped
+  3:a393a58b9a85: became empty and was dropped
+  $ hg log -T '{rev} {desc}\n' -Gp
+  @  5 b12
+  |  diff --git a/b b/b
+  |  new file mode 100644
+  |  --- /dev/null
+  |  +++ b/b
+  |  @@ -0,0 +1,1 @@
+  |  +1
+  |
+  o  4 a12
+     diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,3 @@
+     +1
+     +2
+     +3
+  
+
+Use revert to make the current change and its parent disappear.
+This should move us to the non-obsolete ancestor.
+
+  $ cd ..
+  $ hg init repo5
+  $ cd repo5
+  $ cat > a <<EOF
+  > 1
+  > 2
+  > EOF
+  $ hg commit -m a12 -A a
+  $ hg id
+  bfafb49242db tip
+  $ echo 3 >> a
+  $ hg commit -m a123 a
+  $ echo 4 >> a
+  $ hg commit -m a1234 a
+  $ hg id
+  82dbe7fd19f0 tip
+  $ hg revert -r 0 a
+  $ hg absorb -pn
+  showing changes for a
+          @@ -2,2 +2,0 @@
+  f1c23dd -3
+  82dbe7f -4
+  
+  2 changesets affected
+  82dbe7f a1234
+  f1c23dd a123
+  $ hg absorb --apply-changes --verbose
+  1:f1c23dd5d08d: became empty and was dropped
+  2:82dbe7fd19f0: became empty and was dropped
+  a: 1 of 1 chunk(s) applied
+  $ hg id
+  bfafb49242db tip
--- a/tests/test-add.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-add.t	Mon Oct 22 14:46:06 2018 -0400
@@ -12,6 +12,9 @@
   $ hg forget a
   $ hg add
   adding a
+  $ hg forget a
+  $ hg add --color debug
+  [ui.addremove.added ui.status|adding a]
   $ hg st
   A a
   $ mkdir dir
--- a/tests/test-addremove-similar.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-addremove-similar.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,7 +1,7 @@
   $ hg init rep; cd rep
 
   $ touch empty-file
-  $ $PYTHON -c 'for x in range(10000): print(x)' > large-file
+  $ "$PYTHON" -c 'for x in range(10000): print(x)' > large-file
 
   $ hg addremove
   adding empty-file
@@ -10,7 +10,7 @@
   $ hg commit -m A
 
   $ rm large-file empty-file
-  $ $PYTHON -c 'for x in range(10,10000): print(x)' > another-file
+  $ "$PYTHON" -c 'for x in range(10,10000): print(x)' > another-file
 
   $ hg addremove -s50
   adding another-file
@@ -34,8 +34,8 @@
 
   $ hg init rep2; cd rep2
 
-  $ $PYTHON -c 'for x in range(10000): print(x)' > large-file
-  $ $PYTHON -c 'for x in range(50): print(x)' > tiny-file
+  $ "$PYTHON" -c 'for x in range(10000): print(x)' > large-file
+  $ "$PYTHON" -c 'for x in range(50): print(x)' > tiny-file
 
   $ hg addremove
   adding large-file
@@ -43,7 +43,7 @@
 
   $ hg commit -m A
 
-  $ $PYTHON -c 'for x in range(70): print(x)' > small-file
+  $ "$PYTHON" -c 'for x in range(70): print(x)' > small-file
   $ rm tiny-file
   $ rm large-file
 
@@ -57,7 +57,7 @@
 
 should be sorted by path for stable result
 
-  $ for i in `$PYTHON $TESTDIR/seq.py 0 9`; do
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 0 9`; do
   >     cp small-file $i
   > done
   $ rm small-file
@@ -88,7 +88,7 @@
 pick one from many identical files
 
   $ cp 0 a
-  $ rm `$PYTHON $TESTDIR/seq.py 0 9`
+  $ rm `"$PYTHON" $TESTDIR/seq.py 0 9`
   $ hg addremove
   removing 0
   removing 1
@@ -107,11 +107,11 @@
 pick one from many similar files
 
   $ cp 0 a
-  $ for i in `$PYTHON $TESTDIR/seq.py 0 9`; do
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 0 9`; do
   >     echo $i >> $i
   > done
   $ hg commit -m 'make them slightly different'
-  $ rm `$PYTHON $TESTDIR/seq.py 0 9`
+  $ rm `"$PYTHON" $TESTDIR/seq.py 0 9`
   $ hg addremove -s50
   removing 0
   removing 1
--- a/tests/test-addremove.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-addremove.t	Mon Oct 22 14:46:06 2018 -0400
@@ -69,6 +69,12 @@
   removing c
   adding d
   recording removal of a as rename to b (100% similar)
+  $ hg addremove -ns 50 --color debug
+  [ui.addremove.removed ui.status|removing a]
+  [ui.addremove.added ui.status|adding b]
+  [ui.addremove.removed ui.status|removing c]
+  [ui.addremove.added ui.status|adding d]
+  [ ui.status|recording removal of a as rename to b (100% similar)]
   $ hg addremove -s 50
   removing a
   adding b
--- a/tests/test-alias.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-alias.t	Mon Oct 22 14:46:06 2018 -0400
@@ -194,6 +194,7 @@
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
    -S --subrepos            recurse into subrepositories
+   -T --template TEMPLATE   display with template
   
   (some details hidden, use --verbose to show complete help)
 
@@ -651,81 +652,15 @@
 
   $ hg --invalid root
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid mylog
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
   $ hg --invalid blank
   hg: option --invalid not recognized
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help -v' for a list of global options)
   [255]
 
 environment variable changes in alias commands
--- a/tests/test-amend.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-amend.t	Mon Oct 22 14:46:06 2018 -0400
@@ -250,15 +250,15 @@
   $ hg init $TESTTMP/wcstates
   $ cd $TESTTMP/wcstates
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 1
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 1
   $ hg addremove -q --similarity 0
   $ hg commit -m0
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 2
   $ hg addremove -q --similarity 0
   $ hg commit -m1
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 wc
   $ hg addremove -q --similarity 0
   $ hg forget *_*_*-untracked
   $ rm *_*_missing-*
@@ -331,3 +331,37 @@
   ? missing_content2_content2-untracked
   ? missing_content2_content3-untracked
   ? missing_missing_content3-untracked
+
+==========================================
+Test history-editing-backup config option
+==========================================
+  $ hg init $TESTTMP/repo4
+  $ cd $TESTTMP/repo4
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo oops>b
+  $ hg ci -Aqm "b"
+  $ echo partiallyfixed > b
+
+#if obsstore-off
+  $ hg amend
+  saved backup bundle to $TESTTMP/repo4/.hg/strip-backup/95e899acf2ce-f11cb050-amend.hg
+When the history-editing-backup config option is set to False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#else
+  $ hg amend
+When the history-editing-backup config option is set to False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ echo fixed > b
+  $ hg amend
+
+#endif
--- a/tests/test-ancestor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ancestor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -178,9 +178,9 @@
 # |
 # o  0
 
-graph = {0: [-1], 1: [0], 2: [1], 3: [1], 4: [2], 5: [4], 6: [4],
-         7: [4], 8: [-1], 9: [6, 7], 10: [5], 11: [3, 7], 12: [9],
-         13: [8]}
+graph = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1], 4: [2, -1],
+         5: [4, -1], 6: [4, -1], 7: [4, -1], 8: [-1, -1], 9: [6, 7],
+         10: [5, -1], 11: [3, 7], 12: [9, -1], 13: [8, -1]}
 
 def genlazyancestors(revs, stoprev=0, inclusive=False):
     print(("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" %
@@ -215,6 +215,15 @@
     s = genlazyancestors([11, 13], stoprev=6, inclusive=True)
     printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
 
+    # Test with stoprev >= min(initrevs)
+    s = genlazyancestors([11, 13], stoprev=11, inclusive=True)
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+    s = genlazyancestors([11, 13], stoprev=12, inclusive=True)
+    printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
+
+    # Contiguous chains: 5->4, 2->1 (where 1 is in seen set), 1->0
+    s = genlazyancestors([10, 1], inclusive=True)
+    printlazyancestors(s, [2, 10, 4, 5, -1, 0, 1])
 
 # The C gca algorithm requires a real repo. These are textual descriptions of
 # DAGs that have been known to be problematic, and, optionally, known pairs
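
The new assertions pin down lazyancestors' iteration order and stoprev
handling; as a sketch of the semantics being tested (an independent
re-implementation, not Mercurial's code): ancestors come out in strictly
decreasing revision order, and revisions below stoprev are pruned together
with everything reachable only through them.

import heapq

def lazyancestors(graph, revs, stoprev=0, inclusive=False):
    seen = set()
    visit = []  # max-heap via negated revision numbers
    start = list(revs) if inclusive else [p for r in revs for p in graph[r]]
    for r in start:
        if r >= stoprev and r not in seen:
            seen.add(r)
            heapq.heappush(visit, -r)
    while visit:
        r = -heapq.heappop(visit)
        yield r
        for p in graph[r]:
            if p >= stoprev and p not in seen:
                seen.add(p)
                heapq.heappush(visit, -p)

graph = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1], 4: [2, -1],
         5: [4, -1], 6: [4, -1], 7: [4, -1], 8: [-1, -1], 9: [6, 7],
         10: [5, -1], 11: [3, 7], 12: [9, -1], 13: [8, -1]}
print(list(lazyancestors(graph, [11, 13], stoprev=6, inclusive=True)))
# -> [13, 11, 8, 7], matching the expected output in test-ancestor.py.out below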
--- a/tests/test-ancestor.py.out	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ancestor.py.out	Mon Oct 22 14:46:06 2018 -0400
@@ -3,16 +3,25 @@
 iteration:  []
 % lazy ancestor set for [11, 13], stoprev = 0, inclusive = False
 membership: [7, 8, 3, 4, 1, 0]
-iteration:  [3, 7, 8, 1, 4, 0, 2]
+iteration:  [8, 7, 4, 3, 2, 1, 0]
 % lazy ancestor set for [1, 3], stoprev = 0, inclusive = False
 membership: [1, 0]
-iteration:  [0, 1]
+iteration:  [1, 0]
 % lazy ancestor set for [11, 13], stoprev = 0, inclusive = True
 membership: [11, 13, 7, 8, 3, 4, 1, 0]
-iteration:  [11, 13, 3, 7, 8, 1, 4, 0, 2]
+iteration:  [13, 11, 8, 7, 4, 3, 2, 1, 0]
 % lazy ancestor set for [11, 13], stoprev = 6, inclusive = False
 membership: [7, 8]
-iteration:  [7, 8]
+iteration:  [8, 7]
 % lazy ancestor set for [11, 13], stoprev = 6, inclusive = True
 membership: [11, 13, 7, 8]
-iteration:  [11, 13, 7, 8]
+iteration:  [13, 11, 8, 7]
+% lazy ancestor set for [11, 13], stoprev = 11, inclusive = True
+membership: [11, 13]
+iteration:  [13, 11]
+% lazy ancestor set for [11, 13], stoprev = 12, inclusive = True
+membership: [13]
+iteration:  [13]
+% lazy ancestor set for [10, 1], stoprev = 0, inclusive = True
+membership: [2, 10, 4, 5, 0, 1]
+iteration:  [10, 5, 4, 2, 1, 0]
--- a/tests/test-annotate.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-annotate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -56,7 +56,6 @@
   $ hg annotate -Tjson a
   [
    {
-    "abspath": "a",
     "lines": [{"line": "a\n", "rev": 0}],
     "path": "a"
    }
@@ -65,8 +64,7 @@
   $ hg annotate -Tjson -cdfnul a
   [
    {
-    "abspath": "a",
-    "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
+    "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
     "path": "a"
    }
   ]
@@ -76,12 +74,12 @@
   $ hg annotate -T'{lines % "{rev} {node|shortest}: {line}"}' a
   0 8435: a
 
-'{line_number}' field should be populated as necessary
+'{lineno}' field should be populated as necessary
 
-  $ hg annotate -T'{lines % "{rev}:{line_number}: {line}"}' a
+  $ hg annotate -T'{lines % "{rev}:{lineno}: {line}"}' a
   0:1: a
   $ hg annotate -Ta a \
-  > --config templates.a='"{lines % "{rev}:{line_number}: {line}"}"'
+  > --config templates.a='"{lines % "{rev}:{lineno}: {line}"}"'
   0:1: a
 
   $ cat <<EOF >>a
@@ -127,12 +125,10 @@
   $ hg annotate -Tjson a b
   [
    {
-    "abspath": "a",
     "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
     "path": "a"
    },
    {
-    "abspath": "b",
     "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
     "path": "b"
    }
@@ -140,7 +136,7 @@
 
 annotate multiple files (template)
 
-  $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
+  $ hg annotate -T'== {path} ==\n{lines % "{rev}: {line}"}' a b
   == a ==
   0: a
   1: a
@@ -568,8 +564,7 @@
   $ hg annotate -ncr "wdir()" -Tjson foo
   [
    {
-    "abspath": "foo",
-    "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
+    "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
     "path": "foo"
    }
   ]
@@ -870,11 +865,9 @@
   $ hg annotate -Tjson binary empty
   [
    {
-    "abspath": "binary",
     "path": "binary"
    },
    {
-    "abspath": "empty",
     "lines": [],
     "path": "empty"
    }
@@ -957,13 +950,13 @@
   ...     f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g') and None
   $ hg ci -m1
 
-  $ hg annotate -r0 a | $PYTHON "$TESTTMP/substcr.py"
+  $ hg annotate -r0 a | "$PYTHON" "$TESTTMP/substcr.py"
   0: 0a[CR]0b[CR]
   0: 0c[CR]0d[CR]
   0: 0e
   0: 0f
   0: 0g
-  $ hg annotate -r1 a | $PYTHON "$TESTTMP/substcr.py"
+  $ hg annotate -r1 a | "$PYTHON" "$TESTTMP/substcr.py"
   0: 0a[CR]0b[CR]
   1: 1c[CR]1d[CR]
   0: 0e
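
A hedged consumer-side sketch of the renamed JSON fields (the script is
ours; the keys are the ones asserted above: the redundant "abspath" entry is
gone, the per-line file key is now "path", and "line_number" became
"lineno"). Run it inside a repository containing file a:

    import json
    import subprocess

    out = subprocess.check_output(
        ['hg', 'annotate', '-Tjson', '-cdfnul', 'a'])
    for entry in json.loads(out.decode('utf-8')):
        for line in entry.get('lines', []):
            # per-line keys after this change: path, lineno, rev, ...
            print('%s:%d:%d: %s' % (line['path'], line['lineno'],
                                    line['rev'], line['line'].rstrip('\n')))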
--- a/tests/test-archive.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-archive.t	Mon Oct 22 14:46:06 2018 -0400
@@ -187,7 +187,7 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=1377, sha1=677b14d3d048778d5eb5552c14a67e6192068650
+  body: size=(1377|1461), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537) (re)
   % tar.gz and tar.bz2 disallowed should both give 403
   403 Archive type not allowed: gz
   content-type: text/html; charset=ascii
@@ -274,7 +274,7 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=1377, sha1=677b14d3d048778d5eb5552c14a67e6192068650
+  body: size=(1377|1461), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537) (re)
   % tar.gz and tar.bz2 disallowed should both give 403
   403 Archive type not allowed: gz
   content-type: text/html; charset=ascii
@@ -341,7 +341,7 @@
   > except util.urlerr.httperror as e:
   >     sys.stderr.write(str(e) + '\n')
   > EOF
-  $ $PYTHON getarchive.py "$TIP" gz | gunzip | tar tf - 2>/dev/null
+  $ "$PYTHON" getarchive.py "$TIP" gz | gunzip | tar tf - 2>/dev/null
   test-archive-1701ef1f1510/.hg_archival.txt
   test-archive-1701ef1f1510/.hgsub
   test-archive-1701ef1f1510/.hgsubstate
@@ -349,7 +349,7 @@
   test-archive-1701ef1f1510/baz/bletch
   test-archive-1701ef1f1510/foo
   test-archive-1701ef1f1510/subrepo/sub
-  $ $PYTHON getarchive.py "$TIP" bz2 | bunzip2 | tar tf - 2>/dev/null
+  $ "$PYTHON" getarchive.py "$TIP" bz2 | bunzip2 | tar tf - 2>/dev/null
   test-archive-1701ef1f1510/.hg_archival.txt
   test-archive-1701ef1f1510/.hgsub
   test-archive-1701ef1f1510/.hgsubstate
@@ -357,7 +357,7 @@
   test-archive-1701ef1f1510/baz/bletch
   test-archive-1701ef1f1510/foo
   test-archive-1701ef1f1510/subrepo/sub
-  $ $PYTHON getarchive.py "$TIP" zip > archive.zip
+  $ "$PYTHON" getarchive.py "$TIP" zip > archive.zip
   $ unzip -t archive.zip
   Archive:  archive.zip
       testing: test-archive-1701ef1f1510/.hg_archival.txt*OK (glob)
@@ -371,19 +371,19 @@
 
 test that we can download single directories and files
 
-  $ $PYTHON getarchive.py "$TIP" gz baz | gunzip | tar tf - 2>/dev/null
+  $ "$PYTHON" getarchive.py "$TIP" gz baz | gunzip | tar tf - 2>/dev/null
   test-archive-1701ef1f1510/baz/bletch
-  $ $PYTHON getarchive.py "$TIP" gz foo | gunzip | tar tf - 2>/dev/null
+  $ "$PYTHON" getarchive.py "$TIP" gz foo | gunzip | tar tf - 2>/dev/null
   test-archive-1701ef1f1510/foo
 
 test that we detect file patterns that match no files
 
-  $ $PYTHON getarchive.py "$TIP" gz foobar
+  $ "$PYTHON" getarchive.py "$TIP" gz foobar
   HTTP Error 404: file(s) not found: foobar
 
 test that we reject unsafe patterns
 
-  $ $PYTHON getarchive.py "$TIP" gz relre:baz
+  $ "$PYTHON" getarchive.py "$TIP" gz relre:baz
   HTTP Error 404: file(s) not found: relre:baz
 
   $ killdaemons.py
@@ -464,7 +464,7 @@
   $ sleep 1
   $ hg archive -t tgz tip.tar.gz
   $ mv tip.tar.gz tip2.tar.gz
-  $ $PYTHON md5comp.py tip1.tar.gz tip2.tar.gz
+  $ "$PYTHON" md5comp.py tip1.tar.gz tip2.tar.gz
   True
 
   $ hg archive -t zip -p /illegal test.zip
@@ -598,12 +598,12 @@
 
   $ hg -R repo archive --prefix tar-extracted archive.tar
   $ (TZ=UTC-3; export TZ; tar xf archive.tar)
-  $ $PYTHON show_mtime.py tar-extracted/a
+  $ "$PYTHON" show_mtime.py tar-extracted/a
   456789012
 
   $ hg -R repo archive --prefix zip-extracted archive.zip
   $ (TZ=UTC-3; export TZ; unzip -q archive.zip)
-  $ $PYTHON show_mtime.py zip-extracted/a
+  $ "$PYTHON" show_mtime.py zip-extracted/a
   456789012
 
   $ cd ..
--- a/tests/test-audit-path.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-audit-path.t	Mon Oct 22 14:46:06 2018 -0400
@@ -82,7 +82,7 @@
   adding manifests
   adding file changes
   added 5 changesets with 6 changes to 6 files (+4 heads)
-  new changesets b7da9bf6b037:fc1393d727bc
+  new changesets b7da9bf6b037:fc1393d727bc (5 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 attack .hg/test
--- a/tests/test-backout.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-backout.t	Mon Oct 22 14:46:06 2018 -0400
@@ -144,17 +144,17 @@
   $ touch -t 200001010000 c
   $ hg status -A
   C c
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   n 644         12 set                 c
   $ hg backout -d '6 0' -m 'to be rollback-ed soon' -r .
+  removing c
   adding b
-  removing c
   changeset 6:4bfec048029d backs out changeset 5:fac0b729a654
   $ hg rollback -q
   $ hg status -A
   A b
   R c
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   a   0         -1 unset               b
   r   0          0 set                 c
 
--- a/tests/test-bad-extension.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bad-extension.t	Mon Oct 22 14:46:06 2018 -0400
@@ -10,7 +10,7 @@
   > EOF
   $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
   >  help help
-  hg help [-ecks] [TOPIC]
+  hg help [-eck] [-s PLATFORM] [TOPIC]
   
   show help for a given topic or a help overview
   error in exit handlers:
@@ -47,17 +47,22 @@
 
   $ hg -q help help 2>&1 |grep extension
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
-  *** failed to import extension badext2: No module named badext2
+  *** failed to import extension badext2: No module named *badext2* (glob)
 
 show traceback
 
-  $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError'
+  $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound'
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
   Traceback (most recent call last):
   Exception: bit bucket overflow
-  *** failed to import extension badext2: No module named badext2
+  *** failed to import extension badext2: No module named *badext2* (glob)
   Traceback (most recent call last):
-  ImportError: No module named badext2
+  ImportError: No module named badext2 (no-py3 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ModuleNotFoundError: No module named 'badext2' (py3 !)
 
 names of extensions failed to load can be accessed via extensions.notloaded()
 
@@ -67,34 +72,80 @@
   > command = registrar.command(cmdtable)
   > @command(b'showbadexts', norepo=True)
   > def showbadexts(ui, *pats, **opts):
-  >     ui.write('BADEXTS: %s\n' % ' '.join(sorted(extensions.notloaded())))
+  >     ui.write(b'BADEXTS: %s\n' % b' '.join(sorted(extensions.notloaded())))
   > EOF
   $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
   BADEXTS: badext badext2
 
+#if no-extraextensions
 show traceback for ImportError of hgext.name if devel.debug.extensions is set
 
   $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
   > | grep -v '^ ' \
-  > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import'
+  > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import|ModuleNotFound'
+  debug.extensions: loading extensions
+  debug.extensions: - processing 5 entries
+  debug.extensions:   - loading extension: 'gpg'
+  debug.extensions:   > 'gpg' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'gpg'
+  debug.extensions:     - invoking registered callbacks: 'gpg'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext'
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
   Traceback (most recent call last):
   Exception: bit bucket overflow
-  could not import hgext.badext2 (No module named *badext2): trying hgext3rd.badext2 (glob)
+  debug.extensions:   - loading extension: 'baddocext'
+  debug.extensions:   > 'baddocext' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'baddocext'
+  debug.extensions:     - invoking registered callbacks: 'baddocext'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions:   - loading extension: 'badext2'
+  debug.extensions:     - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
   Traceback (most recent call last):
-  ImportError: No module named *badext2 (glob)
-  could not import hgext3rd.badext2 (No module named *badext2): trying badext2 (glob)
+  ImportError: No module named badext2 (no-py3 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  debug.extensions:     - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
+  Traceback (most recent call last):
+  ImportError: No module named badext2 (no-py3 !)
+  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  *** failed to import extension badext2: No module named *badext2* (glob)
   Traceback (most recent call last):
-  ImportError: No module named *badext2 (glob)
-  *** failed to import extension badext2: No module named badext2
-  Traceback (most recent call last):
-  ImportError: No module named badext2
+  ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
+  Traceback (most recent call last): (py3 !)
+  ModuleNotFoundError: No module named 'badext2' (py3 !)
+  ImportError: No module named badext2 (no-py3 !)
+  debug.extensions: > loaded 2 extensions, total time * (glob)
+  debug.extensions: - loading configtable attributes
+  debug.extensions: - executing uisetup hooks
+  debug.extensions:   - running uisetup for 'gpg'
+  debug.extensions:   > uisetup for 'gpg' took * (glob)
+  debug.extensions:   - running uisetup for 'baddocext'
+  debug.extensions:   > uisetup for 'baddocext' took * (glob)
+  debug.extensions: > all uisetup took * (glob)
+  debug.extensions: - executing extsetup hooks
+  debug.extensions:   - running extsetup for 'gpg'
+  debug.extensions:   > extsetup for 'gpg' took * (glob)
+  debug.extensions:   - running extsetup for 'baddocext'
+  debug.extensions:   > extsetup for 'baddocext' took * (glob)
+  debug.extensions: > all extsetup took * (glob)
+  debug.extensions: - executing remaining aftercallbacks
+  debug.extensions: > remaining aftercallbacks completed in * (glob)
+  debug.extensions: - loading extension registration objects
+  debug.extensions: > extension registration object loading took * (glob)
+  debug.extensions: > extension baddocext take a total of * to load (glob)
+  debug.extensions: > extension gpg take a total of * to load (glob)
+  debug.extensions: extension loading complete
+#endif
 
 confirm that there's no crash when an extension's documentation is bad
 
   $ hg help --keyword baddocext
   *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
-  *** failed to import extension badext2: No module named badext2
+  *** failed to import extension badext2: No module named *badext2* (glob)
   Topics:
   
    extensions Using Additional Features
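
The (py3 !)/(no-py3 !) output variants above boil down to a standard-library
difference, visible with a minimal snippet:

    try:
        __import__('badext2')  # deliberately missing module
    except ImportError as e:
        # Python 2 prints: ImportError: No module named badext2
        # Python 3 raises the ImportError subclass ModuleNotFoundError
        # and quotes the name: No module named 'badext2'
        print('%s: %s' % (type(e).__name__, e))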
--- a/tests/test-bad-pull.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bad-pull.t	Mon Oct 22 14:46:06 2018 -0400
@@ -7,7 +7,7 @@
   $ test -d copy
   [1]
 
-  $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid
+  $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid
   $ cat dumb.pid >> $DAEMON_PIDS
   $ hg clone http://localhost:$HGPORT/foo copy2
   abort: HTTP Error 404: * (glob)
--- a/tests/test-basic.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-basic.t	Mon Oct 22 14:46:06 2018 -0400
@@ -61,14 +61,14 @@
 Verify that updating to revision 0 via commands.update() works properly
 
   $ cat <<EOF > update_to_rev0.py
-  > from mercurial import ui, hg, commands
-  > myui = ui.ui.load()
+  > from mercurial import commands, hg, ui as uimod
+  > myui = uimod.ui.load()
   > repo = hg.repository(myui, path=b'.')
   > commands.update(myui, repo, rev=b"0")
   > EOF
   $ hg up null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ $PYTHON ./update_to_rev0.py
+  $ "$PYTHON" ./update_to_rev0.py
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg identify -n
   0
@@ -89,7 +89,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
 Repository root:
 
--- a/tests/test-bdiff.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bdiff.py	Mon Oct 22 14:46:06 2018 -0400
@@ -29,25 +29,25 @@
 
     def test_bdiff_basic(self):
         cases = [
-            ("a\nc\n\n\n\n", "a\nb\n\n\n"),
-            ("a\nb\nc\n", "a\nc\n"),
-            ("", ""),
-            ("a\nb\nc", "a\nb\nc"),
-            ("a\nb\nc\nd\n", "a\nd\n"),
-            ("a\nb\nc\nd\n", "a\nc\ne\n"),
-            ("a\nb\nc\n", "a\nc\n"),
-            ("a\n", "c\na\nb\n"),
-            ("a\n", ""),
-            ("a\n", "b\nc\n"),
-            ("a\n", "c\na\n"),
-            ("", "adjfkjdjksdhfksj"),
-            ("", "ab"),
-            ("", "abc"),
-            ("a", "a"),
-            ("ab", "ab"),
-            ("abc", "abc"),
-            ("a\n", "a\n"),
-            ("a\nb", "a\nb"),
+            (b"a\nc\n\n\n\n", b"a\nb\n\n\n"),
+            (b"a\nb\nc\n", b"a\nc\n"),
+            (b"", b""),
+            (b"a\nb\nc", b"a\nb\nc"),
+            (b"a\nb\nc\nd\n", b"a\nd\n"),
+            (b"a\nb\nc\nd\n", b"a\nc\ne\n"),
+            (b"a\nb\nc\n", b"a\nc\n"),
+            (b"a\n", b"c\na\nb\n"),
+            (b"a\n", b""),
+            (b"a\n", b"b\nc\n"),
+            (b"a\n", b"c\na\n"),
+            (b"", b"adjfkjdjksdhfksj"),
+            (b"", b"ab"),
+            (b"", b"abc"),
+            (b"a", b"a"),
+            (b"ab", b"ab"),
+            (b"abc", b"abc"),
+            (b"a\n", b"a\n"),
+            (b"a\nb", b"a\nb"),
         ]
         for a, b in cases:
             self.assert_bdiff(a, b)
@@ -71,42 +71,44 @@
 
     def test_issue1295(self):
         cases = [
-            ("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\nx\n\nz\n",
-             ['x\n\nx\n\n', diffreplace(6, 6, '', 'y\n\n'), 'x\n\nx\n\nz\n']),
-            ("x\n\nx\n\nx\n\nx\n\nz\n", "x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n",
-             ['x\n\nx\n\n',
-              diffreplace(6, 6, '', 'y\n\n'),
-              'x\n\n',
-              diffreplace(9, 9, '', 'y\n\n'),
-              'x\n\nz\n']),
+            (b"x\n\nx\n\nx\n\nx\n\nz\n", b"x\n\nx\n\ny\n\nx\n\nx\n\nz\n",
+             [b'x\n\nx\n\n',
+              diffreplace(6, 6, b'', b'y\n\n'),
+              b'x\n\nx\n\nz\n']),
+            (b"x\n\nx\n\nx\n\nx\n\nz\n", b"x\n\nx\n\ny\n\nx\n\ny\n\nx\n\nz\n",
+             [b'x\n\nx\n\n',
+              diffreplace(6, 6, b'', b'y\n\n'),
+              b'x\n\n',
+              diffreplace(9, 9, b'', b'y\n\n'),
+              b'x\n\nz\n']),
         ]
         for old, new, want in cases:
             self.assertEqual(self.showdiff(old, new), want)
 
     def test_issue1295_varies_on_pure(self):
             # we should pick up abbbc. rather than bc.de as the longest match
-        got = self.showdiff("a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
-                            "a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n")
-        want_c = ['a\nb\nb\n',
-                  diffreplace(6, 6, '', 'a\nb\nb\nb\nc\n.\n'),
-                  'b\nc\n.\nd\ne\n',
-                  diffreplace(16, 18, '.\n', ''),
-                  'f\n']
-        want_pure = [diffreplace(0, 0, '', 'a\nb\nb\n'),
-                     'a\nb\nb\nb\nc\n.\n',
-                     diffreplace(12, 12, '', 'b\nc\n.\n'),
-                     'd\ne\n',
-                     diffreplace(16, 18, '.\n', ''), 'f\n']
-        self.assert_(got in (want_c, want_pure),
-                     'got: %r, wanted either %r or %r' % (
-                         got, want_c, want_pure))
+        got = self.showdiff(b"a\nb\nb\nb\nc\n.\nd\ne\n.\nf\n",
+                            b"a\nb\nb\na\nb\nb\nb\nc\n.\nb\nc\n.\nd\ne\nf\n")
+        want_c = [b'a\nb\nb\n',
+                  diffreplace(6, 6, b'', b'a\nb\nb\nb\nc\n.\n'),
+                  b'b\nc\n.\nd\ne\n',
+                  diffreplace(16, 18, b'.\n', b''),
+                  b'f\n']
+        want_pure = [diffreplace(0, 0, b'', b'a\nb\nb\n'),
+                     b'a\nb\nb\nb\nc\n.\n',
+                     diffreplace(12, 12, b'', b'b\nc\n.\n'),
+                     b'd\ne\n',
+                     diffreplace(16, 18, b'.\n', b''), b'f\n']
+        self.assertTrue(got in (want_c, want_pure),
+                        'got: %r, wanted either %r or %r' % (
+                            got, want_c, want_pure))
 
     def test_fixws(self):
         cases = [
-            (" \ta\r b\t\n", "ab\n", 1),
-            (" \ta\r b\t\n", " a b\n", 0),
-            ("", "", 1),
-            ("", "", 0),
+            (b" \ta\r b\t\n", b"ab\n", 1),
+            (b" \ta\r b\t\n", b" a b\n", 0),
+            (b"", b"", 1),
+            (b"", b"", 0),
         ]
         for a, b, allws in cases:
             c = mdiff.fixws(a, allws)
@@ -115,34 +117,34 @@
 
     def test_nice_diff_for_trivial_change(self):
         self.assertEqual(self.showdiff(
-            ''.join('<%s\n-\n' % i for i in range(5)),
-            ''.join('>%s\n-\n' % i for i in range(5))),
-                         [diffreplace(0, 3, '<0\n', '>0\n'),
-                          '-\n',
-                          diffreplace(5, 8, '<1\n', '>1\n'),
-                          '-\n',
-                          diffreplace(10, 13, '<2\n', '>2\n'),
-                          '-\n',
-                          diffreplace(15, 18, '<3\n', '>3\n'),
-                          '-\n',
-                          diffreplace(20, 23, '<4\n', '>4\n'),
-                          '-\n'])
+            b''.join(b'<%d\n-\n' % i for i in range(5)),
+            b''.join(b'>%d\n-\n' % i for i in range(5))),
+                         [diffreplace(0, 3, b'<0\n', b'>0\n'),
+                          b'-\n',
+                          diffreplace(5, 8, b'<1\n', b'>1\n'),
+                          b'-\n',
+                          diffreplace(10, 13, b'<2\n', b'>2\n'),
+                          b'-\n',
+                          diffreplace(15, 18, b'<3\n', b'>3\n'),
+                          b'-\n',
+                          diffreplace(20, 23, b'<4\n', b'>4\n'),
+                          b'-\n'])
 
     def test_prefer_appending(self):
         # 1 line to 3 lines
-        self.assertEqual(self.showdiff('a\n', 'a\n' * 3),
-                         ['a\n', diffreplace(2, 2, '', 'a\na\n')])
+        self.assertEqual(self.showdiff(b'a\n', b'a\n' * 3),
+                         [b'a\n', diffreplace(2, 2, b'', b'a\na\n')])
         # 1 line to 5 lines
-        self.assertEqual(self.showdiff('a\n', 'a\n' * 5),
-                         ['a\n', diffreplace(2, 2, '', 'a\na\na\na\n')])
+        self.assertEqual(self.showdiff(b'a\n', b'a\n' * 5),
+                         [b'a\n', diffreplace(2, 2, b'', b'a\na\na\na\n')])
 
     def test_prefer_removing_trailing(self):
         # 3 lines to 1 line
-        self.assertEqual(self.showdiff('a\n' * 3, 'a\n'),
-                         ['a\n', diffreplace(2, 6, 'a\na\n', '')])
+        self.assertEqual(self.showdiff(b'a\n' * 3, b'a\n'),
+                         [b'a\n', diffreplace(2, 6, b'a\na\n', b'')])
         # 5 lines to 1 line
-        self.assertEqual(self.showdiff('a\n' * 5, 'a\n'),
-                         ['a\n', diffreplace(2, 10, 'a\na\na\na\n', '')])
+        self.assertEqual(self.showdiff(b'a\n' * 5, b'a\n'),
+                         [b'a\n', diffreplace(2, 10, b'a\na\na\na\n', b'')])
 
 if __name__ == '__main__':
     import silenttestrunner
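
The byte-literal churn above is the point of the change: under Python 3 the
diff helpers accept only bytes. A hedged round-trip sketch (assuming the
mdiff module API shipped with this release):

    from mercurial import mdiff

    old, new = b"a\nb\nc\n", b"a\nc\n"
    delta = mdiff.textdiff(old, new)       # binary bdiff-format delta
    assert mdiff.patch(old, delta) == new  # applying it reproduces `new`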
--- a/tests/test-blackbox.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-blackbox.t	Mon Oct 22 14:46:06 2018 -0400
@@ -233,7 +233,7 @@
   $ sed -e 's/\(.*test1.*\)/#\1/; s#\(.*commit2.*\)#os.rmdir(".hg/blackbox.log")\
   > os.rename(".hg/blackbox.log-", ".hg/blackbox.log")\
   > \1#' $TESTDIR/test-dispatch.py > ../test-dispatch.py
-  $ $PYTHON $TESTDIR/blackbox-readonly-dispatch.py
+  $ "$PYTHON" $TESTDIR/blackbox-readonly-dispatch.py
   running: --debug add foo
   warning: cannot write to blackbox.log: Is a directory (no-windows !)
   warning: cannot write to blackbox.log: $TESTTMP/blackboxtest3/.hg/blackbox.log: Access is denied (windows !)
@@ -343,7 +343,7 @@
   > noop=$TESTTMP/noop.py
   > EOF
 
-  $ $PYTHON -c 'print("a" * 400)' > .hg/blackbox.log
+  $ "$PYTHON" -c 'print("a" * 400)' > .hg/blackbox.log
   $ chg noop
   $ chg noop
   $ chg noop
@@ -351,7 +351,8 @@
   $ chg noop
 
   $ cat > showsize.py << 'EOF'
-  > import os, sys
+  > import os
+  > import sys
   > limit = 500
   > for p in sys.argv[1:]:
   >     size = os.stat(p).st_size
@@ -362,7 +363,7 @@
   >     print('%s: %s %d' % (p, desc, limit))
   > EOF
 
-  $ $PYTHON showsize.py .hg/blackbox*
+  $ "$PYTHON" showsize.py .hg/blackbox*
   .hg/blackbox.log: < 500
   .hg/blackbox.log.1: >= 500
   .hg/blackbox.log.2: >= 500
--- a/tests/test-bookmarks-current.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bookmarks-current.t	Mon Oct 22 14:46:06 2018 -0400
@@ -222,3 +222,27 @@
      Z                         0:719295282060
   $ hg parents -q
   4:8fa964221e8e
+
+Checks command to retrieve active bookmark
+------------------------------------------
+
+demonstrate that the "{activebookmark}" template is unsuitable for the task
+
+  $ hg book -T '- {activebookmark}\n'
+  - 
+  - Y
+  - 
+
+  $ hg book -r . W
+  $ hg book -T '- {activebookmark}\n'
+  - Y
+  - 
+  - Y
+  - 
+
+  $ hg bookmarks -ql .
+  Y
+  $ hg bookmarks --inactive
+  $ hg bookmarks -ql .
+  abort: no active bookmark!
+  [255]
--- a/tests/test-bookmarks-pushpull.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bookmarks-pushpull.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,7 +55,7 @@
   adding remote bookmark X
   updating bookmark Y
   adding remote bookmark Z
-  new changesets 4e3505fd9583
+  new changesets 4e3505fd9583 (1 drafts)
   test-hook-bookmark: X:   -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
   test-hook-bookmark: Y:  0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
   test-hook-bookmark: Z:   -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
@@ -345,7 +345,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (leaving bookmark V)
   $ hg push -B . ../a
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg update -r V
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -418,7 +418,7 @@
   divergent bookmark @ stored as @foo
   divergent bookmark X stored as X@foo
   updating bookmark Z
-  new changesets 0d2164f0ce0d
+  new changesets 0d2164f0ce0d (1 drafts)
   test-hook-bookmark: @foo:   -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
   test-hook-bookmark: X@foo:   -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
   test-hook-bookmark: Z:  4e3505fd95835d721066b76e75dbb8cc554d7f77 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
@@ -435,7 +435,7 @@
 
 (test that too many divergence of bookmark)
 
-  $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -r 000000000000 "X@${i}"; done
+  $ "$PYTHON" $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -r 000000000000 "X@${i}"; done
   $ hg pull ../a
   pulling from ../a
   searching for changes
@@ -463,7 +463,7 @@
      @1                        2:0d2164f0ce0d
      @foo                      2:0d2164f0ce0d
 
-  $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -d "X@${i}"; done
+  $ "$PYTHON" $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -d "X@${i}"; done
   $ hg bookmarks -d "@1"
 
   $ hg push -f ../a
@@ -582,7 +582,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   updating bookmark Y
-  new changesets b0a5eff05604
+  new changesets b0a5eff05604 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg book
    * @                         1:0d2164f0ce0d
@@ -632,7 +632,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   updating bookmark Y
-  new changesets 35d1ef0a8d1b
+  new changesets 35d1ef0a8d1b (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg book
      @                         1:0d2164f0ce0d
@@ -796,7 +796,7 @@
   adding file changes
   added 5 changesets with 5 changes to 3 files (+2 heads)
   2 new obsolescence markers
-  new changesets 4e3505fd9583:c922c0139ca0
+  new changesets 4e3505fd9583:c922c0139ca0 (5 drafts)
   updating to bookmark @
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R cloned-bookmarks bookmarks
@@ -933,7 +933,7 @@
   adding file changes
   added 5 changesets with 5 changes to 3 files (+2 heads)
   2 new obsolescence markers
-  new changesets 4e3505fd9583:c922c0139ca0
+  new changesets 4e3505fd9583:c922c0139ca0 (5 drafts)
   updating to bookmark @
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd addmarks
@@ -1082,7 +1082,7 @@
   > ssh=ssh://user@dummy/issue4455-dest
   > http=http://localhost:$HGPORT/
   > [ui]
-  > ssh=$PYTHON "$TESTDIR/dummyssh"
+  > ssh="$PYTHON" "$TESTDIR/dummyssh"
   > EOF
   $ cat >> ../issue4455-dest/.hg/hgrc << EOF
   > [hooks]
@@ -1225,19 +1225,19 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 79513d0d7716
+  new changesets 79513d0d7716 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 Forbid bookmark move on the server
 
-  $ cat << EOF >> $TESTDIR/no-bm-move.sh
+  $ cat << EOF >> $TESTTMP/no-bm-move.sh
   > #!/bin/sh
   > echo \$HG_NAMESPACE | grep -v bookmarks
   > EOF
   $ cat << EOF >> server/.hg/hgrc
   > [hooks]
-  > prepushkey.no-bm-move= sh $TESTDIR/no-bm-move.sh
+  > prepushkey.no-bm-move= sh $TESTTMP/no-bm-move.sh
   > EOF
 
 pushing changeset is okay
--- a/tests/test-bookmarks.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bookmarks.t	Mon Oct 22 14:46:06 2018 -0400
@@ -68,6 +68,25 @@
      X                         0:f7b1eb17ad24
    * X2                        0:f7b1eb17ad24
      Y                         -1:000000000000
+  $ hg bookmarks -l
+     X                         0:f7b1eb17ad24
+   * X2                        0:f7b1eb17ad24
+     Y                         -1:000000000000
+  $ hg bookmarks -l X Y
+     X                         0:f7b1eb17ad24
+     Y                         -1:000000000000
+  $ hg bookmarks -l .
+   * X2                        0:f7b1eb17ad24
+  $ hg bookmarks -l X A Y
+  abort: bookmark 'A' does not exist
+  [255]
+  $ hg bookmarks -l -r0
+  abort: --rev is incompatible with --list
+  [255]
+  $ hg bookmarks -l --inactive
+  abort: --inactive is incompatible with --list
+  [255]
+
   $ hg log -T '{bookmarks % "{rev} {bookmark}\n"}'
   0 X
   0 X2
@@ -151,6 +170,31 @@
   summary:     0
   
 
+"." is expanded to the active bookmark:
+
+  $ hg log -r 'bookmark(.)'
+  changeset:   1:925d80f479bb
+  bookmark:    X2
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     1
+  
+
+but "literal:." is not since "." seems not a literal bookmark:
+
+  $ hg log -r 'bookmark("literal:.")'
+  abort: bookmark '.' does not exist!
+  [255]
+
+"." should fail if there's no active bookmark:
+
+  $ hg bookmark --inactive
+  $ hg log -r 'bookmark(.)'
+  abort: no active bookmark!
+  [255]
+  $ hg log -r 'present(bookmark(.))'
+
   $ hg log -r 'bookmark(unknown)'
   abort: bookmark 'unknown' does not exist!
   [255]
@@ -166,6 +210,12 @@
   $ hg help revsets | grep 'bookmark('
       "bookmark([name])"
 
+reactivate "X2"
+
+  $ hg update X2
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (activating bookmark X2)
+
 bookmarks X and X2 moved to rev 1, Y at rev -1
 
   $ hg bookmarks
@@ -229,7 +279,7 @@
   $ hg book rename-me
   $ hg book -i rename-me
   $ hg book -m . renamed
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg up -q Y
   $ hg book -d rename-me
@@ -249,7 +299,7 @@
   $ hg book delete-me
   $ hg book -i delete-me
   $ hg book -d .
-  abort: no active bookmark
+  abort: no active bookmark!
   [255]
   $ hg up -q Y
   $ hg book -d delete-me
@@ -296,6 +346,12 @@
   abort: bookmark 'A' does not exist
   [255]
 
+delete with --inactive
+
+  $ hg bookmark -d --inactive Y
+  abort: --inactive is incompatible with --delete
+  [255]
+
 bookmark name with spaces should be stripped
 
   $ hg bookmark ' x  y '
@@ -663,7 +719,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets 125c9a1d6df6:9ba5f110a0b3
+  new changesets 125c9a1d6df6:9ba5f110a0b3 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 update to active bookmark if it's not the parent
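
A hedged sketch of the "." resolution these new tests pin down
(repo._bookmarks.active is the bmstore attribute holding the active
bookmark name; the helper itself is ours):

    from mercurial import error

    def resolvedot(repo):
        # active is the bookmark name as bytes, or None when inactive
        mark = repo._bookmarks.active
        if not mark:
            raise error.Abort(b'no active bookmark!')
        return mark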
--- a/tests/test-branches.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-branches.t	Mon Oct 22 14:46:06 2018 -0400
@@ -91,7 +91,7 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 2 files
-  new changesets f0e4c7f04036:33c2ceb9310b
+  new changesets f0e4c7f04036:33c2ceb9310b (3 drafts)
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ hg update '"colon:test"'
--- a/tests/test-bundle-r.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle-r.t	Mon Oct 22 14:46:06 2018 -0400
@@ -5,7 +5,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -27,13 +27,13 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets bfaf4b5cbf01
+  new changesets bfaf4b5cbf01 (1 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   0:bfaf4b5cbf01
   searching for changes
   2 changesets found
@@ -41,13 +41,13 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets bfaf4b5cbf01:21f32785131f
+  new changesets bfaf4b5cbf01:21f32785131f (2 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   1:21f32785131f
   searching for changes
   3 changesets found
@@ -55,13 +55,13 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 1 files
-  new changesets bfaf4b5cbf01:4ce51a113780
+  new changesets bfaf4b5cbf01:4ce51a113780 (3 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   2:4ce51a113780
   searching for changes
   4 changesets found
@@ -69,13 +69,13 @@
   adding manifests
   adding file changes
   added 4 changesets with 4 changes to 1 files
-  new changesets bfaf4b5cbf01:93ee6ab32777
+  new changesets bfaf4b5cbf01:93ee6ab32777 (4 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 1 files
   3:93ee6ab32777
   searching for changes
   2 changesets found
@@ -83,13 +83,13 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets bfaf4b5cbf01:c70afb1ee985
+  new changesets bfaf4b5cbf01:c70afb1ee985 (2 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   1:c70afb1ee985
   searching for changes
   3 changesets found
@@ -97,13 +97,13 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 1 files
-  new changesets bfaf4b5cbf01:f03ae5a9b979
+  new changesets bfaf4b5cbf01:f03ae5a9b979 (3 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   2:f03ae5a9b979
   searching for changes
   4 changesets found
@@ -111,13 +111,13 @@
   adding manifests
   adding file changes
   added 4 changesets with 5 changes to 2 files
-  new changesets bfaf4b5cbf01:095cb14b1b4d
+  new changesets bfaf4b5cbf01:095cb14b1b4d (4 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 5 total revisions
+  checked 4 changesets with 5 changes to 2 files
   3:095cb14b1b4d
   searching for changes
   5 changesets found
@@ -125,13 +125,13 @@
   adding manifests
   adding file changes
   added 5 changesets with 6 changes to 3 files
-  new changesets bfaf4b5cbf01:faa2e4234c7a
+  new changesets bfaf4b5cbf01:faa2e4234c7a (5 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 6 total revisions
+  checked 5 changesets with 6 changes to 3 files
   4:faa2e4234c7a
   searching for changes
   5 changesets found
@@ -139,13 +139,13 @@
   adding manifests
   adding file changes
   added 5 changesets with 5 changes to 2 files
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (5 drafts)
   (run 'hg update' to get a working copy)
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   4:916f1afdef90
   $ cd test-8
   $ hg pull ../test-7
@@ -163,7 +163,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ hg rollback
   repository tip rolled back to revision 4 (undo pull)
   $ cd ..
@@ -235,7 +235,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 4 changes to 4 files (+1 heads)
-  new changesets 93ee6ab32777:916f1afdef90
+  new changesets 93ee6ab32777:916f1afdef90 (6 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 revision 8
@@ -247,7 +247,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
 
@@ -260,7 +260,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets 93ee6ab32777:916f1afdef90
+  new changesets 93ee6ab32777:916f1afdef90 (2 drafts)
   (run 'hg update' to get a working copy)
 
 revision 4
@@ -272,7 +272,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
   $ hg unbundle ../test-bundle-branch2.hg
@@ -280,7 +280,7 @@
   adding manifests
   adding file changes
   added 4 changesets with 3 changes to 3 files (+1 heads)
-  new changesets c70afb1ee985:faa2e4234c7a
+  new changesets c70afb1ee985:faa2e4234c7a (4 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 revision 6
@@ -292,7 +292,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 7 changesets, 6 total revisions
+  checked 7 changesets with 6 changes to 3 files
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
   $ hg unbundle ../test-bundle-cset-7.hg
@@ -300,7 +300,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets 93ee6ab32777:916f1afdef90
+  new changesets 93ee6ab32777:916f1afdef90 (2 drafts)
   (run 'hg update' to get a working copy)
 
 revision 4
@@ -312,7 +312,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
 
   $ cd ../test
   $ hg merge 7
@@ -334,7 +334,7 @@
   adding manifests
   adding file changes
   added 7 changesets with 4 changes to 4 files
-  new changesets 93ee6ab32777:03fc0b0e347c
+  new changesets 93ee6ab32777:03fc0b0e347c (7 drafts)
   (run 'hg update' to get a working copy)
 
 revision 9
@@ -346,6 +346,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 10 changesets, 7 total revisions
+  checked 10 changesets with 7 changes to 4 files
 
   $ cd ..
--- a/tests/test-bundle-type.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle-type.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,7 +18,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets c35a0f9217e6
+  new changesets c35a0f9217e6 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-bundle.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle.t	Mon Oct 22 14:46:06 2018 -0400
@@ -33,7 +33,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ cd ..
   $ hg init empty
 
@@ -75,7 +75,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 0 changesets, 0 total revisions
+  checked 0 changesets with 0 changes to 0 files
 
 #if repobundlerepo
 
@@ -100,7 +100,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 Rollback empty
@@ -117,7 +117,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 Pull full.hg into test (using -R)
@@ -148,7 +148,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 Log -R full.hg in fresh empty
@@ -231,7 +231,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle*../full.hg (glob)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
@@ -255,7 +255,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle:empty+full.hg
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
@@ -556,7 +556,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets f9ee2f85a263:aa35859c02ea
+  new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R full-clone heads
@@ -596,12 +596,12 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets f9ee2f85a263
+  new changesets f9ee2f85a263 (1 drafts)
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 34c2bf6b0626
+  new changesets 34c2bf6b0626 (1 drafts)
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 View full contents of the bundle
@@ -729,7 +729,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
 #if repobundlerepo
 diff against bundle
@@ -796,13 +796,13 @@
   057f4db07f61970e1c11e83be79e9d08adc4dc31
   bundle2-output-bundle: "HG20", (1 params) 2 parts total
   bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
-  bundling: 1/2 changesets (50.00%)
-  bundling: 2/2 changesets (100.00%)
-  bundling: 1/2 manifests (50.00%)
-  bundling: 2/2 manifests (100.00%)
-  bundling: b 1/3 files (33.33%)
-  bundling: b1 2/3 files (66.67%)
-  bundling: x 3/3 files (100.00%)
+  changesets: 1/2 chunks (50.00%)
+  changesets: 2/2 chunks (100.00%)
+  manifests: 1/2 chunks (50.00%)
+  manifests: 2/2 chunks (100.00%)
+  files: b 1/3 files (33.33%)
+  files: b1 2/3 files (66.67%)
+  files: x 3/3 files (100.00%)
   bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
 
 #if repobundlerepo
@@ -815,7 +815,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 3 changesets, 5 total revisions
+  checked 3 changesets with 5 changes to 4 files
 #endif
 
 == Test bundling no commits
--- a/tests/test-bundle2-exchange.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle2-exchange.t	Mon Oct 22 14:46:06 2018 -0400
@@ -60,7 +60,7 @@
   adding file changes
   added 8 changesets with 7 changes to 7 files (+3 heads)
   pre-close-tip:02de42196ebe draft 
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   postclose-tip:02de42196ebe draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
   bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
@@ -95,7 +95,7 @@
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
   pre-close-tip:9520eea781bc draft 
-  new changesets cd010b8cd998:9520eea781bc
+  new changesets cd010b8cd998:9520eea781bc (1 drafts)
   postclose-tip:9520eea781bc draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
@@ -124,7 +124,7 @@
   added 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
   pre-close-tip:24b6387c8c8c draft 
-  new changesets 24b6387c8c8c
+  new changesets 24b6387c8c8c (1 drafts)
   postclose-tip:24b6387c8c8c draft 
   txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
@@ -300,7 +300,7 @@
   1 new obsolescence markers
   updating bookmark book_02de
   pre-close-tip:02de42196ebe draft book_02de
-  new changesets 02de42196ebe
+  new changesets 02de42196ebe (1 drafts)
   postclose-tip:02de42196ebe draft book_02de
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   ssh://user@dummy/main HG_URL=ssh://user@dummy/main
@@ -326,7 +326,7 @@
   1 new obsolescence markers
   updating bookmark book_42cc
   pre-close-tip:42ccdea3bb16 draft book_42cc
-  new changesets 42ccdea3bb16
+  new changesets 42ccdea3bb16 (1 drafts)
   postclose-tip:42ccdea3bb16 draft book_42cc
   txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
@@ -587,21 +587,21 @@
   pushing to other
   searching for changes
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
   [255]
 
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
   searching for changes
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
   [255]
 
   $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
   pushing to http://localhost:$HGPORT2/
   searching for changes
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
   [255]
 
 Doing the actual push: hook abort
@@ -934,7 +934,7 @@
 
   $ cat >> $TESTTMP/locktester.py <<EOF
   > import os
-  > from mercurial import extensions, bundle2, error
+  > from mercurial import bundle2, error, extensions
   > def checklock(orig, repo, *args, **kwargs):
   >     if repo.svfs.lexists(b"lock"):
   >         raise error.Abort(b"Lock should not be taken")
--- a/tests/test-bundle2-format.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle2-format.t	Mon Oct 22 14:46:06 2018 -0400
@@ -232,7 +232,7 @@
   > [experimental]
   > evolution.createmarkers=True
   > [ui]
-  > ssh=$PYTHON "$TESTDIR/dummyssh"
+  > ssh="$PYTHON" "$TESTDIR/dummyssh"
   > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
   > [web]
   > push_ssl = false
@@ -824,7 +824,7 @@
 
   $ hg unbundle2 < ../part-race.hg2
   0 unread bytes
-  abort: push race: repository changed while pushing - please try again
+  abort: push race: remote repository changed while pushing - please try again
   [255]
 
 Support for changegroup
@@ -835,7 +835,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+3 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg log -G
@@ -873,17 +873,17 @@
   bundle2-output-part: "changegroup" (advisory) streamed payload
   bundle2-output: part 0: "changegroup"
   bundle2-output: header chunk size: 18
-  bundling: 1/4 changesets (25.00%)
-  bundling: 2/4 changesets (50.00%)
-  bundling: 3/4 changesets (75.00%)
-  bundling: 4/4 changesets (100.00%)
-  bundling: 1/4 manifests (25.00%)
-  bundling: 2/4 manifests (50.00%)
-  bundling: 3/4 manifests (75.00%)
-  bundling: 4/4 manifests (100.00%)
-  bundling: D 1/3 files (33.33%)
-  bundling: E 2/3 files (66.67%)
-  bundling: H 3/3 files (100.00%)
+  changesets: 1/4 chunks (25.00%)
+  changesets: 2/4 chunks (50.00%)
+  changesets: 3/4 chunks (75.00%)
+  changesets: 4/4 chunks (100.00%)
+  manifests: 1/4 chunks (25.00%)
+  manifests: 2/4 chunks (50.00%)
+  manifests: 3/4 chunks (75.00%)
+  manifests: 4/4 chunks (100.00%)
+  files: D 1/3 files (33.33%)
+  files: E 2/3 files (66.67%)
+  files: H 3/3 files (100.00%)
   bundle2-output: payload chunk size: 1555
   bundle2-output: closing payload chunk
   bundle2-output: end of bundle
--- a/tests/test-bundle2-pushback.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle2-pushback.t	Mon Oct 22 14:46:06 2018 -0400
@@ -36,7 +36,7 @@
 
   $ cat >> $HGRCPATH <<EOF
   > [ui]
-  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
   > username = nobody <no.reply@example.com>
   > 
   > [alias]
--- a/tests/test-bundle2-remote-changegroup.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-bundle2-remote-changegroup.t	Mon Oct 22 14:46:06 2018 -0400
@@ -90,12 +90,12 @@
 
 Start a simple HTTP server to serve bundles
 
-  $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid
+  $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid
   $ cat dumb.pid >> $DAEMON_PIDS
 
   $ cat >> $HGRCPATH << EOF
   > [ui]
-  > ssh=$PYTHON "$TESTDIR/dummyssh"
+  > ssh="$PYTHON" "$TESTDIR/dummyssh"
   > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
   > EOF
 
@@ -106,7 +106,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg -R repo log -G
--- a/tests/test-casefolding.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-casefolding.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,7 +52,8 @@
   $ hg ci -Am addb D/b
   $ hg mv D/b d/b
   D/b: not overwriting - file already committed
-  (hg rename --force to replace the file by recording a rename)
+  ('hg rename --force' to replace the file by recording a rename)
+  [1]
   $ hg mv D/b d/c
   $ hg st
   A D/c
@@ -247,7 +248,7 @@
 X will be using HFS+. If that's not true, this test will fail.
 
   $ rm A
-  >>> open(u'a\u200c'.encode('utf-8'), 'w').write('unicode is fun')
+  >>> open(u'a\u200c'.encode('utf-8'), 'w').write('unicode is fun') and None
   $ hg status
   M A
 
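The trailing "and None" is a Python 3 accommodation: the test harness echoes
the value of each >>> expression, and under Python 3 write() returns the
number of characters written. A sketch of the difference (Python 3 behavior;
the file name is hypothetical):

    fp = open('a_file', 'w')
    assert fp.write('unicode is fun') == 14   # py3 returns the count
    assert (fp.write('x') and None) is None   # `and None` mutes the echo
    fp.close()
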
--- a/tests/test-cat.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-cat.t	Mon Oct 22 14:46:06 2018 -0400
@@ -65,7 +65,7 @@
 
 Test template output
 
-  $ hg --cwd tmp cat ../b ../c -T '== {path} ({abspath}) r{rev} ==\n{data}'
+  $ hg --cwd tmp cat ../b ../c -T '== {path|relpath} ({path}) r{rev} ==\n{data}'
   == ../b (b) r2 ==
   1
   == ../c (c) r2 ==
@@ -74,12 +74,10 @@
   $ hg cat b c -Tjson --output -
   [
    {
-    "abspath": "b",
     "data": "1\n",
     "path": "b"
    },
    {
-    "abspath": "c",
     "data": "3\n",
     "path": "c"
    }
@@ -89,7 +87,6 @@
   $ cat tmp/b.json
   [
    {
-    "abspath": "b",
     "data": "1\n",
     "path": "b"
    }
@@ -97,7 +94,6 @@
   $ cat tmp/c.json
   [
    {
-    "abspath": "c",
     "data": "3\n",
     "path": "c"
    }
--- a/tests/test-cbor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-cbor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,5 @@
 from __future__ import absolute_import
 
-import io
 import unittest
 
 from mercurial.thirdparty import (
@@ -10,10 +9,17 @@
     cborutil,
 )
 
+class TestCase(unittest.TestCase):
+    if not getattr(unittest.TestCase, 'assertRaisesRegex', False):
+        # Python 3.7 deprecates the regex*p* version, but 2.7 lacks
+        # the regex version.
+        assertRaisesRegex = (# camelcase-required
+            unittest.TestCase.assertRaisesRegexp)
+
 def loadit(it):
     return cbor.loads(b''.join(it))
 
-class BytestringTests(unittest.TestCase):
+class BytestringTests(TestCase):
     def testsimple(self):
         self.assertEqual(
             list(cborutil.streamencode(b'foobar')),
@@ -23,11 +29,20 @@
             loadit(cborutil.streamencode(b'foobar')),
             b'foobar')
 
+        self.assertEqual(cborutil.decodeall(b'\x46foobar'),
+                         [b'foobar'])
+
+        self.assertEqual(cborutil.decodeall(b'\x46foobar\x45fizbi'),
+                         [b'foobar', b'fizbi'])
+
     def testlong(self):
         source = b'x' * 1048576
 
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
     def testfromiter(self):
         # This is the example from RFC 7049 Section 2.2.2.
         source = [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99']
@@ -47,6 +62,25 @@
             loadit(cborutil.streamencodebytestringfromiter(source)),
             b''.join(source))
 
+        self.assertEqual(cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
+                                            b'\x43\xee\xff\x99\xff'),
+                         [b'\xaa\xbb\xcc\xdd', b'\xee\xff\x99', b''])
+
+        for i, chunk in enumerate(
+            cborutil.decodeall(b'\x5f\x44\xaa\xbb\xcc\xdd'
+                               b'\x43\xee\xff\x99\xff')):
+            self.assertIsInstance(chunk, cborutil.bytestringchunk)
+
+            if i == 0:
+                self.assertTrue(chunk.isfirst)
+            else:
+                self.assertFalse(chunk.isfirst)
+
+            if i == 2:
+                self.assertTrue(chunk.islast)
+            else:
+                self.assertFalse(chunk.islast)
+
     def testfromiterlarge(self):
         source = [b'a' * 16, b'b' * 128, b'c' * 1024, b'd' * 1048576]
 
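Indefinite-length bytestrings are framed by an 0x5f start byte, a run of definite-length chunks, and an 0xff break; decodeall() surfaces each chunk as a bytestringchunk, plus a final empty chunk carrying the islast flag, exactly what the loop above verifies. A condensed sketch (the cborutil import path is assumed; it is not shown in this hunk's context):

  from mercurial.utils import cborutil

  encoded = b''.join(cborutil.streamencodebytestringfromiter([b'ab', b'cd']))
  assert encoded == b'\x5f\x42ab\x42cd\xff'   # start, two chunks, break

  chunks = cborutil.decodeall(encoded)
  assert chunks == [b'ab', b'cd', b'']        # empty trailer marks the end
  assert chunks[0].isfirst and chunks[-1].islast
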
@@ -71,52 +105,417 @@
             source, chunksize=42))
         self.assertEqual(cbor.loads(dest), source)
 
-    def testreadtoiter(self):
-        source = io.BytesIO(b'\x5f\x44\xaa\xbb\xcc\xdd\x43\xee\xff\x99\xff')
+        self.assertEqual(b''.join(cborutil.decodeall(dest)), source)
+
+        for chunk in cborutil.decodeall(dest):
+            self.assertIsInstance(chunk, cborutil.bytestringchunk)
+            self.assertIn(len(chunk), (0, 8, 42))
+
+        encoded = b'\x5f\xff'
+        b = cborutil.decodeall(encoded)
+        self.assertEqual(b, [b''])
+        self.assertTrue(b[0].isfirst)
+        self.assertTrue(b[0].islast)
+
+    def testdecodevariouslengths(self):
+        for i in (0, 1, 22, 23, 24, 25, 254, 255, 256, 65534, 65535, 65536):
+            source = b'x' * i
+            encoded = b''.join(cborutil.streamencode(source))
+
+            if len(source) < 24:
+                hlen = 1
+            elif len(source) < 256:
+                hlen = 2
+            elif len(source) < 65536:
+                hlen = 3
+            elif len(source) < 1048576:
+                hlen = 5
+
+            self.assertEqual(cborutil.decodeitem(encoded),
+                             (True, source, hlen + len(source),
+                              cborutil.SPECIAL_NONE))
+
+    def testpartialdecode(self):
+        encoded = b''.join(cborutil.streamencode(b'foobar'))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+
+    def testpartialdecodevariouslengths(self):
+        lens = [
+            2,
+            3,
+            10,
+            23,
+            24,
+            25,
+            31,
+            100,
+            254,
+            255,
+            256,
+            257,
+            16384,
+            65534,
+            65535,
+            65536,
+            65537,
+            131071,
+            131072,
+            131073,
+            1048575,
+            1048576,
+            1048577,
+        ]
+
+        for size in lens:
+            if size < 24:
+                hlen = 1
+            elif size < 2**8:
+                hlen = 2
+            elif size < 2**16:
+                hlen = 3
+            elif size < 2**32:
+                hlen = 5
+            else:
+                assert False
+
+            source = b'x' * size
+            encoded = b''.join(cborutil.streamencode(source))
+
+            res = cborutil.decodeitem(encoded[0:1])
+
+            if hlen > 1:
+                self.assertEqual(res, (False, None, -(hlen - 1),
+                                       cborutil.SPECIAL_NONE))
+            else:
+                self.assertEqual(res, (False, None, -(size + hlen - 1),
+                                       cborutil.SPECIAL_NONE))
+
+            # Decoding partial header reports remaining header size.
+            for i in range(hlen - 1):
+                self.assertEqual(cborutil.decodeitem(encoded[0:i + 1]),
+                                 (False, None, -(hlen - i - 1),
+                                  cborutil.SPECIAL_NONE))
+
+            # Decoding complete header reports item size.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen]),
+                             (False, None, -size, cborutil.SPECIAL_NONE))
 
-        it = cborutil.readindefinitebytestringtoiter(source)
-        self.assertEqual(next(it), b'\xaa\xbb\xcc\xdd')
-        self.assertEqual(next(it), b'\xee\xff\x99')
+            # Decoding single byte after header reports item size - 1
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + 1]),
+                             (False, None, -(size - 1), cborutil.SPECIAL_NONE))
+
+            # Decoding all but the last byte reports -1 needed.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size - 1]),
+                             (False, None, -1, cborutil.SPECIAL_NONE))
+
+            # Decoding last byte retrieves value.
+            self.assertEqual(cborutil.decodeitem(encoded[0:hlen + size]),
+                             (True, source, hlen + size, cborutil.SPECIAL_NONE))
+
+    def testindefinitepartialdecode(self):
+        encoded = b''.join(cborutil.streamencodebytestringfromiter(
+            [b'foobar', b'biz']))
+
+        # First item should be begin of bytestring special.
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, None, 1,
+                          cborutil.SPECIAL_START_INDEFINITE_BYTESTRING))
+
+        # Second item should be the first chunk. But only available when
+        # we give it 7 bytes (1 byte header + 6 byte chunk).
+        self.assertEqual(cborutil.decodeitem(encoded[1:2]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:3]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:4]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:5]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:6]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[1:7]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+
+        self.assertEqual(cborutil.decodeitem(encoded[1:8]),
+                         (True, b'foobar', 7, cborutil.SPECIAL_NONE))
+
+        # Third item should be second chunk. But only available when
+        # we give it 4 bytes (1 byte header + 3 byte chunk).
+        self.assertEqual(cborutil.decodeitem(encoded[8:9]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[8:10]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[8:11]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+
+        self.assertEqual(cborutil.decodeitem(encoded[8:12]),
+                         (True, b'biz', 4, cborutil.SPECIAL_NONE))
+
+        # Fourth item should be end of indefinite stream marker.
+        self.assertEqual(cborutil.decodeitem(encoded[12:13]),
+                         (True, None, 1, cborutil.SPECIAL_INDEFINITE_BREAK))
+
+        # Now test the behavior when going through the decoder.
 
-        with self.assertRaises(StopIteration):
-            next(it)
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:1]),
+                         (False, 1, 0))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:2]),
+                         (False, 1, 6))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:3]),
+                         (False, 1, 5))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:4]),
+                         (False, 1, 4))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:5]),
+                         (False, 1, 3))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:6]),
+                         (False, 1, 2))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:7]),
+                         (False, 1, 1))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:8]),
+                         (True, 8, 0))
+
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:9]),
+                         (True, 8, 3))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:10]),
+                         (True, 8, 2))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:11]),
+                         (True, 8, 1))
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:12]),
+                         (True, 12, 0))
+
+        self.assertEqual(cborutil.sansiodecoder().decode(encoded[0:13]),
+                         (True, 13, 0))
 
-class IntTests(unittest.TestCase):
+        decoder = cborutil.sansiodecoder()
+        decoder.decode(encoded[0:8])
+        values = decoder.getavailable()
+        self.assertEqual(values, [b'foobar'])
+        self.assertTrue(values[0].isfirst)
+        self.assertFalse(values[0].islast)
+
+        self.assertEqual(decoder.decode(encoded[8:12]),
+                         (True, 4, 0))
+        values = decoder.getavailable()
+        self.assertEqual(values, [b'biz'])
+        self.assertFalse(values[0].isfirst)
+        self.assertFalse(values[0].islast)
+
+        self.assertEqual(decoder.decode(encoded[12:]),
+                         (True, 1, 0))
+        values = decoder.getavailable()
+        self.assertEqual(values, [b''])
+        self.assertFalse(values[0].isfirst)
+        self.assertTrue(values[0].islast)
+
+class StringTests(TestCase):
+    def testdecodeforbidden(self):
+        encoded = b'\x63foo'
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'string major type not supported'):
+            cborutil.decodeall(encoded)
+
+class IntTests(TestCase):
     def testsmall(self):
         self.assertEqual(list(cborutil.streamencode(0)), [b'\x00'])
+        self.assertEqual(cborutil.decodeall(b'\x00'), [0])
+
         self.assertEqual(list(cborutil.streamencode(1)), [b'\x01'])
+        self.assertEqual(cborutil.decodeall(b'\x01'), [1])
+
         self.assertEqual(list(cborutil.streamencode(2)), [b'\x02'])
+        self.assertEqual(cborutil.decodeall(b'\x02'), [2])
+
         self.assertEqual(list(cborutil.streamencode(3)), [b'\x03'])
+        self.assertEqual(cborutil.decodeall(b'\x03'), [3])
+
         self.assertEqual(list(cborutil.streamencode(4)), [b'\x04'])
+        self.assertEqual(cborutil.decodeall(b'\x04'), [4])
+
+        # Multiple value decode works.
+        self.assertEqual(cborutil.decodeall(b'\x00\x01\x02\x03\x04'),
+                         [0, 1, 2, 3, 4])
 
     def testnegativesmall(self):
         self.assertEqual(list(cborutil.streamencode(-1)), [b'\x20'])
+        self.assertEqual(cborutil.decodeall(b'\x20'), [-1])
+
         self.assertEqual(list(cborutil.streamencode(-2)), [b'\x21'])
+        self.assertEqual(cborutil.decodeall(b'\x21'), [-2])
+
         self.assertEqual(list(cborutil.streamencode(-3)), [b'\x22'])
+        self.assertEqual(cborutil.decodeall(b'\x22'), [-3])
+
         self.assertEqual(list(cborutil.streamencode(-4)), [b'\x23'])
+        self.assertEqual(cborutil.decodeall(b'\x23'), [-4])
+
         self.assertEqual(list(cborutil.streamencode(-5)), [b'\x24'])
+        self.assertEqual(cborutil.decodeall(b'\x24'), [-5])
+
+        # Multiple value decode works.
+        self.assertEqual(cborutil.decodeall(b'\x20\x21\x22\x23\x24'),
+                         [-1, -2, -3, -4, -5])
 
     def testrange(self):
         for i in range(-70000, 70000, 10):
-            self.assertEqual(
-                b''.join(cborutil.streamencode(i)),
-                cbor.dumps(i))
+            encoded = b''.join(cborutil.streamencode(i))
+
+            self.assertEqual(encoded, cbor.dumps(i))
+            self.assertEqual(cborutil.decodeall(encoded), [i])
+
+    def testdecodepartialubyte(self):
+        encoded = b''.join(cborutil.streamencode(250))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 250, 2, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialbyte(self):
+        encoded = b''.join(cborutil.streamencode(-42))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, -42, 2, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialushort(self):
+        encoded = b''.join(cborutil.streamencode(2**15))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 2**15, 3, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialshort(self):
+        encoded = b''.join(cborutil.streamencode(-1024))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, -1024, 3, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialulong(self):
+        encoded = b''.join(cborutil.streamencode(2**28))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 2**28, 5, cborutil.SPECIAL_NONE))
+
+    def testdecodepartiallong(self):
+        encoded = b''.join(cborutil.streamencode(-1048580))
 
-class ArrayTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, -1048580, 5, cborutil.SPECIAL_NONE))
+
+    def testdecodepartialulonglong(self):
+        encoded = b''.join(cborutil.streamencode(2**32))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -8, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -7, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
+                         (True, 2**32, 9, cborutil.SPECIAL_NONE))
+
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.decodeall(encoded[0:1])
+
+        with self.assertRaisesRegex(
+            cborutil.CBORDecodeError, 'input data not fully consumed'):
+            cborutil.decodeall(encoded[0:2])
+
+    def testdecodepartiallonglong(self):
+        encoded = b''.join(cborutil.streamencode(-7000000000))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -8, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -7, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -6, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -5, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:7]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:8]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:9]),
+                         (True, -7000000000, 9, cborutil.SPECIAL_NONE))
+
+class ArrayTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode([])), [b'\x80'])
         self.assertEqual(loadit(cborutil.streamencode([])), [])
 
+        self.assertEqual(cborutil.decodeall(b'\x80'), [[]])
+
     def testbasic(self):
         source = [b'foo', b'bar', 1, -10]
 
-        self.assertEqual(list(cborutil.streamencode(source)), [
-            b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29'])
+        chunks = [
+            b'\x84', b'\x43', b'foo', b'\x43', b'bar', b'\x01', b'\x29']
+
+        self.assertEqual(list(cborutil.streamencode(source)), chunks)
+
+        self.assertEqual(cborutil.decodeall(b''.join(chunks)), [source])
 
     def testemptyfromiter(self):
         self.assertEqual(b''.join(cborutil.streamencodearrayfromiter([])),
                          b'\x9f\xff')
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(b'\x9f\xff')
+
     def testfromiter1(self):
         source = [b'foo']
 
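decodeitem() is the sans-I/O primitive these tests drive by hand: it returns a 4-tuple (complete, value, readcount, special). On a short read, complete is False and readcount goes negative, encoding how many more bytes are needed; special distinguishes plain values from collection starts and indefinite-stream markers. A sketch of the calling convention (import path assumed):

  from mercurial.utils import cborutil

  encoded = b''.join(cborutil.streamencode(b'foobar'))     # b'\x46foobar'

  complete, value, readcount, special = cborutil.decodeitem(encoded[:3])
  assert (complete, readcount) == (False, -4)              # 4 bytes still missing

  complete, value, readcount, special = cborutil.decodeitem(encoded)
  assert (complete, value, readcount) == (True, b'foobar', 7)
  assert special == cborutil.SPECIAL_NONE
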
@@ -129,26 +528,193 @@
         dest = b''.join(cborutil.streamencodearrayfromiter(source))
         self.assertEqual(cbor.loads(dest), source)
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(dest)
+
     def testtuple(self):
         source = (b'foo', None, 42)
+        encoded = b''.join(cborutil.streamencode(source))
 
-        self.assertEqual(cbor.loads(b''.join(cborutil.streamencode(source))),
-                         list(source))
+        self.assertEqual(cbor.loads(encoded), list(source))
+
+        self.assertEqual(cborutil.decodeall(encoded), [list(source)])
+
+    def testpartialdecode(self):
+        source = list(range(4))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 4, 1, cborutil.SPECIAL_START_ARRAY))
+
+        source = list(range(23))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 23, 1, cborutil.SPECIAL_START_ARRAY))
+
+        source = list(range(24))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 24, 2, cborutil.SPECIAL_START_ARRAY))
 
-class SetTests(unittest.TestCase):
+        source = list(range(256))
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 256, 3, cborutil.SPECIAL_START_ARRAY))
+
+    def testnested(self):
+        source = [[], [], [[], [], []]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        source = [True, None, [True, 0, 2], [None], [], [[[]], -87]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        # A set within an array.
+        source = [None, {b'foo', b'bar', None, False}, set()]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        # A map within an array.
+        source = [None, {}, {b'foo': b'bar', True: False}, [{}]]
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testindefinitebytestringvalues(self):
+        # Single value array whose value is an empty indefinite bytestring.
+        encoded = b'\x81\x5f\x40\xff'
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as array values'):
+            cborutil.decodeall(encoded)
+
+class SetTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode(set())), [
             b'\xd9\x01\x02',
             b'\x80',
         ])
 
+        self.assertEqual(cborutil.decodeall(b'\xd9\x01\x02\x80'), [set()])
+
     def testset(self):
         source = {b'foo', None, 42}
+        encoded = b''.join(cborutil.streamencode(source))
 
-        self.assertEqual(cbor.loads(b''.join(cborutil.streamencode(source))),
-                         source)
+        self.assertEqual(cbor.loads(encoded), source)
+
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testinvalidtag(self):
+        # Must use array to encode sets.
+        encoded = b'\xd9\x01\x02\xa0'
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'expected array after finite set '
+                                    'semantic tag'):
+            cborutil.decodeall(encoded)
+
+    def testpartialdecode(self):
+        # Semantic tag item will be 3 bytes. Set header will be variable
+        # depending on length.
+        encoded = b''.join(cborutil.streamencode({i for i in range(23)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 23, 4, cborutil.SPECIAL_START_SET))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 23, 4, cborutil.SPECIAL_START_SET))
+
+        encoded = b''.join(cborutil.streamencode({i for i in range(24)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 24, 5, cborutil.SPECIAL_START_SET))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 24, 5, cborutil.SPECIAL_START_SET))
 
-class BoolTests(unittest.TestCase):
+        encoded = b''.join(cborutil.streamencode({i for i in range(256)}))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 256, 6, cborutil.SPECIAL_START_SET))
+
+    def testinvalidvalue(self):
+        encoded = b''.join([
+            b'\xd9\x01\x02', # semantic tag
+            b'\x81', # array of size 1
+            b'\x5f\x43foo\xff', # indefinite length bytestring "foo"
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\x80', # empty array
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\xa0', # empty map
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xd9\x01\x02',
+            b'\x81',
+            b'\xd9\x01\x02\x81\x01', # set with integer 1
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not allowed as set values'):
+            cborutil.decodeall(encoded)
+
+class BoolTests(TestCase):
     def testbasic(self):
         self.assertEqual(list(cborutil.streamencode(True)),  [b'\xf5'])
         self.assertEqual(list(cborutil.streamencode(False)), [b'\xf4'])
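
CBOR has no native set type, so cborutil wraps an array in semantic tag 258 (the registered finite-set tag); that is the 0xd9 0x01 0x02 prefix asserted throughout SetTests above. Unpacking those bytes:

  initial = 0xd9
  assert initial >> 5 == 6       # major type 6: semantic tag
  assert initial & 0x1f == 25    # subtype 25: a 2-byte tag number follows
  assert 0x0102 == 258           # tag 258: the finite set
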
@@ -156,23 +722,38 @@
         self.assertIs(loadit(cborutil.streamencode(True)), True)
         self.assertIs(loadit(cborutil.streamencode(False)), False)
 
-class NoneTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeall(b'\xf4'), [False])
+        self.assertEqual(cborutil.decodeall(b'\xf5'), [True])
+
+        self.assertEqual(cborutil.decodeall(b'\xf4\xf5\xf5\xf4'),
+                         [False, True, True, False])
+
+class NoneTests(TestCase):
     def testbasic(self):
         self.assertEqual(list(cborutil.streamencode(None)), [b'\xf6'])
 
         self.assertIs(loadit(cborutil.streamencode(None)), None)
 
-class MapTests(unittest.TestCase):
+        self.assertEqual(cborutil.decodeall(b'\xf6'), [None])
+        self.assertEqual(cborutil.decodeall(b'\xf6\xf6'), [None, None])
+
+class MapTests(TestCase):
     def testempty(self):
         self.assertEqual(list(cborutil.streamencode({})), [b'\xa0'])
         self.assertEqual(loadit(cborutil.streamencode({})), {})
 
+        self.assertEqual(cborutil.decodeall(b'\xa0'), [{}])
+
     def testemptyindefinite(self):
         self.assertEqual(list(cborutil.streamencodemapfromiter([])), [
             b'\xbf', b'\xff'])
 
         self.assertEqual(loadit(cborutil.streamencodemapfromiter([])), {})
 
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length uint not allowed'):
+            cborutil.decodeall(b'\xbf\xff')
+
     def testone(self):
         source = {b'foo': b'bar'}
         self.assertEqual(list(cborutil.streamencode(source)), [
@@ -180,6 +761,8 @@
 
         self.assertEqual(loadit(cborutil.streamencode(source)), source)
 
+        self.assertEqual(cborutil.decodeall(b'\xa1\x43foo\x43bar'), [source])
+
     def testmultiple(self):
         source = {
             b'foo': b'bar',
@@ -192,6 +775,9 @@
             loadit(cborutil.streamencodemapfromiter(source.items())),
             source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
     def testcomplex(self):
         source = {
             b'key': 1,
@@ -205,6 +791,202 @@
             loadit(cborutil.streamencodemapfromiter(source.items())),
             source)
 
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testnested(self):
+        source = {b'key1': None, b'key2': {b'sub1': b'sub2'}, b'sub2': {}}
+        encoded = b''.join(cborutil.streamencode(source))
+
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+        source = {
+            b'key1': [],
+            b'key2': [None, False],
+            b'key3': {b'foo', b'bar'},
+            b'key4': {},
+        }
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeall(encoded), [source])
+
+    def testillegalkey(self):
+        encoded = b''.join([
+            # map header + len 1
+            b'\xa1',
+            # indefinite length bytestring "foo" in key position
+            b'\x5f\x43foo\xff'
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as map keys'):
+            cborutil.decodeall(encoded)
+
+        encoded = b''.join([
+            b'\xa1',
+            b'\x80', # empty array
+            b'\x43foo',
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'collections not supported as map keys'):
+            cborutil.decodeall(encoded)
+
+    def testillegalvalue(self):
+        encoded = b''.join([
+            b'\xa1', # map header
+            b'\x43foo', # key
+            b'\x5f\x43bar\xff', # indefinite length value
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'indefinite length bytestrings not '
+                                    'allowed as map values'):
+            cborutil.decodeall(encoded)
+
+    def testpartialdecode(self):
+        source = {b'key1': b'value1'}
+        encoded = b''.join(cborutil.streamencode(source))
+
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 1, 1, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(23)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (True, 23, 1, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(24)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 24, 2, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(256)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (True, 256, 3, cborutil.SPECIAL_START_MAP))
+
+        source = {b'key%d' % i: None for i in range(65536)}
+        encoded = b''.join(cborutil.streamencode(source))
+        self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                         (False, None, -4, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                         (False, None, -3, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:3]),
+                         (False, None, -2, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:4]),
+                         (False, None, -1, cborutil.SPECIAL_NONE))
+        self.assertEqual(cborutil.decodeitem(encoded[0:5]),
+                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
+        self.assertEqual(cborutil.decodeitem(encoded[0:6]),
+                         (True, 65536, 5, cborutil.SPECIAL_START_MAP))
+
+class SemanticTagTests(TestCase):
+    def testdecodeforbidden(self):
+        for i in range(500):
+            if i == cborutil.SEMANTIC_TAG_FINITE_SET:
+                continue
+
+            tag = cborutil.encodelength(cborutil.MAJOR_TYPE_SEMANTIC,
+                                        i)
+
+            encoded = tag + cborutil.encodelength(cborutil.MAJOR_TYPE_UINT, 42)
+
+            # Partial decode is incomplete.
+            if i < 24:
+                pass
+            elif i < 256:
+                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                                 (False, None, -1, cborutil.SPECIAL_NONE))
+            elif i < 65536:
+                self.assertEqual(cborutil.decodeitem(encoded[0:1]),
+                                 (False, None, -2, cborutil.SPECIAL_NONE))
+                self.assertEqual(cborutil.decodeitem(encoded[0:2]),
+                                 (False, None, -1, cborutil.SPECIAL_NONE))
+
+            with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                        r'semantic tag \d+ not allowed'):
+                cborutil.decodeitem(encoded)
+
+class SpecialTypesTests(TestCase):
+    def testforbiddentypes(self):
+        for i in range(256):
+            if i == cborutil.SUBTYPE_FALSE:
+                continue
+            elif i == cborutil.SUBTYPE_TRUE:
+                continue
+            elif i == cborutil.SUBTYPE_NULL:
+                continue
+
+            encoded = cborutil.encodelength(cborutil.MAJOR_TYPE_SPECIAL, i)
+
+            with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                        r'special type \d+ not allowed'):
+                cborutil.decodeitem(encoded)
+
+class SansIODecoderTests(TestCase):
+    def testemptyinput(self):
+        decoder = cborutil.sansiodecoder()
+        self.assertEqual(decoder.decode(b''), (False, 0, 0))
+
+class BufferingDecoderTests(TestCase):
+    def testsimple(self):
+        source = [
+            b'foobar',
+            b'x' * 128,
+            {b'foo': b'bar'},
+            True,
+            False,
+            None,
+            [None for i in range(128)],
+        ]
+
+        encoded = b''.join(cborutil.streamencode(source))
+
+        for step in range(1, 32):
+            decoder = cborutil.bufferingdecoder()
+            start = 0
+
+            while start < len(encoded):
+                decoder.decode(encoded[start:start + step])
+                start += step
+
+            self.assertEqual(decoder.getavailable(), [source])
+
+    def testbytearray(self):
+        source = b''.join(cborutil.streamencode(b'foobar'))
+
+        decoder = cborutil.bufferingdecoder()
+        decoder.decode(bytearray(source))
+
+        self.assertEqual(decoder.getavailable(), [b'foobar'])
+
+class DecodeallTests(TestCase):
+    def testemptyinput(self):
+        self.assertEqual(cborutil.decodeall(b''), [])
+
+    def testpartialinput(self):
+        encoded = b''.join([
+            b'\x82', # array of 2 elements
+            b'\x01', # integer 1
+        ])
+
+        with self.assertRaisesRegex(cborutil.CBORDecodeError,
+                                    'input data not complete'):
+            cborutil.decodeall(encoded)
+
 if __name__ == '__main__':
     import silenttestrunner
     silenttestrunner.main(__name__)
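
bufferingdecoder, exercised by BufferingDecoderTests above, wraps the sans-I/O decoder for callers that receive data in arbitrary network-sized slices: feed whatever arrived to decode(), then drain finished values with getavailable(). A sketch (import path assumed):

  from mercurial.utils import cborutil

  encoded = b''.join(cborutil.streamencode({b'foo': b'bar'}))

  decoder = cborutil.bufferingdecoder()
  for i in range(len(encoded)):           # worst case: one byte at a time
      decoder.decode(encoded[i:i + 1])

  assert decoder.getavailable() == [{b'foo': b'bar'}]
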
--- a/tests/test-censor.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-censor.t	Mon Oct 22 14:46:06 2018 -0400
@@ -180,7 +180,7 @@
   checking files
    target@1: censored file data
    target@2: censored file data
-  2 files, 5 changesets, 7 total revisions
+  checked 5 changesets with 7 changes to 2 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -215,7 +215,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 7 total revisions
+  checked 5 changesets with 7 changes to 2 files
 
 May update to revision with censored data with explicit config
 
@@ -306,7 +306,7 @@
 
 Can censor after revlog has expanded to no longer permit inline storage
 
-  $ for x in `$PYTHON $TESTDIR/seq.py 0 50000`
+  $ for x in `"$PYTHON" $TESTDIR/seq.py 0 50000`
   > do
   >   echo "Password: hunter$x" >> target
   > done
@@ -341,7 +341,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 12 changesets, 13 total revisions
+  checked 12 changesets with 13 changes to 2 files
 
 Repo cloned before tainted content introduced can pull censored nodes
 
@@ -353,7 +353,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 1 changesets, 2 total revisions
+  checked 1 changesets with 2 changes to 2 files
   $ hg pull -r $H1 -r $H2
   pulling from $TESTTMP/r
   searching for changes
@@ -380,7 +380,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 12 changesets, 13 total revisions
+  checked 12 changesets with 13 changes to 2 files
 
 Censored nodes can be pushed if they censor previously unexchanged nodes
 
@@ -426,7 +426,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets 075be80ac777:dcbaf17bf3a1
+  new changesets 075be80ac777:dcbaf17bf3a1 (2 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg cat -r $REV target
   $ hg cat -r $CLEANREV target
@@ -440,7 +440,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 14 changesets, 15 total revisions
+  checked 14 changesets with 15 changes to 2 files
 
 Censored nodes can be imported on top of censored nodes, consecutively
 
@@ -461,7 +461,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 5 changes to 2 files (+1 heads)
-  new changesets efbe78065929:683e4645fded
+  new changesets efbe78065929:683e4645fded (6 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg update $H2
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -472,7 +472,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 14 changesets, 15 total revisions
+  checked 14 changesets with 15 changes to 2 files
   $ cd ../r
 
 Can import bundle where first revision of a file is censored
@@ -487,6 +487,6 @@
   adding manifests
   adding file changes
   added 1 changesets with 2 changes to 2 files
-  new changesets e97f55b2665a
+  new changesets e97f55b2665a (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg cat -r 0 target
--- a/tests/test-check-code.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-code.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,7 @@
   >>> commands = []
   >>> with open('mercurial/debugcommands.py', 'rb') as fh:
   ...     for line in fh:
-  ...         m = re.match("^@command\('([a-z]+)", line)
+  ...         m = re.match(br"^@command\('([a-z]+)", line)
   ...         if m:
   ...             commands.append(m.group(1))
   >>> scommands = list(sorted(commands))
@@ -63,3 +63,7 @@
   > mercurial/parsers.py \
   > mercurial/zstd.py
   [1]
+
+Keep python3 tests sorted:
+  $ sort < contrib/python3-whitelist > $TESTTMP/py3sorted
+  $ cmp contrib/python3-whitelist $TESTTMP/py3sorted || echo 'Please sort passing tests!'
--- a/tests/test-check-config.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-config.t	Mon Oct 22 14:46:06 2018 -0400
@@ -30,7 +30,7 @@
 
   $ cd "$TESTDIR"/..
 
-  $ $PYTHON contrib/check-config.py < $TESTTMP/files
+  $ "$PYTHON" contrib/check-config.py < $TESTTMP/files
   foo = ui.configint('ui', 'intdefault', default=42)
   conflict on ui.intdefault: ('int', '42') != ('int', '1')
   at $TESTTMP/testfile.py:12:
@@ -44,4 +44,4 @@
 New errors are not allowed. Warnings are strongly discouraged.
 
   $ testrepohg files "set:(**.py or **.txt) - tests/**" | sed 's|\\|/|g' |
-  >   $PYTHON contrib/check-config.py
+  >   "$PYTHON" contrib/check-config.py
--- a/tests/test-check-help.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-help.t	Mon Oct 22 14:46:06 2018 -0400
@@ -7,7 +7,8 @@
   > import re
   > import sys
   > if sys.platform == "win32":
-  >     import os, msvcrt
+  >     import msvcrt
+  >     import os
   >     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
   > topics = set()
   > topicre = re.compile(br':hg:`help ([a-z0-9\-.]+)`')
@@ -25,5 +26,5 @@
 
   $ testrepohg files 'glob:{hgdemandimport,hgext,mercurial}/**/*.py' \
   > | sed 's|\\|/|g' \
-  > | xargs $PYTHON "$TESTTMP/scanhelptopics.py" \
-  > | xargs -n1 hg help > /dev/null
+  > | xargs "$PYTHON" "$TESTTMP/scanhelptopics.py" \
+  > | xargs -n1 hg help --config extensions.phabricator= > /dev/null
--- a/tests/test-check-interfaces.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-interfaces.py	Mon Oct 22 14:46:06 2018 -0400
@@ -28,6 +28,7 @@
     manifest,
     pycompat,
     repository,
+    revlog,
     sshpeer,
     statichttprepo,
     ui as uimod,
@@ -98,17 +99,14 @@
 def main():
     ui = uimod.ui()
     # Needed so we can open a local repo with obsstore without a warning.
-    ui.setconfig('experimental', 'evolution.createmarkers', True)
+    ui.setconfig(b'experimental', b'evolution.createmarkers', True)
 
     checkzobject(badpeer())
 
     ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
     checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
 
-    ziverify.verifyClass(repository.ipeerconnection,
-                         httppeer.httpv2peer)
-    ziverify.verifyClass(repository.ipeercapabilities,
-                         httppeer.httpv2peer)
+    ziverify.verifyClass(repository.ipeerv2, httppeer.httpv2peer)
     checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))
 
     ziverify.verifyClass(repository.ipeerbase,
@@ -140,9 +138,11 @@
     ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
     checkzobject(unionrepo.unionpeer(dummyrepo()))
 
-    ziverify.verifyClass(repository.completelocalrepository,
+    ziverify.verifyClass(repository.ilocalrepositorymain,
                          localrepo.localrepository)
-    repo = localrepo.localrepository(ui, rootdir)
+    ziverify.verifyClass(repository.ilocalrepositoryfilestorage,
+                         localrepo.revlogfilestorage)
+    repo = localrepo.makelocalrepository(ui, rootdir)
     checkzobject(repo)
 
     ziverify.verifyClass(wireprototypes.baseprotocolhandler,
@@ -175,13 +175,14 @@
     ziverify.verifyClass(repository.imanifestrevisionwritable,
                          manifest.memtreemanifestctx)
     ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
+    ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
 
     vfs = vfsmod.vfs(b'.')
     fl = filelog.filelog(vfs, b'dummy.i')
     checkzobject(fl, allowextra=True)
 
     # Conforms to imanifestlog.
-    ml = manifest.manifestlog(vfs, repo)
+    ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs))
     checkzobject(ml)
     checkzobject(repo.manifestlog)
 
@@ -196,4 +197,26 @@
     # Conforms to imanifestdict.
     checkzobject(mctx.read())
 
+    mrl = manifest.manifestrevlog(vfs)
+    checkzobject(mrl)
+
+    ziverify.verifyClass(repository.irevisiondelta,
+                         revlog.revlogrevisiondelta)
+
+    rd = revlog.revlogrevisiondelta(
+        node=b'',
+        p1node=b'',
+        p2node=b'',
+        basenode=b'',
+        linknode=b'',
+        flags=b'',
+        baserevisionsize=None,
+        revision=b'',
+        delta=None)
+    checkzobject(rd)
+
+    ziverify.verifyClass(repository.iverifyproblem,
+                         revlog.revlogproblem)
+    checkzobject(revlog.revlogproblem())
+
 main()
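
The scaffolding in this test is zope.interface's declarative checking: verifyClass() asserts that a class structurally satisfies an interface, and checkzobject() (defined earlier in this test) performs the instance-level counterpart. The underlying zope.interface pattern, with hypothetical names:

  from zope.interface import Interface, implementer
  from zope.interface.verify import verifyClass

  class IGreeter(Interface):
      def greet(name):
          """Return a greeting for name."""

  @implementer(IGreeter)
  class Greeter(object):
      def greet(self, name):
          return 'hello %s' % name

  verifyClass(IGreeter, Greeter)   # raises if the contract is not met
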
--- a/tests/test-check-module-imports.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-module-imports.t	Mon Oct 22 14:46:06 2018 -0400
@@ -29,18 +29,7 @@
   > -X mercurial/thirdparty \
   > -X tests/hypothesishelpers.py \
   > -X tests/test-check-interfaces.py \
-  > -X tests/test-commit-interactive.t \
-  > -X tests/test-contrib-check-code.t \
   > -X tests/test-demandimport.py \
-  > -X tests/test-extension.t \
-  > -X tests/test-hghave.t \
-  > -X tests/test-hgweb-auth.py \
-  > -X tests/test-hgweb-no-path-info.t \
-  > -X tests/test-hgweb-no-request-uri.t \
-  > -X tests/test-hgweb-non-interactive.t \
-  > -X tests/test-hook.t \
-  > -X tests/test-import.t \
   > -X tests/test-imports-checker.t \
-  > -X tests/test-lock.py \
   > -X tests/test-verify-repo-operations.py \
-  > | sed 's-\\-/-g' | $PYTHON "$import_checker" -
+  > | sed 's-\\-/-g' | "$PYTHON" "$import_checker" -
--- a/tests/test-check-py3-compat.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-check-py3-compat.t	Mon Oct 22 14:46:06 2018 -0400
@@ -3,10 +3,11 @@
   $ . "$TESTDIR/helpers-testrepo.sh"
   $ cd "$TESTDIR"/..
 
+#if no-py3
   $ testrepohg files 'set:(**.py)' \
   > -X hgdemandimport/demandimportpy2.py \
   > -X mercurial/thirdparty/cbor \
-  > | sed 's|\\|/|g' | xargs $PYTHON contrib/check-py3-compat.py
+  > | sed 's|\\|/|g' | xargs "$PYTHON" contrib/check-py3-compat.py
   contrib/python-zstandard/setup.py not using absolute_import
   contrib/python-zstandard/setup_zstd.py not using absolute_import
   contrib/python-zstandard/tests/common.py not using absolute_import
@@ -21,27 +22,27 @@
   contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import
   contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import
   setup.py not using absolute_import
+#endif
 
-#if py3exe
+#if py3
   $ testrepohg files 'set:(**.py) - grep(pygments)' \
   > -X hgdemandimport/demandimportpy2.py \
   > -X hgext/fsmonitor/pywatchman \
-  > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \
+  > -X mercurial/cffi \
+  > -X mercurial/thirdparty \
+  > | sed 's|\\|/|g' | xargs "$PYTHON" contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
-  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob)
-  mercurial/cffi/bdiff.py: error importing: <ImportError> cannot import name '_bdiff' (error at bdiff.py:*)
-  mercurial/cffi/bdiffbuild.py: error importing: <ImportError> No module named 'cffi' (error at bdiffbuild.py:*)
-  mercurial/cffi/mpatch.py: error importing: <ImportError> cannot import name '_mpatch' (error at mpatch.py:*)
-  mercurial/cffi/mpatchbuild.py: error importing: <ImportError> No module named 'cffi' (error at mpatchbuild.py:*)
-  mercurial/cffi/osutilbuild.py: error importing: <ImportError> No module named 'cffi' (error at osutilbuild.py:*)
-  mercurial/scmwindows.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/win32.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
-  mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob)
-
+  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob) (?)
+  hgext/infinitepush/sqlindexapi.py: error importing: <*Error> No module named 'mysql' (error at sqlindexapi.py:*) (glob) (?)
+  mercurial/scmwindows.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
+  mercurial/win32.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
+  mercurial/windows.py: error importing: <ModuleNotFoundError> No module named 'msvcrt' (error at windows.py:*) (no-windows !)
+  mercurial/posix.py: error importing: <ModuleNotFoundError> No module named 'fcntl' (error at posix.py:*) (windows !)
+  mercurial/scmposix.py: error importing: <ModuleNotFoundError> No module named 'fcntl' (error at scmposix.py:*) (windows !)
 #endif
 
-#if py3exe py3pygments
+#if py3 pygments
   $ testrepohg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
-  > | xargs $PYTHON3 contrib/check-py3-compat.py \
+  > | xargs "$PYTHON" contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
 #endif
--- a/tests/test-chg.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-chg.t	Mon Oct 22 14:46:06 2018 -0400
@@ -89,7 +89,7 @@
   > [extensions]
   > pager =
   > [pager]
-  > pager = $PYTHON $TESTTMP/fakepager.py
+  > pager = "$PYTHON" $TESTTMP/fakepager.py
   > EOF
   $ chg version > /dev/null
   $ touch foo
--- a/tests/test-churn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-churn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,7 +52,7 @@
 
   $ hg churn -r :2
   user2      2 ***************************************************************
-  user1      1 ********************************
+  user1      1 *******************************
   $ cd ..
 
 churn with aliases
@@ -83,10 +83,10 @@
 churn by hour
 
   $ hg churn -f '%H' -s
-  06      1 *****************
+  06      1 ****************
   09      2 *********************************
   12      4 ******************************************************************
-  13      1 *****************
+  13      1 ****************
 
 
 churn with separated added/removed lines
@@ -94,16 +94,16 @@
   $ hg rm d/g/f2.txt
   $ hg ci -Am "removed d/g/f2.txt" -u user1 -d 14:00 d/g/f2.txt
   $ hg churn --diffstat
-  user1           +3/-1 +++++++++++++++++++++++++++++++++++++++++--------------
-  user3           +3/-0 +++++++++++++++++++++++++++++++++++++++++
+  user1           +3/-1 ++++++++++++++++++++++++++++++++++++++++-------------
+  user3           +3/-0 ++++++++++++++++++++++++++++++++++++++++
   user2           +2/-0 +++++++++++++++++++++++++++
 
 churn --diffstat with color
 
   $ hg --config extensions.color= churn --config color.mode=ansi \
   >     --diffstat --color=always
-  user1           +3/-1 \x1b[0;32m+++++++++++++++++++++++++++++++++++++++++\x1b[0m\x1b[0;31m--------------\x1b[0m (esc)
-  user3           +3/-0 \x1b[0;32m+++++++++++++++++++++++++++++++++++++++++\x1b[0m (esc)
+  user1           +3/-1 \x1b[0;32m++++++++++++++++++++++++++++++++++++++++\x1b[0m\x1b[0;31m-------------\x1b[0m (esc)
+  user3           +3/-0 \x1b[0;32m++++++++++++++++++++++++++++++++++++++++\x1b[0m (esc)
   user2           +2/-0 \x1b[0;32m+++++++++++++++++++++++++++\x1b[0m (esc)
 
 
@@ -112,7 +112,7 @@
   $ hg churn -c
   user1      4 ***************************************************************
   user3      3 ***********************************************
-  user2      2 ********************************
+  user2      2 *******************************
 
   $ echo 'with space = no-space' >> ../aliases
   $ echo a >> a
@@ -154,9 +154,9 @@
   [user4@x.com]
   $ hg churn -c
   user1            4 *********************************************************
-  user3            3 *******************************************
-  user2            2 *****************************
-  user4@x.com      2 *****************************
+  user3            3 ******************************************
+  user2            2 ****************************
+  user4@x.com      2 ****************************
   with space       1 **************
 
 Test multibyte sequences in names
@@ -165,28 +165,28 @@
   $ hg --encoding utf-8 ci -m'changed bar' -u 'El Niño <nino@x.com>'
   $ hg --encoding utf-8 churn -ct '{author|person}'
   user1           4 **********************************************************
-  user3           3 ********************************************
+  user3           3 *******************************************
   user2           2 *****************************
   user4           2 *****************************
-  El Ni\xc3\xb1o         1 *************** (esc)
-  with space      1 ***************
+  El Ni\xc3\xb1o         1 ************** (esc)
+  with space      1 **************
 
 Test --template argument, with backwards compatibility
 
   $ hg churn -t '{author|user}'
   user1      4 ***************************************************************
   user3      3 ***********************************************
-  user2      2 ********************************
-  nino       1 ****************
-  with       1 ****************
+  user2      2 *******************************
+  nino       1 ***************
+  with       1 ***************
              0 
   user4      0 
   $ hg churn -T '{author|user}'
   user1      4 ***************************************************************
   user3      3 ***********************************************
-  user2      2 ********************************
-  nino       1 ****************
-  with       1 ****************
+  user2      2 *******************************
+  nino       1 ***************
+  with       1 ***************
              0 
   user4      0 
   $ hg churn -t 'alltogether'
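
The bar widths above all shrank by one character, consistent with the graph scaling now truncating rather than rounding half-steps. The proportional-scaling idea as a generic sketch (not churn's exact code; the width 63 matches this test's layout):

  def bars(counts, maxwidth=63):
      # one star bar per key, proportional to the largest count
      biggest = max(counts.values())
      for name, count in sorted(counts.items(), key=lambda kv: -kv[1]):
          print('%-10s %d %s' % (name, count, '*' * (count * maxwidth // biggest)))

  bars({'user2': 2, 'user1': 1})   # prints 63 stars for user2, 31 for user1
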
--- a/tests/test-clone-cgi.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clone-cgi.t	Mon Oct 22 14:46:06 2018 -0400
@@ -26,13 +26,13 @@
 
   $ . "$TESTDIR/cgienv"
   $ QUERY_STRING="cmd=changegroup&roots=0000000000000000000000000000000000000000"; export QUERY_STRING
-  $ $PYTHON hgweb.cgi >page1 2>&1
-  $ $PYTHON "$TESTDIR/md5sum.py" page1
+  $ "$PYTHON" hgweb.cgi >page1 2>&1
+  $ "$PYTHON" "$TESTDIR/md5sum.py" page1
   1f424bb22ec05c3c6bc866b6e67efe43  page1
 
 make sure headers are sent even when there is no body
 
-  $ QUERY_STRING="cmd=listkeys&namespace=nosuchnamespace" $PYTHON hgweb.cgi
+  $ QUERY_STRING="cmd=listkeys&namespace=nosuchnamespace" "$PYTHON" hgweb.cgi
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
   Content-Length: 0\r (esc)
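
The quoting fix above ("$PYTHON" rather than bare $PYTHON) matters whenever
the interpreter path contains spaces: unquoted, the shell splits it into
several words. The hazard disappears entirely when a process is spawned from
an argument list instead of a shell string, e.g. (generic sketch; the path
is hypothetical):

  import subprocess
  python = '/opt/my python/bin/python3'  # hypothetical path with a space
  # list-form argv: no shell involved, so no word splitting, no quoting
  subprocess.check_call([python, 'hgweb.cgi'])
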
--- a/tests/test-clone-pull-corruption.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clone-pull-corruption.t	Mon Oct 22 14:46:06 2018 -0400
@@ -48,6 +48,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
 
   $ cd ..
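
The verify summary that changes throughout this series went from
"N files, M changesets, K total revisions" to the more explicit
"checked M changesets with K changes to N files". It is a plain format
string over the same three counters, which also explains the unidiomatic
"to 1 files" (a sketch, not the verifier itself):

  changesets, changes, files = 2, 2, 1
  print('checked %d changesets with %d changes to %d files'
        % (changesets, changes, files))
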
--- a/tests/test-clone-r.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clone-r.t	Mon Oct 22 14:46:06 2018 -0400
@@ -37,7 +37,7 @@
   $ hg mv afile anotherfile
   $ hg commit -m "0.3m"
 
-  $ hg debugindex -f 1 afile
+  $ hg debugrevlogindex -f 1 afile
      rev flag     size   link     p1     p2       nodeid
        0 0000        2      0     -1     -1 362fef284ce2
        1 0000        4      1      0     -1 125144f7e028
@@ -71,7 +71,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
 
   $ cd ..
 
@@ -96,7 +96,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   
   ---- hg clone -r 1 test test-1
   adding changesets
@@ -110,7 +110,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   
   ---- hg clone -r 2 test test-2
   adding changesets
@@ -124,7 +124,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   
   ---- hg clone -r 3 test test-3
   adding changesets
@@ -138,7 +138,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 1 files
   
   ---- hg clone -r 4 test test-4
   adding changesets
@@ -152,7 +152,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   
   ---- hg clone -r 5 test test-5
   adding changesets
@@ -166,7 +166,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   
   ---- hg clone -r 6 test test-6
   adding changesets
@@ -180,7 +180,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 5 total revisions
+  checked 4 changesets with 5 changes to 2 files
   
   ---- hg clone -r 7 test test-7
   adding changesets
@@ -194,7 +194,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 6 total revisions
+  checked 5 changesets with 6 changes to 3 files
   
   ---- hg clone -r 8 test test-8
   adding changesets
@@ -208,7 +208,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
 
   $ cd test-8
   $ hg pull ../test-7
@@ -225,7 +225,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ cd ..
 
   $ hg clone test test-9
--- a/tests/test-clone-uncompressed.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clone-uncompressed.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2,10 +2,10 @@
 
 #testcases stream-legacy stream-bundle2
 
-#if stream-bundle2
+#if stream-legacy
   $ cat << EOF >> $HGRCPATH
-  > [experimental]
-  > bundle2.stream = yes
+  > [server]
+  > bundle2.stream = no
   > EOF
 #endif
 
@@ -247,6 +247,7 @@
   sending stream_out command
   1027 files to transfer, 96.3 KB of data
   starting 4 threads for background file closing
+  updating the branch cache
   transferred 96.3 KB in * seconds (*/sec) (glob)
   query 1; heads
   sending batch command
@@ -260,6 +261,7 @@
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
 #endif
 #if stream-bundle2
   $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
@@ -275,11 +277,13 @@
   1030 files to transfer, 96.4 KB of data
   starting 4 threads for background file closing
   starting 4 threads for background file closing
+  updating the branch cache
   transferred 96.4 KB in * seconds (* */sec) (glob)
   bundle2-input-part: total payload size 112077
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
 #endif
 
 Cannot stream clone when there are secret changesets
@@ -510,3 +514,54 @@
 #endif
 
   $ killdaemons.py
+
+#if stream-legacy
+
+With v1 of the stream protocol, changesets are always cloned as public. There is
+no exchange of obsolescence markers in stream v1.
+
+#endif
+#if stream-bundle2
+
+Stream repository with obsolescence
+-----------------------------------
+
+Clone non-publishing with obsolescence
+
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution=all
+  > EOF
+
+  $ cd server
+  $ echo foo > foo
+  $ hg -q commit -m 'about to be pruned'
+  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
+  obsoleted 1 changesets
+  $ hg up null -q
+  $ hg log -T '{rev}: {phase}\n'
+  1: draft
+  0: draft
+  $ hg serve -p $HGPORT -d --pid-file=hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+  $ cd ..
+
+  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
+  streaming all changes
+  1035 files to transfer, 97.1 KB of data
+  transferred 97.1 KB in * seconds (* */sec) (glob)
+  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
+  1: draft
+  0: draft
+  $ hg debugobsolete -R with-obsolescence
+  50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
+  streaming all changes
+  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
+  abort: pull failed on remote
+  [255]
+
+  $ killdaemons.py
+
+#endif
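
Two things happen in the test-clone-uncompressed.t hunks: bundle2-based
stream clone is now the default (the setup only opts out via
server.bundle2.stream=no for the legacy case, instead of opting in), and
stream clone learned to refuse clients that cannot receive obsolescence
markers. A stream clone ships raw store files, obsstore included, so the
server must gate on the client's capabilities. Roughly (a hedged sketch
with a hypothetical helper name; the real check lives in Mercurial's
streamclone code):

  from mercurial import error

  def checkobsstreamclone(repo, clientcaps):
      # hypothetical helper: refuse to stream raw store files when
      # markers would reach a client that never advertised support
      if repo.obsstore and b'obsmarkers' not in clientcaps:
          raise error.Abort(b'server has obsolescence markers, but client '
                            b'cannot receive them via stream clone')
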
--- a/tests/test-clone.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clone.t	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,7 @@
 
 Create a non-inlined filelog:
 
-  $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
+  $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
   $ for j in 0 1 2 3 4 5 6 7 8 9; do
   >   cat data1 >> b
   >   hg commit -m test
@@ -47,6 +47,7 @@
   checklink (symlink !)
   checklink-target (symlink !)
   checknoexec (execbit !)
+  manifestfulltextcache (reporevlogstore !)
   rbc-names-v1
   rbc-revs-v1
 
@@ -74,7 +75,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 11 changesets, 11 total revisions
+  checked 11 changesets with 11 changes to 2 files
 
 Invalid dest '' must abort:
 
@@ -145,7 +146,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 11 changesets, 11 total revisions
+  checked 11 changesets with 11 changes to 2 files
 
 Default destination:
 
@@ -190,7 +191,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 11 changesets, 11 total revisions
+  checked 11 changesets with 11 changes to 2 files
 
 Invalid dest '' with --pull must abort (issue2528):
 
@@ -557,27 +558,27 @@
 iterable in addbranchrevs()
 
   $ cat <<EOF > simpleclone.py
-  > from mercurial import ui, hg
-  > myui = ui.ui.load()
+  > from mercurial import hg, ui as uimod
+  > myui = uimod.ui.load()
   > repo = hg.repository(myui, b'a')
   > hg.clone(myui, {}, repo, dest=b"ua")
   > EOF
 
-  $ $PYTHON simpleclone.py
+  $ "$PYTHON" simpleclone.py
   updating to branch default
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ rm -r ua
 
   $ cat <<EOF > branchclone.py
-  > from mercurial import ui, hg, extensions
-  > myui = ui.ui.load()
+  > from mercurial import extensions, hg, ui as uimod
+  > myui = uimod.ui.load()
   > extensions.loadall(myui)
   > repo = hg.repository(myui, b'a')
   > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
   > EOF
 
-  $ $PYTHON branchclone.py
+  $ "$PYTHON" branchclone.py
   adding changesets
   adding manifests
   adding file changes
@@ -641,7 +642,7 @@
   $ mkdir a
   $ chmod 000 a
   $ hg clone a b
-  abort: repository a not found!
+  abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
   [255]
 
 Inaccessible destination
@@ -649,7 +650,7 @@
   $ hg init b
   $ cd b
   $ hg clone . ../a
-  abort: Permission denied: '../a'
+  abort: Permission denied: *../a* (glob)
   [255]
   $ cd ..
   $ chmod 700 a
@@ -664,7 +665,7 @@
 
   $ mkfifo a
   $ hg clone a b
-  abort: repository a not found!
+  abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
   [255]
   $ rm a
 
@@ -1176,14 +1177,14 @@
 #if windows
   $ hg clone "ssh://%26touch%20owned%20/" --debug
   running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
   abort: no suitable response from remote hg!
   [255]
   $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
   running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
   abort: no suitable response from remote hg!
@@ -1191,14 +1192,14 @@
 #else
   $ hg clone "ssh://%3btouch%20owned%20/" --debug
   running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
   abort: no suitable response from remote hg!
   [255]
   $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
   running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
   abort: no suitable response from remote hg!
@@ -1207,7 +1208,7 @@
 
   $ hg clone "ssh://v-alid.example.com/" --debug
   running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
   abort: no suitable response from remote hg!
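
test-clone.t leans heavily on the test runner's output-matching
annotations, which also explain several hunks above: a trailing (glob) lets
* and ? match anything (used for the new OS-dependent "Permission denied"
paths), (re) makes the whole line a regular expression, (esc) decodes
string escapes, and (feature !) lines apply only when the named feature is
available (sshv2, windows, ...). The (glob) rule reduces to a tiny
translation (sketch of the idea, not run-tests.py itself):

  import re

  def globmatch(expected, actual):
      # minimal (glob) semantics: * -> .*, ? -> ., everything else literal
      pattern = re.escape(expected).replace(r'\*', '.*').replace(r'\?', '.')
      return re.match(pattern + r'\Z', actual) is not None

  globmatch('abort: Permission denied: *../a*',
            "abort: Permission denied: '../a'")   # True
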
--- a/tests/test-clonebundles.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-clonebundles.t	Mon Oct 22 14:46:06 2018 -0400
@@ -27,6 +27,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   new changesets 53245c60e682:aaff8d2ffbbf
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ cat server/access.log
   * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
@@ -45,6 +46,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   new changesets 53245c60e682:aaff8d2ffbbf
+  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
 
 Manifest file with invalid URL aborts
 
@@ -52,7 +54,7 @@
   $ hg clone http://localhost:$HGPORT 404-url
   applying clone bundle from http://does.not.exist/bundle.hg
   error fetching bundle: (.* not known|(\[Errno -?\d+])? No address associated with hostname) (re) (no-windows !)
-  error fetching bundle: [Errno 11004] getaddrinfo failed (windows !)
+  error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
   abort: error applying bundle
   (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
   [255]
@@ -465,10 +467,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  4 files to transfer, 613 bytes of data
-  transferred 613 bytes in * seconds (*) (glob)
-  searching for changes
-  no changes found
+  9 files to transfer, 816 bytes of data
+  transferred 816 bytes in * seconds (*) (glob)
 
 A manifest with a stream clone but no BUNDLESPEC
 
@@ -480,10 +480,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  4 files to transfer, 613 bytes of data
-  transferred 613 bytes in * seconds (*) (glob)
-  searching for changes
-  no changes found
+  9 files to transfer, 816 bytes of data
+  transferred 816 bytes in * seconds (*) (glob)
 
 A manifest with a gzip bundle and a stream clone
 
@@ -526,10 +524,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  4 files to transfer, 613 bytes of data
-  transferred 613 bytes in * seconds (*) (glob)
-  searching for changes
-  no changes found
+  9 files to transfer, 816 bytes of data
+  transferred 816 bytes in * seconds (*) (glob)
 
 Test clone bundle retrieved through bundle2
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-close-head.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,74 @@
+  $ hg init test-content
+  $ cd test-content
+  $ hg debugbuilddag '+2*2*3*4+7'
+  $ hg bookmark -r 1 @
+  $ hg log -G --template '{rev}:{node|short}'
+  o  11:1d876b1f862c
+  |
+  o  10:ea5f71948eb8
+  |
+  o  9:f1b0356d867a
+  |
+  o  8:e8d1253fb0d7
+  |
+  o  7:d423bbba4459
+  |
+  o  6:a2f58e9c1e56
+  |
+  o  5:3a367db1fabc
+  |
+  o  4:e7bd5218ca15
+  |
+  | o  3:6100d3090acf
+  |/
+  | o  2:fa942426a6fd
+  |/
+  | o  1:66f7d451a68b
+  |/
+  o  0:1ea73414a91b
+  
+  $ hg --config extensions.closehead= close-head -m 'Not a head' 0 1
+  abort: revision is not an open head: 0
+  [255]
+  $ hg --config extensions.closehead= close-head -m 'Not a head' -r 0 1
+  abort: revision is not an open head: 0
+  [255]
+  $ hg --config extensions.closehead= close-head -m 'Close old heads' -r 1 2
+  $ hg bookmark
+     @                         1:66f7d451a68b
+  $ hg heads
+  changeset:   11:1d876b1f862c
+  user:        debugbuilddag
+  date:        Thu Jan 01 00:00:11 1970 +0000
+  summary:     r11
+  
+  changeset:   3:6100d3090acf
+  parent:      0:1ea73414a91b
+  user:        debugbuilddag
+  date:        Thu Jan 01 00:00:03 1970 +0000
+  summary:     r3
+  
+  $ hg --config extensions.closehead= close-head -m 'Close more old heads' -r 11
+  $ hg heads
+  changeset:   3:6100d3090acf
+  parent:      0:1ea73414a91b
+  user:        debugbuilddag
+  date:        Thu Jan 01 00:00:03 1970 +0000
+  summary:     r3
+  
+  $ hg --config extensions.closehead= close-head -m 'Not a head' 0
+  abort: revision is not an open head: 0
+  [255]
+  $ hg --config extensions.closehead= close-head -m 'Already closed head' 1
+  abort: revision is not an open head: 1
+  [255]
+
+  $ hg init ../test-empty
+  $ cd ../test-empty
+  $ hg debugbuilddag '+1'
+  $ hg log -G --template '{rev}:{node|short}'
+  o  0:1ea73414a91b
+  
+  $ hg --config extensions.closehead= close-head -m 'Close initial revision' 0
+  $ hg heads
+  [1]
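
The new test-close-head.t exercises the closehead extension, which closes
branch heads without touching the working directory. The manual workflow it
automates is an update/commit round-trip per head (a sketch of the
equivalent steps, not of the extension's in-memory implementation):

  import subprocess

  def close_heads(revs, message):
      # per head: check it out, then commit a close-branch marker on it
      for rev in revs:
          subprocess.check_call(['hg', 'update', '--quiet', '--clean', rev])
          subprocess.check_call(['hg', 'commit', '--close-branch',
                                 '-m', message])

  # close_heads(['1', '2'], 'Close old heads')
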
--- a/tests/test-commandserver.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commandserver.t	Mon Oct 22 14:46:06 2018 -0400
@@ -13,17 +13,17 @@
   $ hg init repo
   $ cd repo
 
-  >>> from __future__ import absolute_import, print_function
+  >>> from __future__ import absolute_import
   >>> import os
   >>> import sys
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def hellomessage(server):
   ...     ch, data = readchannel(server)
-  ...     print('%c, %r' % (ch, data))
+  ...     bprint(b'%c, %r' % (ch, data))
   ...     # run an arbitrary command to make sure the next thing the server
   ...     # sends isn't part of the hello message
-  ...     runcommand(server, ['id'])
+  ...     runcommand(server, [b'id'])
   o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
   *** runcommand id
   000000000000 tip
@@ -31,7 +31,7 @@
   >>> from hgclient import check
   >>> @check
   ... def unknowncommand(server):
-  ...     server.stdin.write('unknowncommand\n')
+  ...     server.stdin.write(b'unknowncommand\n')
   abort: unknown command unknowncommand
 
   >>> from hgclient import check, readchannel, runcommand
@@ -44,19 +44,19 @@
   ...     runcommand(server, [])
   ... 
   ...     # global options
-  ...     runcommand(server, ['id', '--quiet'])
+  ...     runcommand(server, [b'id', b'--quiet'])
   ... 
   ...     # make sure global options don't stick through requests
-  ...     runcommand(server, ['id'])
+  ...     runcommand(server, [b'id'])
   ... 
   ...     # --config
-  ...     runcommand(server, ['id', '--config', 'ui.quiet=True'])
+  ...     runcommand(server, [b'id', b'--config', b'ui.quiet=True'])
   ... 
   ...     # make sure --config doesn't stick
-  ...     runcommand(server, ['id'])
+  ...     runcommand(server, [b'id'])
   ... 
   ...     # negative return code should be masked
-  ...     runcommand(server, ['id', '-runknown'])
+  ...     runcommand(server, [b'id', b'-runknown'])
   *** runcommand 
   Mercurial Distributed SCM
   
@@ -93,16 +93,16 @@
   abort: unknown revision 'unknown'!
    [255]
 
-  >>> from hgclient import check, readchannel
+  >>> from hgclient import bprint, check, readchannel
   >>> @check
   ... def inputeof(server):
   ...     readchannel(server)
-  ...     server.stdin.write('runcommand\n')
+  ...     server.stdin.write(b'runcommand\n')
   ...     # close stdin while server is waiting for input
   ...     server.stdin.close()
   ... 
   ...     # server exits with 1 if the pipe closed while reading the command
-  ...     print('server exit code =', server.wait())
+  ...     bprint(b'server exit code =', b'%d' % server.wait())
   server exit code = 1
 
   >>> from hgclient import check, readchannel, runcommand, stringio
@@ -110,7 +110,7 @@
   ... def serverinput(server):
   ...     readchannel(server)
   ... 
-  ...     patch = """
+  ...     patch = b"""
   ... # HG changeset patch
   ... # User test
   ... # Date 0 0
@@ -125,8 +125,8 @@
   ... +1
   ... """
   ... 
-  ...     runcommand(server, ['import', '-'], input=stringio(patch))
-  ...     runcommand(server, ['log'])
+  ...     runcommand(server, [b'import', b'-'], input=stringio(patch))
+  ...     runcommand(server, [b'log'])
   *** runcommand import -
   applying patch from stdin
   *** runcommand log
@@ -145,23 +145,22 @@
   >>> @check
   ... def cwd(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['log', '-b', '--config=alias.log=!echo pwned',
-  ...                         'default'])
+  ...     runcommand(server, [b'log', b'-b', b'--config=alias.log=!echo pwned',
+  ...                         b'default'])
   *** runcommand log -b --config=alias.log=!echo pwned default
   abort: unknown revision '--config=alias.log=!echo pwned'!
    [255]
 
 check that "histedit --commands=-" can read rules from the input channel:
 
-  >>> import cStringIO
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import check, readchannel, runcommand, stringio
   >>> @check
   ... def serverinput(server):
   ...     readchannel(server)
-  ...     rules = 'pick eff892de26ec\n'
-  ...     runcommand(server, ['histedit', '0', '--commands=-',
-  ...                         '--config', 'extensions.histedit='],
-  ...                input=cStringIO.StringIO(rules))
+  ...     rules = b'pick eff892de26ec\n'
+  ...     runcommand(server, [b'histedit', b'0', b'--commands=-',
+  ...                         b'--config', b'extensions.histedit='],
+  ...                input=stringio(rules))
   *** runcommand histedit 0 --commands=- --config extensions.histedit=
 
 check that --cwd doesn't persist between requests:
@@ -172,8 +171,8 @@
   >>> @check
   ... def cwd(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
-  ...     runcommand(server, ['st', 'foo/bar'])
+  ...     runcommand(server, [b'--cwd', b'foo', b'st', b'bar'])
+  ...     runcommand(server, [b'st', b'foo/bar'])
   *** runcommand --cwd foo st bar
   ? bar
   *** runcommand st foo/bar
@@ -198,11 +197,11 @@
   ... 
   ...     # the cached repo local hgrc contains ui.foo=bar, so showconfig should
   ...     # show it
-  ...     runcommand(server, ['showconfig'], outfilter=sep)
+  ...     runcommand(server, [b'showconfig'], outfilter=sep)
   ... 
   ...     # but not for this repo
-  ...     runcommand(server, ['init', 'foo'])
-  ...     runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
+  ...     runcommand(server, [b'init', b'foo'])
+  ...     runcommand(server, [b'-R', b'foo', b'showconfig', b'ui', b'defaults'])
   *** runcommand showconfig
   bundle.mainreporoot=$TESTTMP/repo
   devel.all-warnings=true
@@ -235,21 +234,21 @@
 #endif
 
   $ cat <<EOF > hook.py
-  > from __future__ import print_function
   > import sys
+  > from hgclient import bprint
   > def hook(**args):
-  >     print('hook talking')
-  >     print('now try to read something: %r' % sys.stdin.read())
+  >     bprint(b'hook talking')
+  >     bprint(b'now try to read something: %r' % sys.stdin.read())
   > EOF
 
   >>> from hgclient import check, readchannel, runcommand, stringio
   >>> @check
   ... def hookoutput(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['--config',
-  ...                         'hooks.pre-identify=python:hook.hook',
-  ...                         'id'],
-  ...                input=stringio('some input'))
+  ...     runcommand(server, [b'--config',
+  ...                         b'hooks.pre-identify=python:hook.hook',
+  ...                         b'id'],
+  ...                input=stringio(b'some input'))
   *** runcommand --config hooks.pre-identify=python:hook.hook id
   eff892de26ec tip
   hook talking
@@ -265,10 +264,10 @@
   >>> @check
   ... def outsidechanges(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['status'])
+  ...     runcommand(server, [b'status'])
   ...     os.system('hg ci -Am2')
-  ...     runcommand(server, ['tip'])
-  ...     runcommand(server, ['status'])
+  ...     runcommand(server, [b'tip'])
+  ...     runcommand(server, [b'status'])
   *** runcommand status
   M a
   *** runcommand tip
@@ -281,28 +280,28 @@
   *** runcommand status
 
   >>> import os
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def bookmarks(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['bookmarks'])
+  ...     runcommand(server, [b'bookmarks'])
   ... 
   ...     # changes .hg/bookmarks
   ...     os.system('hg bookmark -i bm1')
   ...     os.system('hg bookmark -i bm2')
-  ...     runcommand(server, ['bookmarks'])
+  ...     runcommand(server, [b'bookmarks'])
   ... 
   ...     # changes .hg/bookmarks.current
   ...     os.system('hg upd bm1 -q')
-  ...     runcommand(server, ['bookmarks'])
+  ...     runcommand(server, [b'bookmarks'])
   ... 
-  ...     runcommand(server, ['bookmarks', 'bm3'])
+  ...     runcommand(server, [b'bookmarks', b'bm3'])
   ...     f = open('a', 'ab')
-  ...     f.write('a\n')
+  ...     f.write(b'a\n') and None
   ...     f.close()
-  ...     runcommand(server, ['commit', '-Amm'])
-  ...     runcommand(server, ['bookmarks'])
-  ...     print('')
+  ...     runcommand(server, [b'commit', b'-Amm'])
+  ...     runcommand(server, [b'bookmarks'])
+  ...     bprint(b'')
   *** runcommand bookmarks
   no bookmarks set
   *** runcommand bookmarks
@@ -324,9 +323,9 @@
   >>> @check
   ... def tagscache(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['id', '-t', '-r', '0'])
+  ...     runcommand(server, [b'id', b'-t', b'-r', b'0'])
   ...     os.system('hg tag -r 0 foo')
-  ...     runcommand(server, ['id', '-t', '-r', '0'])
+  ...     runcommand(server, [b'id', b'-t', b'-r', b'0'])
   *** runcommand id -t -r 0
   
   *** runcommand id -t -r 0
@@ -337,24 +336,24 @@
   >>> @check
   ... def setphase(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['phase', '-r', '.'])
+  ...     runcommand(server, [b'phase', b'-r', b'.'])
   ...     os.system('hg phase -r . -p')
-  ...     runcommand(server, ['phase', '-r', '.'])
+  ...     runcommand(server, [b'phase', b'-r', b'.'])
   *** runcommand phase -r .
   3: draft
   *** runcommand phase -r .
   3: public
 
   $ echo a >> a
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def rollback(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['phase', '-r', '.', '-p'])
-  ...     runcommand(server, ['commit', '-Am.'])
-  ...     runcommand(server, ['rollback'])
-  ...     runcommand(server, ['phase', '-r', '.'])
-  ...     print('')
+  ...     runcommand(server, [b'phase', b'-r', b'.', b'-p'])
+  ...     runcommand(server, [b'commit', b'-Am.'])
+  ...     runcommand(server, [b'rollback'])
+  ...     runcommand(server, [b'phase', b'-r', b'.'])
+  ...     bprint(b'')
   *** runcommand phase -r . -p
   no phases changed
   *** runcommand commit -Am.
@@ -370,9 +369,9 @@
   >>> @check
   ... def branch(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['branch'])
+  ...     runcommand(server, [b'branch'])
   ...     os.system('hg branch foo')
-  ...     runcommand(server, ['branch'])
+  ...     runcommand(server, [b'branch'])
   ...     os.system('hg branch default')
   *** runcommand branch
   default
@@ -385,19 +384,19 @@
 
   $ touch .hgignore
   >>> import os
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def hgignore(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit', '-Am.'])
+  ...     runcommand(server, [b'commit', b'-Am.'])
   ...     f = open('ignored-file', 'ab')
-  ...     f.write('')
+  ...     f.write(b'') and None
   ...     f.close()
   ...     f = open('.hgignore', 'ab')
-  ...     f.write('ignored-file')
+  ...     f.write(b'ignored-file')
   ...     f.close()
-  ...     runcommand(server, ['status', '-i', '-u'])
-  ...     print('')
+  ...     runcommand(server, [b'status', b'-i', b'-u'])
+  ...     bprint(b'')
   *** runcommand commit -Am.
   adding .hgignore
   *** runcommand status -i -u
@@ -408,22 +407,22 @@
 (issue4855):
 
   >>> import os
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def phasesetscacheaftercommit(server):
   ...     readchannel(server)
   ...     # load _phasecache._phaserevs and _phasesets
-  ...     runcommand(server, ['log', '-qr', 'draft()'])
+  ...     runcommand(server, [b'log', b'-qr', b'draft()'])
   ...     # create draft commits by another process
   ...     for i in range(5, 7):
   ...         f = open('a', 'ab')
   ...         f.seek(0, os.SEEK_END)
-  ...         f.write('a\n')
+  ...         f.write(b'a\n') and None
   ...         f.close()
   ...         os.system('hg commit -Aqm%d' % i)
   ...     # new commits should be listed as draft revisions
-  ...     runcommand(server, ['log', '-qr', 'draft()'])
-  ...     print('')
+  ...     runcommand(server, [b'log', b'-qr', b'draft()'])
+  ...     bprint(b'')
   *** runcommand log -qr draft()
   4:7966c8e3734d
   *** runcommand log -qr draft()
@@ -433,17 +432,17 @@
   
 
   >>> import os
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def phasesetscacheafterstrip(server):
   ...     readchannel(server)
   ...     # load _phasecache._phaserevs and _phasesets
-  ...     runcommand(server, ['log', '-qr', 'draft()'])
+  ...     runcommand(server, [b'log', b'-qr', b'draft()'])
   ...     # strip cached revisions by another process
   ...     os.system('hg --config extensions.strip= strip -q 5')
   ...     # shouldn't abort by "unknown revision '6'"
-  ...     runcommand(server, ['log', '-qr', 'draft()'])
-  ...     print('')
+  ...     runcommand(server, [b'log', b'-qr', b'draft()'])
+  ...     bprint(b'')
   *** runcommand log -qr draft()
   4:7966c8e3734d
   5:41f6602d1c4f
@@ -461,23 +460,23 @@
   ...     readchannel(server)
   ... 
   ...     # create new head, 5:731265503d86
-  ...     runcommand(server, ['update', '-C', '0'])
+  ...     runcommand(server, [b'update', b'-C', b'0'])
   ...     f = open('a', 'ab')
-  ...     f.write('a\n')
+  ...     f.write(b'a\n') and None
   ...     f.close()
-  ...     runcommand(server, ['commit', '-Am.', 'a'])
-  ...     runcommand(server, ['log', '-Gq'])
+  ...     runcommand(server, [b'commit', b'-Am.', b'a'])
+  ...     runcommand(server, [b'log', b'-Gq'])
   ... 
   ...     # make it public; draft marker moves to 4:7966c8e3734d
-  ...     runcommand(server, ['phase', '-p', '.'])
+  ...     runcommand(server, [b'phase', b'-p', b'.'])
   ...     # load _phasecache.phaseroots
-  ...     runcommand(server, ['phase', '.'], outfilter=sep)
+  ...     runcommand(server, [b'phase', b'.'], outfilter=sep)
   ... 
   ...     # strip 1::4 outside server
   ...     os.system('hg -q --config extensions.mq= strip 1')
   ... 
   ...     # shouldn't raise "7966c8e3734d: no node!"
-  ...     runcommand(server, ['branches'])
+  ...     runcommand(server, [b'branches'])
   *** runcommand update -C 0
   1 files updated, 0 files merged, 2 files removed, 0 files unresolved
   (leaving bookmark bm3)
@@ -510,9 +509,9 @@
   >>> @check
   ... def txabort(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit', '--config', 'hooks.pretxncommit=false',
-  ...                         '-mfoo'])
-  ...     runcommand(server, ['verify'])
+  ...     runcommand(server, [b'commit', b'--config', b'hooks.pretxncommit=false',
+  ...                         b'-mfoo'])
+  ...     runcommand(server, [b'verify'])
   *** runcommand commit --config hooks.pretxncommit=false -mfoo
   transaction abort!
   rollback completed
@@ -523,7 +522,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   $ hg revert --no-backup -aq
 
   $ cat >> .hg/hgrc << EOF
@@ -537,14 +536,14 @@
   ... def obsolete(server):
   ...     readchannel(server)
   ... 
-  ...     runcommand(server, ['up', 'null'])
-  ...     runcommand(server, ['phase', '-df', 'tip'])
+  ...     runcommand(server, [b'up', b'null'])
+  ...     runcommand(server, [b'phase', b'-df', b'tip'])
   ...     cmd = 'hg debugobsolete `hg log -r tip --template {node}`'
   ...     if os.name == 'nt':
   ...         cmd = 'sh -c "%s"' % cmd # run in sh, not cmd.exe
   ...     os.system(cmd)
-  ...     runcommand(server, ['log', '--hidden'])
-  ...     runcommand(server, ['log'])
+  ...     runcommand(server, [b'log', b'--hidden'])
+  ...     runcommand(server, [b'log'])
   *** runcommand up null
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   *** runcommand phase -df tip
@@ -588,15 +587,15 @@
   ...     readchannel(server)
   ... 
   ...     # load repo.mq
-  ...     runcommand(server, ['qapplied'])
+  ...     runcommand(server, [b'qapplied'])
   ...     os.system('hg qnew 0.diff')
   ...     # repo.mq should be invalidated
-  ...     runcommand(server, ['qapplied'])
+  ...     runcommand(server, [b'qapplied'])
   ... 
-  ...     runcommand(server, ['qpop', '--all'])
+  ...     runcommand(server, [b'qpop', b'--all'])
   ...     os.system('hg qqueue --create foo')
   ...     # repo.mq should be recreated to point to new queue
-  ...     runcommand(server, ['qqueue', '--active'])
+  ...     runcommand(server, [b'qqueue', b'--active'])
   *** runcommand qapplied
   *** runcommand qapplied
   0.diff
@@ -614,16 +613,16 @@
   > command = registrar.command(cmdtable)
   > @command(b"debuggetpass", norepo=True)
   > def debuggetpass(ui):
-  >     ui.write("%s\\n" % ui.getpass())
+  >     ui.write(b"%s\\n" % ui.getpass())
   > @command(b"debugprompt", norepo=True)
   > def debugprompt(ui):
-  >     ui.write("%s\\n" % ui.prompt("prompt:"))
+  >     ui.write(b"%s\\n" % ui.prompt(b"prompt:"))
   > @command(b"debugreadstdin", norepo=True)
   > def debugreadstdin(ui):
-  >     ui.write("read: %r\n" % sys.stdin.read(1))
+  >     ui.write(b"read: %r\n" % sys.stdin.read(1))
   > @command(b"debugwritestdout", norepo=True)
   > def debugwritestdout(ui):
-  >     os.write(1, "low-level stdout fd and\n")
+  >     os.write(1, b"low-level stdout fd and\n")
   >     sys.stdout.write("stdout should be redirected to stderr\n")
   >     sys.stdout.flush()
   > EOF
@@ -636,20 +635,20 @@
   >>> @check
   ... def getpass(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['debuggetpass', '--config',
-  ...                         'ui.interactive=True'],
-  ...                input=stringio('1234\n'))
-  ...     runcommand(server, ['debuggetpass', '--config',
-  ...                         'ui.interactive=True'],
-  ...                input=stringio('\n'))
-  ...     runcommand(server, ['debuggetpass', '--config',
-  ...                         'ui.interactive=True'],
-  ...                input=stringio(''))
-  ...     runcommand(server, ['debugprompt', '--config',
-  ...                         'ui.interactive=True'],
-  ...                input=stringio('5678\n'))
-  ...     runcommand(server, ['debugreadstdin'])
-  ...     runcommand(server, ['debugwritestdout'])
+  ...     runcommand(server, [b'debuggetpass', b'--config',
+  ...                         b'ui.interactive=True'],
+  ...                input=stringio(b'1234\n'))
+  ...     runcommand(server, [b'debuggetpass', b'--config',
+  ...                         b'ui.interactive=True'],
+  ...                input=stringio(b'\n'))
+  ...     runcommand(server, [b'debuggetpass', b'--config',
+  ...                         b'ui.interactive=True'],
+  ...                input=stringio(b''))
+  ...     runcommand(server, [b'debugprompt', b'--config',
+  ...                         b'ui.interactive=True'],
+  ...                input=stringio(b'5678\n'))
+  ...     runcommand(server, [b'debugreadstdin'])
+  ...     runcommand(server, [b'debugwritestdout'])
   *** runcommand debuggetpass --config ui.interactive=True
   password: 1234
   *** runcommand debuggetpass --config ui.interactive=True
@@ -668,19 +667,18 @@
 
 run commandserver in commandserver, which is silly but should work:
 
-  >>> from __future__ import print_function
-  >>> from hgclient import check, readchannel, runcommand, stringio
+  >>> from hgclient import bprint, check, readchannel, runcommand, stringio
   >>> @check
   ... def nested(server):
-  ...     print('%c, %r' % readchannel(server))
+  ...     bprint(b'%c, %r' % readchannel(server))
   ...     class nestedserver(object):
-  ...         stdin = stringio('getencoding\n')
+  ...         stdin = stringio(b'getencoding\n')
   ...         stdout = stringio()
-  ...     runcommand(server, ['serve', '--cmdserver', 'pipe'],
+  ...     runcommand(server, [b'serve', b'--cmdserver', b'pipe'],
   ...                output=nestedserver.stdout, input=nestedserver.stdin)
   ...     nestedserver.stdout.seek(0)
-  ...     print('%c, %r' % readchannel(nestedserver))  # hello
-  ...     print('%c, %r' % readchannel(nestedserver))  # getencoding
+  ...     bprint(b'%c, %r' % readchannel(nestedserver))  # hello
+  ...     bprint(b'%c, %r' % readchannel(nestedserver))  # getencoding
   o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
   *** runcommand serve --cmdserver pipe
   o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
@@ -691,15 +689,14 @@
 
   $ cd ..
 
-  >>> from __future__ import print_function
-  >>> from hgclient import check, readchannel, runcommand
+  >>> from hgclient import bprint, check, readchannel, runcommand
   >>> @check
   ... def hellomessage(server):
   ...     ch, data = readchannel(server)
-  ...     print('%c, %r' % (ch, data))
+  ...     bprint(b'%c, %r' % (ch, data))
   ...     # run an arbitrary command to make sure the next thing the server
   ...     # sends isn't part of the hello message
-  ...     runcommand(server, ['id'])
+  ...     runcommand(server, [b'id'])
   o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
   *** runcommand id
   abort: there is no Mercurial repository here (.hg not found)
@@ -709,8 +706,8 @@
   >>> @check
   ... def startwithoutrepo(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['init', 'repo2'])
-  ...     runcommand(server, ['id', '-R', 'repo2'])
+  ...     runcommand(server, [b'init', b'repo2'])
+  ...     runcommand(server, [b'id', b'-R', b'repo2'])
   *** runcommand init repo2
   *** runcommand id -R repo2
   000000000000 tip
@@ -732,24 +729,23 @@
 
 #if unix-socket unix-permissions
 
-  >>> from __future__ import print_function
-  >>> from hgclient import check, readchannel, runcommand, stringio, unixserver
-  >>> server = unixserver('.hg/server.sock', '.hg/server.log')
+  >>> from hgclient import bprint, check, readchannel, runcommand, stringio, unixserver
+  >>> server = unixserver(b'.hg/server.sock', b'.hg/server.log')
   >>> def hellomessage(conn):
   ...     ch, data = readchannel(conn)
-  ...     print('%c, %r' % (ch, data))
-  ...     runcommand(conn, ['id'])
+  ...     bprint(b'%c, %r' % (ch, data))
+  ...     runcommand(conn, [b'id'])
   >>> check(hellomessage, server.connect)
   o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
   *** runcommand id
   eff892de26ec tip bm1/bm2/bm3
   >>> def unknowncommand(conn):
   ...     readchannel(conn)
-  ...     conn.stdin.write('unknowncommand\n')
+  ...     conn.stdin.write(b'unknowncommand\n')
   >>> check(unknowncommand, server.connect)  # error sent to server.log
   >>> def serverinput(conn):
   ...     readchannel(conn)
-  ...     patch = """
+  ...     patch = b"""
   ... # HG changeset patch
   ... # User test
   ... # Date 0 0
@@ -762,8 +758,8 @@
   ...  1
   ... +2
   ... """
-  ...     runcommand(conn, ['import', '-'], input=stringio(patch))
-  ...     runcommand(conn, ['log', '-rtip', '-q'])
+  ...     runcommand(conn, [b'import', b'-'], input=stringio(patch))
+  ...     runcommand(conn, [b'log', b'-rtip', b'-q'])
   >>> check(serverinput, server.connect)
   *** runcommand import -
   applying patch from stdin
@@ -784,26 +780,26 @@
   > [cmdserver]
   > log = inexistent/path.log
   > EOF
-  >>> from __future__ import print_function
-  >>> from hgclient import check, readchannel, unixserver
-  >>> server = unixserver('.hg/server.sock', '.hg/server.log')
+  >>> from hgclient import bprint, check, readchannel, unixserver
+  >>> server = unixserver(b'.hg/server.sock', b'.hg/server.log')
   >>> def earlycrash(conn):
   ...     while True:
   ...         try:
   ...             ch, data = readchannel(conn)
-  ...             if not data.startswith('  '):
-  ...                 print('%c, %r' % (ch, data))
+  ...             for l in data.splitlines(True):
+  ...                 if not l.startswith(b'  '):
+  ...                     bprint(b'%c, %r' % (ch, l))
   ...         except EOFError:
   ...             break
   >>> check(earlycrash, server.connect)
   e, 'Traceback (most recent call last):\n'
-  e, "IOError: *" (glob)
+  e, "(IOError|FileNotFoundError): .*" (re)
   >>> server.shutdown()
 
   $ cat .hg/server.log | grep -v '^  '
   listening at .hg/server.sock
   Traceback (most recent call last):
-  IOError: * (glob)
+  (IOError|FileNotFoundError): .* (re)
   killed!
 #endif
 #if no-unix-socket
@@ -835,19 +831,19 @@
   > command = registrar.command(cmdtable)
   > configtable = {}
   > configitem = registrar.configitem(configtable)
-  > configitem('failafterfinalize', 'fail',
+  > configitem(b'failafterfinalize', b'fail',
   >     default=None,
   > )
   > def fail(tr):
-  >     raise error.Abort('fail after finalization')
+  >     raise error.Abort(b'fail after finalization')
   > def reposetup(ui, repo):
   >     class failrepo(repo.__class__):
   >         def commitctx(self, ctx, error=False):
-  >             if self.ui.configbool('failafterfinalize', 'fail'):
+  >             if self.ui.configbool(b'failafterfinalize', b'fail'):
   >                 # 'sorted()' by ASCII code on category names causes
   >                 # invoking 'fail' after finalization of changelog
   >                 # using "'cl-%i' % id(self)" as category name
-  >                 self.currenttransaction().addfinalize('zzzzzzzz', fail)
+  >                 self.currenttransaction().addfinalize(b'zzzzzzzz', fail)
   >             return super(failrepo, self).commitctx(ctx, error)
   >     repo.__class__ = failrepo
   > EOF
@@ -874,11 +870,11 @@
   >>> @check
   ... def abort(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit',
-  ...                         '--config', 'hooks.pretxncommit=false',
-  ...                         '-mfoo'])
-  ...     runcommand(server, ['log'])
-  ...     runcommand(server, ['verify', '-q'])
+  ...     runcommand(server, [b'commit',
+  ...                         b'--config', b'hooks.pretxncommit=false',
+  ...                         b'-mfoo'])
+  ...     runcommand(server, [b'log'])
+  ...     runcommand(server, [b'verify', b'-q'])
   *** runcommand commit --config hooks.pretxncommit=false -mfoo
   transaction abort!
   rollback completed
@@ -893,11 +889,11 @@
   >>> @check
   ... def abort(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit',
-  ...                         '--config', 'failafterfinalize.fail=true',
-  ...                         '-mfoo'])
-  ...     runcommand(server, ['log'])
-  ...     runcommand(server, ['verify', '-q'])
+  ...     runcommand(server, [b'commit',
+  ...                         b'--config', b'failafterfinalize.fail=true',
+  ...                         b'-mfoo'])
+  ...     runcommand(server, [b'log'])
+  ...     runcommand(server, [b'verify', b'-q'])
   *** runcommand commit --config failafterfinalize.fail=true -mfoo
   transaction abort!
   rollback completed
@@ -918,11 +914,11 @@
   >>> @check
   ... def abort(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit',
-  ...                         '--config', 'hooks.pretxncommit=false',
-  ...                         '-mfoo', 'foo'])
-  ...     runcommand(server, ['log'])
-  ...     runcommand(server, ['verify', '-q'])
+  ...     runcommand(server, [b'commit',
+  ...                         b'--config', b'hooks.pretxncommit=false',
+  ...                         b'-mfoo', b'foo'])
+  ...     runcommand(server, [b'log'])
+  ...     runcommand(server, [b'verify', b'-q'])
   *** runcommand commit --config hooks.pretxncommit=false -mfoo foo
   transaction abort!
   rollback completed
@@ -938,11 +934,11 @@
   >>> @check
   ... def abort(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['commit',
-  ...                         '--config', 'failafterfinalize.fail=true',
-  ...                         '-mfoo', 'foo'])
-  ...     runcommand(server, ['log'])
-  ...     runcommand(server, ['verify', '-q'])
+  ...     runcommand(server, [b'commit',
+  ...                         b'--config', b'failafterfinalize.fail=true',
+  ...                         b'-mfoo', b'foo'])
+  ...     runcommand(server, [b'log'])
+  ...     runcommand(server, [b'verify', b'-q'])
   *** runcommand commit --config failafterfinalize.fail=true -mfoo foo
   transaction abort!
   rollback completed
@@ -989,10 +985,10 @@
   ... def merge(server):
   ...     readchannel(server)
   ...     # audit a/poisoned as a good path
-  ...     runcommand(server, ['up', '-qC', '2'])
-  ...     runcommand(server, ['up', '-qC', '1'])
+  ...     runcommand(server, [b'up', b'-qC', b'2'])
+  ...     runcommand(server, [b'up', b'-qC', b'1'])
   ...     # here a is a symlink, so a/poisoned is bad
-  ...     runcommand(server, ['merge', '2'])
+  ...     runcommand(server, [b'merge', b'2'])
   *** runcommand up -qC 2
   *** runcommand up -qC 1
   *** runcommand merge 2
@@ -1009,13 +1005,13 @@
   >>> @check
   ... def files(server):
   ...     readchannel(server)
-  ...     runcommand(server, ['up', '-qC', '2'])
+  ...     runcommand(server, [b'up', b'-qC', b'2'])
   ...     # audit a/poisoned as a good path
-  ...     runcommand(server, ['files', 'a/poisoned'])
-  ...     runcommand(server, ['up', '-qC', '0'])
-  ...     runcommand(server, ['up', '-qC', '1'])
+  ...     runcommand(server, [b'files', b'a/poisoned'])
+  ...     runcommand(server, [b'up', b'-qC', b'0'])
+  ...     runcommand(server, [b'up', b'-qC', b'1'])
   ...     # here 'a' is a symlink, so a/poisoned should be warned
-  ...     runcommand(server, ['files', 'a/poisoned'])
+  ...     runcommand(server, [b'files', b'a/poisoned'])
   *** runcommand up -qC 2
   *** runcommand files a/poisoned
   a/poisoned
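
The sweep of b'' prefixes above ports test-commandserver.t's inline
snippets to Python 3: the command server speaks bytes on its channels, so
every argument, patch, and rule fed to runcommand() must be a byte string,
and print() gives way to the hgclient helper bprint, which emits bytes on
either Python. Its shape is roughly (an assumption about the helper in
tests/hgclient.py, shown for orientation):

  import sys

  def bprint(*args):
      # join byte-string arguments and write them to the raw stdout buffer
      out = getattr(sys.stdout, 'buffer', sys.stdout)
      out.write(b' '.join(args) + b'\n')
      sys.stdout.flush()
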
--- a/tests/test-commit-amend.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commit-amend.t	Mon Oct 22 14:46:06 2018 -0400
@@ -824,7 +824,8 @@
   $ hg merge -q bar --config ui.interactive=True << EOF
   > c
   > EOF
-  local [working copy] changed aa which other [merge rev] deleted
+  file 'aa' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
   $ hg ci -m 'merge bar (with conflicts)'
   $ hg log --config diff.git=1 -pr .
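
The reworded merge prompt now states the situation before asking for a
decision. Prompts of this shape go through ui.promptchoice(), which encodes
the accepted responses inline after '$$' separators; roughly (a sketch
under that assumption):

  prompt = (b"file 'aa' was deleted in other [merge rev] but was modified "
            b"in local [working copy].\n"
            b"What do you want to do?\n"
            b"use (c)hanged version, (d)elete, or leave (u)nresolved?"
            b"$$ &Changed $$ &Delete $$ &Unresolved")
  # index = ui.promptchoice(prompt)   # 0 -> 'c', 1 -> 'd', 2 -> 'u'
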
--- a/tests/test-commit-interactive-curses.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commit-interactive-curses.t	Mon Oct 22 14:46:06 2018 -0400
@@ -327,30 +327,12 @@
   hello world
   lower
 
-Check spacemovesdown
-
-  $ cat <<EOF >> $HGRCPATH
-  > [experimental]
-  > spacemovesdown = true
-  > EOF
-  $ cat <<EOF >testModeCommands
-  > TOGGLE
-  > TOGGLE
-  > X
-  > EOF
-  $ hg status -q
-  M b
-  M x
-  $ hg commit -i -m "nothing to commit?" -d "0 0"
-  no changes to record
-  [1]
-
 Check ui.interface logic for the chunkselector
 
 The default interface is text
   $ cp $HGRCPATH.pretest $HGRCPATH
   $ chunkselectorinterface() {
-  > $PYTHON <<EOF
+  > "$PYTHON" <<EOF
   > from mercurial import hg, ui;\
   > repo = hg.repository(ui.ui.load(), ".");\
   > print(repo.ui.interface("chunkselector"))
--- a/tests/test-commit-interactive.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commit-interactive.t	Mon Oct 22 14:46:06 2018 -0400
@@ -338,7 +338,7 @@
 
 Record showfunc should preserve function across sections
 
-  $ cat > f1.py <<EOF
+  $ cat > f1.py <<NO_CHECK_EOF
   > def annotate(ui, repo, *pats, **opts):
   >     """show changeset information by line for each file
   > 
@@ -372,10 +372,10 @@
   >     .. container:: verbose
   > 
   >     Valid types are:
-  > EOF
+  > NO_CHECK_EOF
   $ hg add f1.py
   $ hg commit -m funcs
-  $ cat > f1.py <<EOF
+  $ cat > f1.py <<NO_CHECK_EOF
   > def annotate(ui, repo, *pats, **opts):
   >     """show changeset information by line for each file
   > 
@@ -405,7 +405,7 @@
   >     .. container:: verbose
   > 
   >     Valid types are:
-  > EOF
+  > NO_CHECK_EOF
   $ hg commit -i -m interactive <<EOF
   > y
   > y
@@ -915,7 +915,7 @@
   >         b''.join(escape(c) for c in pycompat.iterbytestr(l)))
   > EOF
 
-  $ hg commit -i --encoding cp932 2>&1 <<EOF | $PYTHON $TESTTMP/escape.py | grep '^y - '
+  $ hg commit -i --encoding cp932 2>&1 <<EOF | "$PYTHON" $TESTTMP/escape.py | grep '^y - '
   > ?
   > q
   > EOF
--- a/tests/test-commit-multiple.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commit-multiple.t	Mon Oct 22 14:46:06 2018 -0400
@@ -80,8 +80,8 @@
 
 now test that we fixed the bug for all scripts/extensions
   $ cat > $TESTTMP/committwice.py <<__EOF__
-  > from mercurial import ui, hg, match, node
-  > from time import sleep
+  > import time
+  > from mercurial import hg, match, node, ui as uimod
   > 
   > def replacebyte(fn, b):
   >     f = open(fn, "rb+")
@@ -94,12 +94,12 @@
   >                    % (rev, b', '.join(b"'%s'" % f
   >                                       for f in repo[rev].files())))
   > 
-  > repo = hg.repository(ui.ui.load(), b'.')
+  > repo = hg.repository(uimod.ui.load(), b'.')
   > assert len(repo) == 6, \
   >        "initial: len(repo): %d, expected: 6" % len(repo)
   > 
   > replacebyte(b"bugfix", b"u")
-  > sleep(2)
+  > time.sleep(2)
   > try:
   >     repo.ui.status(b"PRE: len(repo): %d\n" % len(repo))
   >     wlock = repo.wlock()
@@ -115,7 +115,7 @@
   > printfiles(repo, 6)
   > printfiles(repo, 7)
   > __EOF__
-  $ $PYTHON $TESTTMP/committwice.py
+  $ "$PYTHON" $TESTTMP/committwice.py
   PRE: len(repo): 6
   POST: len(repo): 8
   revision 6 files: ['bugfix', 'file1']
--- a/tests/test-commit.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-commit.t	Mon Oct 22 14:46:06 2018 -0400
@@ -650,11 +650,11 @@
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+  > c = context.memctx(r, [r.changelog.tip(), node.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
-  $ $PYTHON evil-commit.py
+  $ "$PYTHON" evil-commit.py
 #if windows
   $ hg co --clean tip
   abort: path contains illegal component: .h\xe2\x80\x8cg\\hgrc (esc)
@@ -680,7 +680,7 @@
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
-  $ $PYTHON evil-commit.py
+  $ "$PYTHON" evil-commit.py
   $ hg co --clean tip
   abort: path contains illegal component: HG~1/hgrc
   [255]
@@ -700,7 +700,7 @@
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
-  $ $PYTHON evil-commit.py
+  $ "$PYTHON" evil-commit.py
   $ hg co --clean tip
   abort: path contains illegal component: HG8B6C~2/hgrc
   [255]
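
All three evil-commit.py variants abort for the same reason: commit paths
are audited component by component, and anything that could materialize a
.hg directory on checkout is rejected, including Windows 8.3 short names
(HG~1, HG8B6C~2) and Unicode look-alikes. In outline (a deliberately
simplified sketch; the real rules live in Mercurial's pathauditor):

  def auditpath(path):
      for component in path.split('/'):
          if component.lower() in ('.hg', 'hg~1', 'hg8b6c~2'):
              raise ValueError('path contains illegal component: %s' % path)
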
--- a/tests/test-completion.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-completion.t	Mon Oct 22 14:46:06 2018 -0400
@@ -94,10 +94,12 @@
   debugignore
   debugindex
   debugindexdot
+  debugindexstats
   debuginstall
   debugknown
   debuglabelcomplete
   debuglocks
+  debugmanifestfulltextcache
   debugmergestate
   debugnamecomplete
   debugobsolete
@@ -110,6 +112,7 @@
   debugrebuildfncache
   debugrename
   debugrevlog
+  debugrevlogindex
   debugrevspec
   debugserve
   debugsetparents
@@ -228,31 +231,18 @@
 Show all commands + options
   $ hg debugcommands
   add: include, exclude, subrepos, dry-run
+  addremove: similarity, subrepos, include, exclude, dry-run
   annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
-  clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
-  commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
-  diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
-  export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
-  forget: interactive, include, exclude, dry-run
-  init: ssh, remotecmd, insecure
-  log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
-  merge: force, rev, preview, abort, tool
-  pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
-  push: force, rev, bookmark, branch, new-branch, pushvars, ssh, remotecmd, insecure
-  remove: after, force, subrepos, include, exclude, dry-run
-  serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
-  status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
-  summary: remote
-  update: clean, check, merge, date, rev, tool
-  addremove: similarity, subrepos, include, exclude, dry-run
   archive: no-decode, prefix, rev, type, subrepos, include, exclude
   backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
   bisect: reset, good, bad, skip, extend, command, noupdate
-  bookmarks: force, rev, delete, rename, inactive, template
+  bookmarks: force, rev, delete, rename, inactive, list, template
   branch: force, clean, rev
   branches: active, closed, template
   bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
   cat: output, rev, decode, include, exclude, template
+  clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
+  commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
   config: untrusted, edit, local, global, template
   copy: after, force, include, exclude, dry-run
   debugancestor: 
@@ -269,21 +259,23 @@
   debugdata: changelog, manifest, dir
   debugdate: extended
   debugdeltachain: changelog, manifest, dir, template
-  debugdirstate: nodates, datesort
+  debugdirstate: nodates, dates, datesort
   debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure
   debugdownload: output
   debugextensions: template
-  debugfileset: rev, all-files
+  debugfileset: rev, all-files, show-matcher, show-stage
   debugformat: template
   debugfsinfo: 
   debuggetbundle: head, common, type
   debugignore: 
-  debugindex: changelog, manifest, dir, format
+  debugindex: changelog, manifest, dir, template
   debugindexdot: changelog, manifest, dir
+  debugindexstats: 
   debuginstall: template
   debugknown: 
   debuglabelcomplete: 
   debuglocks: force-lock, force-wlock, set-lock, set-wlock
+  debugmanifestfulltextcache: clear, add
   debugmergestate: 
   debugnamecomplete: 
   debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
@@ -296,6 +288,7 @@
   debugrebuildfncache: 
   debugrename: rev
   debugrevlog: changelog, manifest, dir, dump
+  debugrevlogindex: changelog, manifest, dir, format
   debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
   debugserve: sshstdio, logiofd, logiofile
   debugsetparents: 
@@ -311,7 +304,10 @@
   debugwhyunstable: 
   debugwireargs: three, four, five, ssh, remotecmd, insecure
   debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
+  diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
+  export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
   files: rev, print0, include, exclude, template, subrepos
+  forget: interactive, include, exclude, dry-run
   graft: rev, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
   grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
   heads: rev, topo, active, closed, style, template
@@ -319,22 +315,32 @@
   identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
   import: strip, base, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
   incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
+  init: ssh, remotecmd, insecure
   locate: rev, print0, fullpath, include, exclude
+  log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
   manifest: rev, all, template
+  merge: force, rev, preview, abort, tool
   outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
   parents: rev, style, template
   paths: template
   phase: public, draft, secret, force, rev
+  pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
+  push: force, rev, bookmark, branch, new-branch, pushvars, ssh, remotecmd, insecure
   recover: 
+  remove: after, force, subrepos, include, exclude, dry-run
   rename: after, force, include, exclude, dry-run
-  resolve: all, list, mark, unmark, no-status, tool, include, exclude, template
+  resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
   revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
   rollback: dry-run, force
   root: 
+  serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
+  status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
+  summary: remote
   tag: force, local, rev, remove, edit, message, date, user
   tags: template
   tip: patch, git, style, template
   unbundle: update
+  update: clean, check, merge, date, rev, tool
   verify: 
   version: template
 
--- a/tests/test-conflict.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-conflict.t	Mon Oct 22 14:46:06 2018 -0400
@@ -58,8 +58,19 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg commit
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg merge --abort
   
+  $ hg status -Tjson
+  [
+   {
+    "path": "a",
+    "status": "M"
+   },
+   {
+    "path": "a.orig",
+    "status": "?"
+   }
+  ]
 
   $ cat a
   Small Mathematical Series.
@@ -137,7 +148,7 @@
 Verify line trimming of custom conflict marker using multi-byte characters
 
   $ hg up -q --clean .
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > fp = open('logfile', 'wb')
   > fp.write(b'12345678901234567890123456789012345678901234567890' +
   >          b'1234567890') # there are 5 more columns for 80 columns
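
The "hg status -Tjson" output exercised above is machine-readable. A minimal
sketch of consuming it, assuming only the "path" and "status" fields shown in
the expected output:

  import json
  import subprocess

  # Parse the templated status output demonstrated in the test above.
  # During an interrupted merge this lists "M" (modified) entries plus
  # "?" (unknown) backups such as a.orig.
  out = subprocess.check_output(['hg', 'status', '-Tjson'])
  for entry in json.loads(out):
      print('%s %s' % (entry['status'], entry['path']))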
--- a/tests/test-confused-revert.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-confused-revert.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,8 +14,8 @@
   R a
 
   $ hg revert --all
+  forgetting b
   undeleting a
-  forgetting b
 
 Should show b unknown and a back to normal:
 
@@ -66,8 +66,8 @@
 Revert should be ok now:
 
   $ hg revert -r2 --all
+  forgetting b
   undeleting a
-  forgetting b
 
 Should show b unknown and a marked modified (merged):
 
--- a/tests/test-context-metadata.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-context-metadata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,7 @@
   >     with repo.wlock(), repo.lock(), repo.transaction(b'metaedit'):
   >         old = repo[b'.']
   >         kwargs = dict(s.split(b'=', 1) for s in arg.split(b';'))
-  >         if 'parents' in kwargs:
+  >         if b'parents' in kwargs:
   >             kwargs[b'parents'] = map(int, kwargs[b'parents'].split(b','))
   >         new = context.metadataonlyctx(repo, old,
   >                                       **pycompat.strkwargs(kwargs))
--- a/tests/test-contrib-check-code.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-contrib-check-code.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,14 +1,14 @@
-  $ cat > correct.py <<EOF
+  $ cat > correct.py <<NO_CHECK_EOF
   > def toto(arg1, arg2):
   >     del arg2
   >     return (5 + 6, 9)
-  > EOF
-  $ cat > wrong.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > wrong.py <<NO_CHECK_EOF
   > def toto( arg1, arg2):
   >     del(arg2)
   >     return ( 5+6, 9)
-  > EOF
-  $ cat > quote.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > quote.py <<NO_CHECK_EOF
   > # let's use quote in comments
   > (''' ( 4x5 )
   > but """\\''' and finally''',
@@ -16,8 +16,8 @@
   > '"""', 42+1, """and
   > ( 4-1 ) """, "( 1+1 )\" and ")
   > a, '\\\\\\\\', "\\\\\\" x-2", "c-1"
-  > EOF
-  $ cat > classstyle.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > classstyle.py <<NO_CHECK_EOF
   > class newstyle_class(object):
   >     pass
   > 
@@ -29,7 +29,7 @@
   > 
   > no_class = 1:
   >     pass
-  > EOF
+  > NO_CHECK_EOF
   $ check_code="$TESTDIR"/../contrib/check-code.py
   $ "$check_code" ./wrong.py ./correct.py ./quote.py ./classstyle.py
   ./wrong.py:1:
@@ -52,11 +52,11 @@
    > class empty():
    class foo() creates old style object, use class foo(object)
   [1]
-  $ cat > python3-compat.py << EOF
+  $ cat > python3-compat.py << NO_CHECK_EOF
   > foo <> bar
   > reduce(lambda a, b: a + b, [1, 2, 3, 4])
   > dict(key=value)
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" python3-compat.py
   python3-compat.py:1:
    > foo <> bar
@@ -69,13 +69,13 @@
    dict() is different in Py2 and 3 and is slower than {}
   [1]
 
-  $ cat > foo.c <<EOF
+  $ cat > foo.c <<NO_CHECK_EOF
   > void narf() {
   > 	strcpy(foo, bar);
   > 	// strcpy_s is okay, but this comment is not
   > 	strcpy_s(foo, bar);
   > }
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" ./foo.c
   ./foo.c:2:
    > 	strcpy(foo, bar);
@@ -85,7 +85,7 @@
    don't use //-style comments
   [1]
 
-  $ cat > is-op.py <<EOF
+  $ cat > is-op.py <<NO_CHECK_EOF
   > # is-operator comparing number or string literal
   > x = None
   > y = x is 'foo'
@@ -96,7 +96,7 @@
   > y = x is not "foo"
   > y = x is not 5346
   > y = x is not -6
-  > EOF
+  > NO_CHECK_EOF
 
   $ "$check_code" ./is-op.py
   ./is-op.py:3:
@@ -125,21 +125,21 @@
    object comparison with literal
   [1]
 
-  $ cat > for-nolineno.py <<EOF
+  $ cat > for-nolineno.py <<NO_CHECK_EOF
   > except:
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" for-nolineno.py --nolineno
   for-nolineno.py:0:
    > except:
    naked except clause
   [1]
 
-  $ cat > warning.t <<EOF
+  $ cat > warning.t <<NO_CHECK_EOF
   >   $ function warnonly {
   >   > }
   >   $ diff -N aaa
   >   $ function onwarn {}
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" warning.t
   $ "$check_code" --warn warning.t
   warning.t:1:
@@ -152,20 +152,20 @@
    >   $ function onwarn {}
    warning: don't use 'function', use old style
   [1]
-  $ cat > error.t <<EOF
+  $ cat > error.t <<NO_CHECK_EOF
   >   $ [ foo == bar ]
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" error.t
   error.t:1:
    >   $ [ foo == bar ]
    [ foo == bar ] is a bashism, use [ foo = bar ] instead
   [1]
   $ rm error.t
-  $ cat > raise-format.py <<EOF
+  $ cat > raise-format.py <<NO_CHECK_EOF
   > raise SomeException, message
   > # this next line is okay
   > raise SomeException(arg1, arg2)
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" not-existing.py raise-format.py
   Skipping*not-existing.py* (glob)
   raise-format.py:1:
@@ -173,10 +173,10 @@
    don't use old-style two-argument raise, use Exception(message)
   [1]
 
-  $ cat <<EOF > tab.t
+  $ cat <<NO_CHECK_EOF > tab.t
   > 	indent
   >   > 	heredoc
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" tab.t
   tab.t:1:
    > 	indent
@@ -184,7 +184,7 @@
   [1]
   $ rm tab.t
 
-  $ cat > rst.py <<EOF
+  $ cat > rst.py <<NO_CHECK_EOF
   > """problematic rst text
   > 
   > .. note::
@@ -213,7 +213,7 @@
   >     .. note::
   >         plus bad
   > """
-  > EOF
+  > NO_CHECK_EOF
   $ $check_code -w rst.py
   rst.py:3:
    > .. note::
@@ -223,7 +223,7 @@
    warning: add two newlines after '.. note::'
   [1]
 
-  $ cat > ./map-inside-gettext.py <<EOF
+  $ cat > ./map-inside-gettext.py <<NO_CHECK_EOF
   > print(_("map inside gettext %s" % v))
   > 
   > print(_("concatenating " " by " " space %s" % v))
@@ -234,7 +234,7 @@
   > 
   > print(_(
   >         "leading spaces inside of '(' %s" % v))
-  > EOF
+  > NO_CHECK_EOF
   $ "$check_code" ./map-inside-gettext.py
   ./map-inside-gettext.py:1:
    > print(_("map inside gettext %s" % v))
@@ -256,12 +256,12 @@
 web templates
 
   $ mkdir -p mercurial/templates
-  $ cat > mercurial/templates/example.tmpl <<EOF
+  $ cat > mercurial/templates/example.tmpl <<NO_CHECK_EOF
   > {desc}
   > {desc|escape}
   > {desc|firstline}
   > {desc|websub}
-  > EOF
+  > NO_CHECK_EOF
 
   $ "$check_code" --warnings mercurial/templates/example.tmpl
   mercurial/templates/example.tmpl:2:
@@ -271,7 +271,7 @@
 
 'string join across lines with no space' detection
 
-  $ cat > stringjoin.py <<EOF
+  $ cat > stringjoin.py <<NO_CHECK_EOF
   > foo = (' foo'
   >        'bar foo.'
   >        'bar foo:'
@@ -281,11 +281,11 @@
   >        'bar foo+'
   >        'bar foo-'
   >        'bar')
-  > EOF
+  > NO_CHECK_EOF
 
 'missing _() in ui message' detection
 
-  $ cat > uigettext.py <<EOF
+  $ cat > uigettext.py <<NO_CHECK_EOF
   > ui.status("% 10s %05d % -3.2f %*s %%"
   >           # this use '\\\\' instead of '\\', because the latter in
   >           # heredoc on shell becomes just '\'
@@ -294,11 +294,11 @@
   >           """
   >           '''.:*+-=
   >           ''' "%-6d \n 123456 .:*+-= foobar")
-  > EOF
+  > NO_CHECK_EOF
 
 superfluous pass
 
-  $ cat > superfluous_pass.py <<EOF
+  $ cat > superfluous_pass.py <<NO_CHECK_EOF
   > # correct examples
   > if foo:
   >     pass
@@ -326,7 +326,7 @@
   >     docstring also
   >     means no pass"""
   >     pass
-  > EOF
+  > NO_CHECK_EOF
 
 (Checking multiple invalid files at once examines whether caching
 translation table for repquote() works as expected or not. All files
--- a/tests/test-contrib-check-commit.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-contrib-check-commit.t	Mon Oct 22 14:46:06 2018 -0400
@@ -130,10 +130,6 @@
    This has no topic and ends with a period.
   7: don't add trailing period on summary line
    This has no topic and ends with a period.
-  19: adds double empty line
-   +
   20: adds a function with foo_bar naming
    + def blah_blah(x):
-  23: adds double empty line
-   +
   [1]
--- a/tests/test-contrib-dumprevlog.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-contrib-dumprevlog.t	Mon Oct 22 14:46:06 2018 -0400
@@ -19,10 +19,10 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
 
 Dumping revlog of file a to stdout:
-  $ $PYTHON "$CONTRIBDIR/dumprevlog" .hg/store/data/a.i
+  $ "$PYTHON" "$CONTRIBDIR/dumprevlog" .hg/store/data/a.i
   file: .hg/store/data/a.i
   node: 183d2312b35066fb6b3b449b84efc370d50993d0
   linkrev: 0
@@ -54,14 +54,14 @@
 
 Dump all revlogs to file repo.dump:
 
-  $ find .hg/store -name "*.i" | sort | xargs $PYTHON "$CONTRIBDIR/dumprevlog" > ../repo.dump
+  $ find .hg/store -name "*.i" | sort | xargs "$PYTHON" "$CONTRIBDIR/dumprevlog" > ../repo.dump
   $ cd ..
 
 Undumping into repo-b:
 
   $ hg init repo-b
   $ cd repo-b
-  $ $PYTHON "$CONTRIBDIR/undumprevlog" < ../repo.dump
+  $ "$PYTHON" "$CONTRIBDIR/undumprevlog" < ../repo.dump
   .hg/store/00changelog.i
   .hg/store/00manifest.i
   .hg/store/data/a.i
@@ -84,7 +84,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
 
 Compare repos:
 
--- a/tests/test-contrib-perf.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-contrib-perf.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,6 +55,8 @@
                  benchmark parsing bookmarks from disk to memory
    perfbranchmap
                  benchmark the update of a branchmap
+   perfbranchmapload
+                 benchmark reading the branchmap
    perfbundleread
                  Benchmark reading of bundle files.
    perfcca       (no help text available)
@@ -82,6 +84,8 @@
                  (no help text available)
    perfheads     (no help text available)
    perfindex     (no help text available)
+   perflinelogedits
+                 (no help text available)
    perfloadmarkers
                  benchmark the time to parse the on-disk markers for a repo
    perflog       (no help text available)
@@ -156,11 +160,16 @@
 #endif
   $ hg perfheads
   $ hg perfindex
+  $ hg perflinelogedits -n 1
   $ hg perfloadmarkers
   $ hg perflog
   $ hg perflookup 2
   $ hg perflrucache
   $ hg perfmanifest 2
+  $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
+  $ hg perfmanifest -m 44fe2c8352bb
+  abort: manifest revision must be integer or full node
+  [255]
   $ hg perfmergecalculate -r 3
   $ hg perfmoonwalk
   $ hg perfnodelookup 2
@@ -198,6 +207,50 @@
   ! wall * comb * user * sys * (avg of *) (glob)
   ! wall * comb * user * sys * (median of *) (glob)
 
+test json output
+----------------
+
+normal output:
+
+  $ hg perfheads --template json --config perf.stub=no
+  [
+   {
+    "comb": *, (glob)
+    "count": *, (glob)
+    "sys": *, (glob)
+    "user": *, (glob)
+    "wall": * (glob)
+   }
+  ]
+
+detailed output:
+
+  $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
+  [
+   {
+    "avg.comb": *, (glob)
+    "avg.count": *, (glob)
+    "avg.sys": *, (glob)
+    "avg.user": *, (glob)
+    "avg.wall": *, (glob)
+    "comb": *, (glob)
+    "count": *, (glob)
+    "max.comb": *, (glob)
+    "max.count": *, (glob)
+    "max.sys": *, (glob)
+    "max.user": *, (glob)
+    "max.wall": *, (glob)
+    "median.comb": *, (glob)
+    "median.count": *, (glob)
+    "median.sys": *, (glob)
+    "median.user": *, (glob)
+    "median.wall": *, (glob)
+    "sys": *, (glob)
+    "user": *, (glob)
+    "wall": * (glob)
+   }
+  ]
+
 Check perf.py for historical portability
 ----------------------------------------
 
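The JSON templating exercised above makes perf results easy to post-process.
A minimal sketch, assuming the field names shown in the expected output and a
repository with the perf extension enabled:

  import json
  import subprocess

  # Collect one real perf measurement (perf.stub=no, as in the test above)
  # and report the timing fields of each entry.
  out = subprocess.check_output(
      ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'])
  for result in json.loads(out):
      print('wall=%(wall)s comb=%(comb)s count=%(count)s' % result)
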
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-contrib-relnotes.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,291 @@
+#require test-repo py3exe
+  $ . "$TESTDIR/helpers-testrepo.sh"
+
+  $ cd $TESTDIR/..
+  $ python3 contrib/relnotes 4.4 --stoprev 4.5
+  changeset 3398603c5621: unexpected block in release notes directive feature
+  New Features
+  ============
+  
+  revert --interactive
+  --------------------
+  
+  The revert command now accepts the flag --interactive to allow reverting only
+  some of the changes to the specified files.
+  
+  Rebase with different destination per source revision
+  -----------------------------------------------------
+  
+  Previously, rebase only supported one unique destination. Now "SRC" and
+  "ALLSRC" can be used in the rebase destination revset to precisely define
+  the destination for each individual source revision.
+  
+  For example, the following command could move some orphaned changesets to
+  reasonable new places so that they are no longer orphaned:
+  
+  hg rebase   -r 'orphan()-obsolete()'   -d 'max((successors(max(roots(ALLSRC) &
+  ::SRC)^)-obsolete())::)'
+  
+  Accessing hidden changesets
+  ---------------------------
+  
+  Set config option 'experimental.directaccess = True' to access hidden
+  changesets from read only commands.
+  
+  githelp extension
+  -----------------
+  
+  The "githelp" extension provides the "hg githelp" command. This command
+  attempts to convert a "git" command to its Mercurial equivalent. The extension
+  can be useful to Git users new to Mercurial.
+  
+  Other Changes
+  -------------
+  
+  * When interactive revert is run against a revision other than the working
+    directory parent, the diff shown is the diff to *apply* to the working
+    directory, rather than the diff to *discard* from the working copy. This is
+    in line with related user experiences with 'git' and appears to be less
+    confusing with 'ui.interface=curses'.
+  
+  * Let 'hg rebase' avoid content-divergence by skipping obsolete changesets
+    (and their descendants) when they are present in the rebase set along with
+    one of their successors but none of their successors is in destination.
+  
+  * hgweb now displays phases of non-public changesets
+  
+  * The "HGPLAINEXCEPT" environment variable can now include "color" to allow
+    automatic output colorization in otherwise automated environments.
+  
+  * A new unamend command in the uncommit extension undoes the effect of the
+    amend command by recreating the changeset that existed before the amend
+    and moving the amended changes to the working directory.
+  
+  * A '--abort' flag for the merge command to abort an ongoing merge.
+  
+  * An experimental flag '--rev' to 'hg branch' which can be used to change
+    the branch of changesets.
+  
+  Backwards Compatibility Changes
+  ===============================
+  
+  * "log --follow-first -rREV", which is deprecated, now follows the first
+    parent of merge revisions from the specified "REV" just like "log --follow
+    -rREV".
+  
+  * "log --follow -rREV FILE.." now follows file history across copies and
+    renames.
+  
+  Bug Fixes
+  =========
+  
+  Issue 5165
+  ----------
+  
+  Bookmarks whose names are longer than 255 can again be exchanged between
+  4.4+ clients and servers.
+  
+  Performance Improvements
+  ========================
+  
+  * bundle2 read I/O throughput significantly increased.
+  
+  * Significant memory use reductions when reading from bundle2 bundles.
+  
+    On the BSD repository, peak RSS during changegroup application decreased by
+    ~185 MB from ~752 MB to ~567 MB.
+  
+  API Changes
+  ===========
+  
+  * bundlerepo.bundlerepository.bundle and
+    bundlerepo.bundlerepository.bundlefile are now prefixed with an underscore.
+  
+  * Rename bundlerepo.bundlerepository.bundlefilespos to _cgfilespos.
+  
+  * dirstate no longer provides a 'dirs()' method.  To test for the existence of
+    a directory in the dirstate, use 'dirstate.hasdir(dirname)'.
+  
+  * bundle2 parts are no longer seekable by default.
+  
+  * mapping does not contain all template resources. Use context.resource() in
+    template functions.
+  
+  * "text=False|True" option is dropped from the vfs interface because of Python
+    3 compatibility issue. Use "util.tonativeeol/fromnativeeol()" to convert EOL
+    manually.
+  
+  * wireproto.streamres.__init__ no longer accepts a "reader" argument. Use the
+    "gen" argument instead.
+  
+  * exchange.getbundlechunks() now returns a 2-tuple instead of just an
+    iterator.
+  
+  
+  === commands ===
+   * amend: do not drop missing files (Bts:issue5732)
+   * amend: do not take untracked files as modified or clean (Bts:issue5732)
+   * amend: update .hgsubstate before committing a memctx (Bts:issue5677)
+   * annotate: add support to specify hidden revs if directaccess config is set
+   * bookmark: add methods to binary encode and decode bookmark values
+   * bookmark: deprecate direct update of a bookmark value
+   * bookmark: introduce a 'bookmarks' part
+   * bookmark: introduce in advance a variant of the exchange test
+   * bookmark: run 'pushkey' hooks after bookmark move, not 'prepushkey'
+   * bookmarks: add bookmarks to hidden revs if directaccess config is set
+   * bookmarks: calculate visibility exceptions only once
+   * bookmarks: display the obsfate of hidden revision we create a bookmark on
+   * bookmarks: fix pushkey compatibility mode (Bts:issue5777)
+   * bookmarks: use context managers for lock and transaction in update()
+   * bookmarks: use context managers for locks and transaction in pushbookmark()
+   * branch: allow changing branch name to existing name if possible
+   * clone: add support for storing remotenames while cloning
+   * clone: use utility function to write hgrc
+   * clonebundle: make it possible to retrieve the initial bundle through largefile
+   * commandserver: restore cwd in case of exception
+   * commandserver: unblock SIGCHLD
+   * help: deprecate ui.slash in favor of slashpath template filter (Bts:issue5572)
+   * log: allow matchfn to be non-null even if both --patch/--stat are off
+   * log: build follow-log filematcher at once
+   * log: don't expand aliases in revset built from command options
+   * log: make "slowpath" condition slightly more readable
+   * log: make opt2revset table a module constant
+   * log: merge getlogrevs() and getgraphlogrevs()
+   * log: remove temporary variable 'date' used only once
+   * log: resolve --follow thoroughly in getlogrevs()
+   * log: resolve --follow with -rREV in cmdutil.getlogrevs()
+   * log: simplify 'x or ancestors(x)' expression
+   * log: translate column labels at once (Bts:issue5750)
+   * log: use revsetlang.formatspec() thoroughly
+   * log: use revsetlang.formatspec() to concatenate list expression
+   * log: use smartset.slice() to limit number of revisions to be displayed
+   * merge: cache unknown dir checks (Bts:issue5716)
+   * merge: check created file dirs for path conflicts only once (Bts:issue5716)
+   * patch: add within-line color diff capacity
+   * patch: catch unexpected case in _inlinediff
+   * patch: do not break up multibyte character when highlighting word
+   * patch: improve heuristics to not take the word "diff" as header (Bts:issue1879)
+   * patch: reverse _inlinediff output for consistency
+   * pull: clarify that -u only updates linearly
+   * pull: hold wlock for the full operation when --update is used
+   * pull: retrieve bookmarks through the binary part when possible
+   * pull: store binary node in pullop.remotebookmarks
+   * push: include a 'check:bookmarks' part when possible
+   * push: restrict common discovery to the pushed set
+   * revert: support reverting to hidden cset if directaccess config is set
+  
+  === core ===
+   * filelog: add the ability to report the user facing name
+   * revlog: choose between ifh and dfh once for all
+   * revlog: don't use slicing to return parents
+   * revlog: group delta computation methods under _deltacomputer object
+   * revlog: group revision info into a dedicated structure
+   * revlog: introduce 'deltainfo' to distinguish from 'delta'
+   * revlog: rename 'rev' to 'base', as it is the base revision
+   * revlog: separate diff computation from the collection of other info
+   * revset: evaluate filesets against each revision for 'file()' (Bts:issue5778)
+   * revset: parse x^:: as (x^):: (Bts:issue5764)
+   * templater: look up symbols/resources as if they were separated (Bts:issue5699)
+   * transaction: register summary callbacks only at start of transaction (BC)
+   * util: whitelist NTFS for hardlink creation (Bts:issue4580)
+  
+  === extensions ===
+   * convert: restore the ability to use bzr < 2.6.0 (Bts:issue5733)
+   * histedit: add support to output nodechanges using formatter
+   * largefiles: add a 'debuglfput' command to put largefile into the store
+   * largefiles: add support for 'largefiles://' url scheme
+   * largefiles: allow to run 'debugupgraderepo' on repo with largefiles
+   * largefiles: convert EOL of hgrc before appending to bytes IO
+   * largefiles: explicitly set the source and sink types to 'hg' for lfconvert
+   * largefiles: modernize how capabilities are added to the wire protocol
+   * largefiles: pay attention to dropped standin files when updating largefiles
+   * rebase: add concludememorynode(), and call it when rebasing in-memory
+   * rebase: add the --inmemory option flag; assign a wctx object for the rebase
+   * rebase: add ui.log calls for whether IMM used, whether rebasing WCP
+   * rebase: disable 'inmemory' if the rebaseset contains the working copy
+   * rebase: do not bail on uncomitted changes if rebasing in-memory
+   * rebase: do not update if IMM; instead, set the overlaywctx's parents
+   * rebase: don't run IMM if running rebase in a transaction
+   * rebase: don't take out a dirstate guard for in-memory rebase
+   * rebase: drop --style option
+   * rebase: fix for hgsubversion
+   * rebase: pass the wctx object (IMM or on-disk) to merge.update
+   * rebase: pass wctx to rebasenode()
+   * rebase: rerun a rebase on-disk if IMM merge conflicts arise
+   * rebase: switch ui.log calls to common style
+   * rebase: use fm.formatlist() and fm.formatdict() to support user template
+  
+  === hgweb ===
+   * hgweb: disable diff.noprefix option for diffstat
+   * hgweb: drop support of browsers that don't understand <canvas> (BC)
+   * hgweb: only include graph-related data in jsdata variable on /graph pages (BC)
+   * hgweb: stop adding strings to innerHTML of #graphnodes and #nodebgs (BC)
+  
+  === unsorted ===
+   * archive: add support to specify hidden revs if directaccess config is set
+   * atomicupdate: add an experimental option to use atomictemp when updating
+   * bundle: allow bundlerepo to support alternative manifest implementations
+   * changelog: introduce a 'tiprev' method
+   * changelog: use 'tiprev()' in 'tip()'
+   * completion: add support for new "amend" command
+   * debugssl: convert port number to int (Bts:issue5757)
+   * diff: disable diff.noprefix option for diffstat (Bts:issue5759)
+   * dispatch: abort if early boolean options can't be parsed
+   * dispatch: add HGPLAIN=+strictflags to restrict early parsing of global options
+   * dispatch: add option to not strip command args parsed by _earlygetopt()
+   * dispatch: alias --repo to --repository while parsing early options
+   * dispatch: convert non-list option parsed by _earlygetopt() to string
+   * dispatch: fix early parsing of short option with value like -R=foo
+   * dispatch: handle IOError when writing to stderr
+   * dispatch: stop parsing of early boolean option at "--"
+   * dispatch: verify result of early command parsing
+   * evolution: make reporting of new unstable changesets optional
+   * extdata: abort if external command exits with non-zero status (BC)
+   * fancyopts: add early-options parser compatible with getopt()
+   * graphlog: add another graph node type, unstable, using character "*" (BC)
+   * hgdemandimport: use correct hyperlink to python-bug in comments (Bts:issue5765)
+   * httppeer: add support for tracing all http request made by the peer
+   * identify: document -r. explicitly how to disable wdir scanning (Bts:issue5622)
+   * lfs: register config options
+   * localrepo: specify optional callback parameter to pathauditor as a keyword
+   * match: do not weirdly include explicit files excluded by -X option
+   * memfilectx: make changectx argument mandatory in constructor (API)
+   * morestatus: don't crash with different drive letters for repo.root and CWD
+   * outgoing: respect ":pushurl" paths (Bts:issue5365)
+   * remove: print message for each file in verbose mode only while using '-A' (BC)
+   * rewriteutil: use precheck() in uncommit and amend commands
+   * scmutil: don't try to delete origbackup symlinks to directories (Bts:issue5731)
+   * sshpeer: add support for request tracing
+   * streamclone: add support for bundle2 based stream clone
+   * streamclone: add support for cloning non append-only file
+   * streamclone: also stream caches to the client
+   * streamclone: define first iteration of version 2 of stream format
+   * streamclone: move wire protocol status code from wireproto command
+   * streamclone: rework canperformstreamclone
+   * streamclone: tests phase exchange during stream clone
+   * streamclone: use readexactly when reading stream v2
+   * subrepo: add config option to reject any subrepo operations (SEC)
+   * subrepo: disable git and svn subrepos by default (BC) (SEC)
+   * subrepo: extend config option to disable subrepos by type (SEC)
+   * subrepo: handle 'C:' style paths on the command line (Bts:issue5770)
+   * subrepo: use per-type config options to enable subrepos
+   * svnsubrepo: check if subrepo is missing when checking dirty state (Bts:issue5657)
+   * tr-summary: keep a weakref to the unfiltered repository
+   * unamend: fix command summary line
+   * uncommit: unify functions _uncommitdirstate and _unamenddirstate to one
+   * update: support updating to hidden cset if directaccess config is set
+  
+  === BC ===
+  
+   * extdata: abort if external command exits with non-zero status (BC)
+   * graphlog: add another graph node type, unstable, using character "*" (BC)
+   * hgweb: drop support of browsers that don't understand <canvas> (BC)
+   * hgweb: only include graph-related data in jsdata variable on /graph pages (BC)
+   * hgweb: stop adding strings to innerHTML of #graphnodes and #nodebgs (BC)
+   * remove: print message for each file in verbose mode only while using '-A' (BC)
+   * subrepo: disable git and svn subrepos by default (BC) (SEC)
+   * transaction: register summary callbacks only at start of transaction (BC)
+  
+  === API Changes ===
+  
+   * memfilectx: make changectx argument mandatory in constructor (API)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-contrib-testparseutil.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,192 @@
+  $ testparseutil="$TESTDIR"/../contrib/testparseutil.py
+
+Internal test by doctest
+
+  $ "$PYTHON" -m doctest "$testparseutil"
+
+Tests for embedded python script
+
+Typical cases
+
+  $ "$PYTHON" "$testparseutil" -v pyembedded <<NO_CHECK_EOF
+  >   >>> for f in [1, 2, 3]:
+  >   ...     foo = 1
+  >   >>> foo = 2
+  >   $ echo "doctest is terminated by command, empty line, or comment"
+  >   >>> foo = 31
+  >   expected output of doctest fragment
+  >   >>> foo = 32
+  >   
+  >   >>> foo = 33
+  > 
+  >   >>> foo = 34
+  > comment
+  >   >>> foo = 35
+  > 
+  >   $ "\$PYTHON" <<EOF
+  >   > foo = 4
+  >   > 
+  >   > EOF
+  >   $ cat > foo.py <<EOF
+  >   > foo = 5
+  >   > EOF
+  >   $ cat >> foo.py <<EOF
+  >   > foo = 6 # appended
+  >   > EOF
+  > 
+  > The NO_CHECK_EOF limit mark makes parsing ignore the corresponding fragment
+  > (this is useful for intentionally using bad code)
+  > 
+  >   $ "\$PYTHON" <<NO_CHECK_EOF
+  >   > foo = 7 # this should be ignored at detection
+  >   > NO_CHECK_EOF
+  >   $ cat > foo.py <<NO_CHECK_EOF
+  >   > foo = 8 # this should be ignored at detection
+  >   > NO_CHECK_EOF
+  > 
+  > doctest fragment ended by EOF
+  > 
+  >   >>> foo = 9
+  > NO_CHECK_EOF
+  <stdin>:1: <anonymous> starts
+    |for f in [1, 2, 3]:
+    |    foo = 1
+    |foo = 2
+  <stdin>:4: <anonymous> ends
+  <stdin>:5: <anonymous> starts
+    |foo = 31
+    |
+    |foo = 32
+    |
+    |foo = 33
+  <stdin>:10: <anonymous> ends
+  <stdin>:11: <anonymous> starts
+    |foo = 34
+  <stdin>:12: <anonymous> ends
+  <stdin>:13: <anonymous> starts
+    |foo = 35
+  <stdin>:14: <anonymous> ends
+  <stdin>:16: <anonymous> starts
+    |foo = 4
+    |
+  <stdin>:18: <anonymous> ends
+  <stdin>:20: foo.py starts
+    |foo = 5
+  <stdin>:21: foo.py ends
+  <stdin>:23: foo.py starts
+    |foo = 6 # appended
+  <stdin>:24: foo.py ends
+  <stdin>:38: <anonymous> starts
+    |foo = 9
+  <stdin>:39: <anonymous> ends
+
+Invalid test script
+
+(similar tests for shell script and hgrc configuration are omitted,
+because this exercises the common base class they share)
+
+  $ "$PYTHON" "$testparseutil" -v pyembedded <<NO_CHECK_EOF > detected
+  >   $ "\$PYTHON" <<EOF
+  >   > foo = 1
+  > 
+  >   $ "\$PYTHON" <<EOF
+  >   > foo = 2
+  >   $ cat > bar.py <<EOF
+  >   > bar = 2 # this fragment will be detected as expected
+  >   > EOF
+  > 
+  >   $ cat > foo.py <<EOF
+  >   > foo = 3
+  > NO_CHECK_EOF
+  <stdin>:3: unexpected line for "heredoc python invocation"
+  <stdin>:6: unexpected line for "heredoc python invocation"
+  <stdin>:11: unexpected end of file for "heredoc .py file"
+  [1]
+  $ cat detected
+  <stdin>:7: bar.py starts
+    |bar = 2 # this fragment will be detected as expected
+  <stdin>:8: bar.py ends
+
+Tests for embedded shell script
+
+  $ "$PYTHON" "$testparseutil" -v shembedded <<NO_CHECK_EOF
+  >   $ cat > foo.sh <<EOF
+  >   > foo = 1
+  >   > 
+  >   > foo = 2
+  >   > EOF
+  >   $ cat >> foo.sh <<EOF
+  >   > foo = 3 # appended
+  >   > EOF
+  > 
+  > The NO_CHECK_EOF limit mark makes parsing ignore the corresponding fragment
+  > (this is useful for intentionally using bad code)
+  > 
+  >   $ cat > foo.sh <<NO_CHECK_EOF
+  >   > # this should be ignored at detection
+  >   > foo = 4
+  >   > NO_CHECK_EOF
+  > 
+  > NO_CHECK_EOF
+  <stdin>:2: foo.sh starts
+    |foo = 1
+    |
+    |foo = 2
+  <stdin>:5: foo.sh ends
+  <stdin>:7: foo.sh starts
+    |foo = 3 # appended
+  <stdin>:8: foo.sh ends
+
+Tests for embedded hgrc configuration
+
+  $ "$PYTHON" "$testparseutil" -v hgrcembedded <<NO_CHECK_EOF
+  >   $ cat > .hg/hgrc <<EOF
+  >   > [ui]
+  >   > verbose = true
+  >   > 
+  >   > # end of local configuration
+  >   > EOF
+  > 
+  >   $ cat > \$HGRCPATH <<EOF
+  >   > [extensions]
+  >   > rebase =
+  >   > # end of global configuration
+  >   > EOF
+  > 
+  >   $ cat >> \$HGRCPATH <<EOF
+  >   > # appended
+  >   > [extensions]
+  >   > rebase =!
+  >   > EOF
+  > 
+  > The NO_CHECK_EOF limit mark makes parsing ignore the corresponding fragment
+  > (this is useful for intentionally using bad code)
+  > 
+  >   $ cat > .hg/hgrc <<NO_CHECK_EOF
+  >   > # this local configuration should be ignored at detection
+  >   > [ui]
+  >   > username = foo bar
+  >   > NO_CHECK_EOF
+  > 
+  >   $ cat > \$HGRCPATH <<NO_CHECK_EOF
+  >   > # this global configuration should be ignored at detection
+  >   > [extensions]
+  >   > foobar =
+  >   > NO_CHECK_EOF
+  > NO_CHECK_EOF
+  <stdin>:2: .hg/hgrc starts
+    |[ui]
+    |verbose = true
+    |
+    |# end of local configuration
+  <stdin>:6: .hg/hgrc ends
+  <stdin>:9: $HGRCPATH starts
+    |[extensions]
+    |rebase =
+    |# end of global configuration
+  <stdin>:12: $HGRCPATH ends
+  <stdin>:15: $HGRCPATH starts
+    |# appended
+    |[extensions]
+    |rebase =!
+  <stdin>:18: $HGRCPATH ends
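
The NO_CHECK_EOF convention tested above can be approximated in a few lines.
A rough sketch of the detection idea only, not the actual testparseutil
implementation (the helper name is illustrative):

  import re

  HEREDOC = re.compile(r'<<\s*(\w+)\s*$')

  def embeddedfragments(lines):
      # Yield (startline, limitmark, body) for each heredoc in a .t test,
      # skipping fragments delimited by NO_CHECK_EOF, as the tool does.
      it = iter(enumerate(lines, 1))
      for n, line in it:
          m = HEREDOC.search(line.rstrip())
          if not m:
              continue
          limit = m.group(1)
          body = []
          for _, cont in it:
              text = cont.lstrip().lstrip('> ')  # drop .t continuation marks
              if text.rstrip() == limit:
                  break
              body.append(text)
          if limit != 'NO_CHECK_EOF':
              yield n + 1, limit, body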
--- a/tests/test-contrib.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-contrib.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,7 +14,7 @@
 
 changing local directly
 
-  $ $PYTHON simplemerge local base other && echo "merge succeeded"
+  $ "$PYTHON" simplemerge local base other && echo "merge succeeded"
   merge succeeded
   $ cat local
   local
@@ -24,7 +24,7 @@
 
 printing to stdout
 
-  $ $PYTHON simplemerge -p local base other
+  $ "$PYTHON" simplemerge -p local base other
   local
   base
   other
@@ -43,7 +43,7 @@
   $ echo end >> conflict-local
   $ echo end >> conflict-other
 
-  $ $PYTHON simplemerge -p conflict-local base conflict-other
+  $ "$PYTHON" simplemerge -p conflict-local base conflict-other
   base
   <<<<<<< conflict-local
   not other
@@ -55,7 +55,7 @@
 
 1 label
 
-  $ $PYTHON simplemerge -p -L foo conflict-local base conflict-other
+  $ "$PYTHON" simplemerge -p -L foo conflict-local base conflict-other
   base
   <<<<<<< foo
   not other
@@ -67,7 +67,7 @@
 
 2 labels
 
-  $ $PYTHON simplemerge -p -L foo -L bar conflict-local base conflict-other
+  $ "$PYTHON" simplemerge -p -L foo -L bar conflict-local base conflict-other
   base
   <<<<<<< foo
   not other
@@ -79,7 +79,7 @@
 
 3 labels
 
-  $ $PYTHON simplemerge -p -L foo -L bar -L base conflict-local base conflict-other
+  $ "$PYTHON" simplemerge -p -L foo -L bar -L base conflict-local base conflict-other
   base
   <<<<<<< foo
   not other
@@ -93,21 +93,21 @@
 
 too many labels
 
-  $ $PYTHON simplemerge -p -L foo -L bar -L baz -L buz conflict-local base conflict-other
+  $ "$PYTHON" simplemerge -p -L foo -L bar -L baz -L buz conflict-local base conflict-other
   abort: can only specify three labels.
   [255]
 
 binary file
 
-  $ $PYTHON -c "f = open('binary-local', 'w'); f.write('\x00'); f.close()"
+  $ "$PYTHON" -c "f = open('binary-local', 'w'); f.write('\x00'); f.close()"
   $ cat orig >> binary-local
-  $ $PYTHON simplemerge -p binary-local base other
+  $ "$PYTHON" simplemerge -p binary-local base other
   warning: binary-local looks like a binary file.
   [1]
 
 binary file --text
 
-  $ $PYTHON simplemerge -a -p binary-local base other 2>&1
+  $ "$PYTHON" simplemerge -a -p binary-local base other 2>&1
   warning: binary-local looks like a binary file.
   \x00local (esc)
   base
@@ -115,7 +115,7 @@
 
 help
 
-  $ $PYTHON simplemerge --help
+  $ "$PYTHON" simplemerge --help
   simplemerge [OPTS] LOCAL BASE OTHER
   
       Simple three-way file merge utility with a minimal feature set.
@@ -134,7 +134,7 @@
 
 wrong number of arguments
 
-  $ $PYTHON simplemerge
+  $ "$PYTHON" simplemerge
   simplemerge: wrong number of arguments
   simplemerge [OPTS] LOCAL BASE OTHER
   
@@ -155,7 +155,7 @@
 
 bad option
 
-  $ $PYTHON simplemerge --foo -p local base other
+  $ "$PYTHON" simplemerge --foo -p local base other
   simplemerge: option --foo not recognized
   simplemerge [OPTS] LOCAL BASE OTHER
   
--- a/tests/test-convert-bzr-ghosts.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-bzr-ghosts.t	Mon Oct 22 14:46:06 2018 -0400
@@ -21,7 +21,7 @@
   $ bzr add -q somefile
   $ bzr commit -q -m 'Initial layout setup'
   $ echo morecontent >> somefile
-  $ $PYTHON ../../ghostcreator.py 'Commit with ghost revision' ghostrev
+  $ "$PYTHON" ../../ghostcreator.py 'Commit with ghost revision' ghostrev
   $ cd ..
   $ hg convert source source-hg
   initializing destination source-hg repository
@@ -31,9 +31,9 @@
   1 Initial layout setup
   0 Commit with ghost revision
   $ glog -R source-hg
-  o  1@source "Commit with ghost revision" files: somefile
+  o  1@source "Commit with ghost revision" files+: [], files-: [], files: [somefile]
   |
-  o  0@source "Initial layout setup" files: somefile
+  o  0@source "Initial layout setup" files+: [somefile], files-: [], files: []
   
 
   $ cd ..
--- a/tests/test-convert-bzr-merges.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-bzr-merges.t	Mon Oct 22 14:46:06 2018 -0400
@@ -13,7 +13,8 @@
   $ bzr init -q source
   $ cd source
   $ echo content > file
-  $ bzr add -q file
+  $ echo text > rename_me
+  $ bzr add -q file rename_me
   $ bzr commit -q -m 'Initial add' '--commit-time=2009-10-10 08:00:00 +0100'
   $ cd ..
   $ bzr branch -q source source-branch1
@@ -32,6 +33,8 @@
   $ cd source-branch2
   $ echo somecontent > file-branch2
   $ bzr add -q file-branch2
+  $ bzr mv -q rename_me renamed
+  $ echo change > renamed
   $ bzr commit -q -m 'Added brach2 file' '--commit-time=2009-10-10 08:00:03 +0100'
   $ sleep 1
   $ cd ../source
@@ -39,6 +42,9 @@
   $ bzr merge -q --force ../source-branch2
   $ bzr commit -q -m 'Merged branches' '--commit-time=2009-10-10 08:00:04 +0100'
   $ cd ..
+
+BUG: file-branch2 should not be added in rev 4, and the rename_me -> renamed
+move should be recorded in the fixup merge.
   $ hg convert --datesort --config convert.bzr.saverev=False source source-hg
   initializing destination source-hg repository
   scanning source...
@@ -49,18 +55,19 @@
   2 Added parent file
   1 Added brach2 file
   0 Merged branches
+  warning: can't find ancestor for 'renamed' copied from 'rename_me'!
   $ glog -R source-hg
-  o    5@source "(octopus merge fixup)" files:
+  o    5@source "(octopus merge fixup)" files+: [], files-: [], files: [renamed]
   |\
-  | o    4@source "Merged branches" files: file-branch2
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
   | |\
-  o---+  3@source-branch2 "Added brach2 file" files: file-branch2
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
    / /
-  | o  2@source "Added parent file" files: file-parent
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
   | |
-  o |  1@source-branch1 "Added branch1 file" files: file file-branch1
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
   |/
-  o  0@source "Initial add" files: file
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
   
   $ manifest source-hg tip
   % manifest of tip
@@ -68,6 +75,7 @@
   644   file-branch1
   644   file-branch2
   644   file-parent
+  644   renamed
 
   $ hg convert source-hg hg2hg
   initializing destination hg2hg repository
@@ -80,38 +88,107 @@
   2 Added brach2 file
   1 Merged branches
   0 (octopus merge fixup)
+
+BUG: The manifest entries should be the same for matching revisions, and
+nothing should be outgoing
+
+  $ hg -R source-hg manifest --debug -r tip | grep renamed
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
+  $ hg -R hg2hg manifest --debug -r tip | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R source-hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+  $ hg -R hg2hg manifest --debug -r 'tip^' | grep renamed
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
+
+BUG: The revisions found should be the same in both repos
+
+  $ hg --cwd source-hg log -r 'file("renamed")' -G -Tcompact
+  o    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | |
+  | o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  | |\     Merged branches
+  | ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+  $ hg --cwd hg2hg log -r 'file("renamed")' -G -Tcompact
+  o    4:2,1   e0ae8af3503a   2009-10-10 08:00 +0100   foo
+  |\     Merged branches
+  ~ ~
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  |    Added brach2 file
+  ~
+
+BUG(?): The move seems to be recorded in rev 4, so it should probably show up
+there.  It's not recorded as a move in rev 5, even in source-hg.
+
+  $ hg -R source-hg up -q tip
+  $ hg -R hg2hg up -q tip
+  $ hg --cwd source-hg log -r 'follow("renamed")' -G -Tcompact
+  @    5[tip]:4,3   6652429c300a   2009-10-10 08:00 +0100   foo
+  |\     (octopus merge fixup)
+  | :
+  o :  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :/     Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+  $ hg --cwd hg2hg log -r 'follow("renamed")' -G -Tcompact
+  o  3   138bed2e14be   2009-10-10 08:00 +0100   foo
+  :    Added brach2 file
+  :
+  o  0   18b86f5df51b   2009-10-10 08:00 +0100   foo
+       Initial add
+  
+
   $ hg -R hg2hg out source-hg -T compact
   comparing with source-hg
   searching for changes
-  5[tip]:4,3   6bd55e826939   2009-10-10 08:00 +0100   foo
+  5[tip]:4,3   3be2299ccd31   2009-10-10 08:00 +0100   foo
     (octopus merge fixup)
   
-XXX: The manifest lines should probably agree, to avoid changing the hash when
-converting hg -> hg
+
+  $ glog -R hg2hg
+  @    5@source "(octopus merge fixup)" files+: [], files-: [], files: []
+  |\
+  | o    4@source "Merged branches" files+: [file-branch1 file-branch2 renamed], files-: [rename_me], files: [file]
+  | |\
+  o---+  3@source-branch2 "Added brach2 file" files+: [file-branch2 renamed], files-: [rename_me], files: []
+   / /
+  | o  2@source "Added parent file" files+: [file-parent], files-: [], files: []
+  | |
+  o |  1@source-branch1 "Added branch1 file" files+: [file-branch1], files-: [], files: [file]
+  |/
+  o  0@source "Initial add" files+: [file rename_me], files-: [], files: []
+  
 
   $ hg -R source-hg log --debug -r tip
-  changeset:   5:b209510f11b2c987f920749cd8e352aa4b3230f2
+  changeset:   5:6652429c300ab66fdeaf2e730945676a00b53231
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    5:1109e42bdcbd1f51baa69bc91079011d77057dbb
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    5:1eabd5f5d4b985784cf2c45c717ff053eca14b0d
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
+  files:       renamed
   extra:       branch=source
   description:
   (octopus merge fixup)
   
   
   $ hg -R hg2hg log --debug -r tip
-  changeset:   5:6bd55e8269392769783345686faf7ff7b3b0215d
+  changeset:   5:3be2299ccd315ff9aab2b49bdb0d14e3244435e8
   branch:      source
   tag:         tip
   phase:       draft
-  parent:      4:1dc38c377bb35eeea4fa955056fbe4440d54a743
-  parent:      3:4aaba1bfb426b8941bbf63f9dd52301152695164
-  manifest:    4:daa315d56a98ba20811fdd0d9d575861f65cfa8c
+  parent:      4:e0ae8af3503af9bbffb0b29268a02744cc61a561
+  parent:      3:138bed2e14be415a2692b02e41405b2864f758b4
+  manifest:    4:3ece3c7f2cc6df15b3cbbf3273c69869fc7c3ab0
   user:        Foo Bar <foo.bar@example.com>
   date:        Sat Oct 10 08:00:04 2009 +0100
   extra:       branch=source
@@ -124,21 +201,25 @@
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  67109fdebf6c556eb0a9d5696dd98c8420520405 644   renamed
   $ hg -R source-hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ hg -R hg2hg manifest --debug -r tip
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
   $ hg -R hg2hg manifest --debug -r 'tip^'
   cdf31ed9242b209cd94697112160e2c5b37a667d 644   file
   5108144f585149b29779d7c7e51d61dd22303ffe 644   file-branch1
   80753c4a9ac3806858405b96b24a907b309e3616 644   file-branch2
   7108421418404a937c684d2479a34a24d2ce4757 644   file-parent
+  27c968376d7c3afd095ecb9c7697919b933448c8 644   renamed
 
   $ cd ..
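
The manifest mismatch flagged above is mechanical to check. A small sketch
comparing the manifests of matching revisions in the two converted
repositories (repository names as in the test):

  import subprocess

  def manifest(repo, rev):
      # --debug prints one "<filenode> <mode>   <path>" line per file
      return subprocess.check_output(
          ['hg', '-R', repo, 'manifest', '--debug', '-r', rev])

  for rev in ('tip', 'tip^'):
      same = manifest('source-hg', rev) == manifest('hg2hg', rev)
      print('%s: %s' % (rev, 'match' if same else 'mismatch'))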
--- a/tests/test-convert-bzr-treeroot.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-bzr-treeroot.t	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,7 @@
   $ echo content > file
   $ bzr add -q file
   $ bzr commit -q -m 'Initial add'
-  $ $PYTHON ../../treeset.py 'Changed root' new
+  $ "$PYTHON" ../../treeset.py 'Changed root' new
   $ cd ..
   $ hg convert source source-hg
   initializing destination source-hg repository
--- a/tests/test-convert-bzr.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-bzr.t	Mon Oct 22 14:46:06 2018 -0400
@@ -42,9 +42,9 @@
   1 Initial add: a, c, e
   0 rename a into b, create a, rename c into d
   $ glog -R source-hg
-  o  1@source "rename a into b, create a, rename c into d" files: a b c d e f
+  o  1@source "rename a into b, create a, rename c into d" files+: [b d f], files-: [c e], files: [a]
   |
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 manifest
@@ -64,7 +64,7 @@
   converting...
   0 Initial add: a, c, e
   $ glog -R source-1-hg
-  o  0@source "Initial add: a, c, e" files: a c e
+  o  0@source "Initial add: a, c, e" files+: [a c e], files-: [], files: []
   
 
 test with filemap
@@ -129,10 +129,10 @@
   $ bzr branch -q source source-improve
   $ cd source
   $ echo more >> a
-  $ $PYTHON ../helper.py 'Editing a' 100
+  $ "$PYTHON" ../helper.py 'Editing a' 100
   $ cd ../source-improve
   $ echo content3 >> b
-  $ $PYTHON ../helper.py 'Editing b' 200
+  $ "$PYTHON" ../helper.py 'Editing b' 200
   $ cd ../source
   $ bzr merge -q ../source-improve
   $ bzr commit -q -m 'Merged improve branch'
@@ -147,13 +147,13 @@
   1 Editing b
   0 Merged improve branch
   $ glog -R source-hg
-  o    3@source "Merged improve branch" files:
+  o    3@source "Merged improve branch" files+: [], files-: [], files: [b]
   |\
-  | o  2@source-improve "Editing b" files: b
+  | o  2@source-improve "Editing b" files+: [], files-: [], files: [b]
   | |
-  o |  1@source "Editing a" files: a
+  o |  1@source "Editing a" files+: [], files-: [], files: [a]
   |/
-  o  0@source "Initial add" files: a b
+  o  0@source "Initial add" files+: [a b], files-: [], files: []
   
   $ cd ..
 
@@ -250,13 +250,13 @@
   0 changea
   updating tags
   $ (cd repo-bzr; glog)
-  o  3@default "update tags" files: .hgtags
+  o  3@default "update tags" files+: [.hgtags], files-: [], files: []
   |
-  o  2@default "changea" files: a
+  o  2@default "changea" files+: [], files-: [], files: [a]
   |
-  | o  1@branch "addb" files: b
+  | o  1@branch "addb" files+: [b], files-: [], files: []
   |/
-  o  0@default "adda" files: a
+  o  0@default "adda" files+: [a], files-: [], files: []
   
 
 Test tags (converted identifiers are not stable because bzr ones are
--- a/tests/test-convert-clonebranches.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-clonebranches.t	Mon Oct 22 14:46:06 2018 -0400
@@ -42,7 +42,7 @@
 convert
 
   $ hg convert -v --config convert.hg.clonebranches=1 source dest |
-  >     $PYTHON filter.py
+  >     "$PYTHON" filter.py
   3 adda
   2 changea
   1 addb
@@ -75,7 +75,7 @@
 incremental conversion
 
   $ hg convert -v --config convert.hg.clonebranches=1 source dest |
-  >     $PYTHON filter.py
+  >     "$PYTHON" filter.py
   2 c1
   pulling from branch0 into branch1
   4 changesets found
--- a/tests/test-convert-filemap.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-filemap.t	Mon Oct 22 14:46:06 2018 -0400
@@ -317,7 +317,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 5 changesets, 7 total revisions
+  checked 5 changesets with 7 changes to 4 files
 
   $ hg -R renames.repo manifest --debug
   d43feacba7a4f1f2080dde4a4b985bd8a0236d46 644   copied2
@@ -780,7 +780,7 @@
   converting...
   0 3
   $ hg -R .-hg log -G -T '{shortest(node)} {desc}\n{files % "- {file}\n"}\n'
-  o    e9ed 3
+  o    bbfe 3
   |\
   | o  33a0 2
   | |  - f
--- a/tests/test-convert-git.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-git.t	Mon Oct 22 14:46:06 2018 -0400
@@ -420,7 +420,7 @@
   $ mkdir git-repo3
   $ cd git-repo3
   $ git init-db >/dev/null 2>/dev/null
-  $ $PYTHON -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)'
+  $ "$PYTHON" -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)'
   $ git add b
   $ commit -a -m addbinary
   $ cd ..
@@ -437,7 +437,7 @@
   $ cd git-repo3-hg
   $ hg up -C
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ $PYTHON -c 'from __future__ import print_function; print(len(open("b", "rb").read()))'
+  $ "$PYTHON" -c 'from __future__ import print_function; print(len(open("b", "rb").read()))'
   4096
   $ cd ..
 
--- a/tests/test-convert-hg-source.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-hg-source.t	Mon Oct 22 14:46:06 2018 -0400
@@ -130,7 +130,7 @@
   >          for i, l in enumerate(open(sys.argv[1], 'rb'))]
   > open(sys.argv[1], 'wb').write(b''.join(lines))
   > EOF
-  $ $PYTHON rewrite.py new/.hg/shamap
+  $ "$PYTHON" rewrite.py new/.hg/shamap
   $ cd orig
   $ hg up -qC 1
   $ echo foo >> foo
@@ -193,7 +193,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 3 files
 
 manifest -r 0
 
--- a/tests/test-convert-hg-svn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-hg-svn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -12,9 +12,9 @@
 
   $ SVNREPOPATH=`pwd`/svn-repo
 #if windows
-  $ SVNREPOURL=file:///`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #else
-  $ SVNREPOURL=file://`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #endif
 
   $ svnadmin create "$SVNREPOPATH"
--- a/tests/test-convert-mtn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-mtn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -43,7 +43,7 @@
   $ mkdir dir
   $ echo b > dir/b
   $ echo d > dir/d
-  $ $PYTHON -c 'open("bin", "wb").write(b"a\\x00b")'
+  $ "$PYTHON" -c 'open("bin", "wb").write(b"a\\x00b") and None'
   $ echo c > c
   $ mtn add a dir/b dir/d c bin
   mtn: adding 'a' to workspace manifest
@@ -65,7 +65,7 @@
   $ echo b >> dir/b
   $ mtn drop c
   mtn: dropping 'c' from workspace manifest
-  $ $PYTHON -c 'open("bin", "wb").write(b"b\\x00c")'
+  $ "$PYTHON" -c 'open("bin", "wb").write(b"b\\x00c") and None'
   $ mtn ci -m update1
   mtn: beginning commit on branch 'com.selenic.test'
   mtn: committed revision 51d0a982464573a2a2cf5ee2c9219c652aaebeff
@@ -218,7 +218,7 @@
 test large file support (> 32kB)
 
   >>> fp = open('large-file', 'wb')
-  >>> for x in range(10000): fp.write(b'%d\n' % x)
+  >>> for x in range(10000): fp.write(b'%d\n' % x) and None
   >>> fp.close()
   $ md5sum.py large-file
   5d6de8a95c3b6bf9e0ffb808ba5299c1  large-file
--- a/tests/test-convert-p4-filetypes.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-p4-filetypes.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,7 +52,7 @@
   >          p4 add -t $T file_$T2
   >          ;;
   >       binary*)
-  >          $PYTHON -c "open('file_$T2', 'wb').write(b'this is $T')"
+  >          "$PYTHON" -c "open('file_$T2', 'wb').write(b'this is $T')"
   >          p4 add -t $T file_$T2
   >          ;;
   >       *)
--- a/tests/test-convert-svn-branches.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-svn-branches.t	Mon Oct 22 14:46:06 2018 -0400
@@ -85,8 +85,8 @@
   $ hg branches
   newbranch                     11:a6d7cc050ad1
   default                       10:6e2b33404495
-  old                            9:93c4b0f99529
-  old2                           8:b52884d7bead (inactive)
+  old                            9:1b494af68c0b
+  old2                           8:5be40b8dcbf6 (inactive)
   $ hg tags -q
   tip
   $ cd ..
--- a/tests/test-convert-svn-encoding.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-svn-encoding.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,6 +52,7 @@
   5 init projA
   source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1
   converting: 0/6 revisions (0.00%)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   4 hello
@@ -118,6 +119,7 @@
   converting: 4/6 revisions (66.67%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
   scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   0 branch to branch?e
@@ -125,6 +127,7 @@
   converting: 5/6 revisions (83.33%)
   reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
   scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc)
+  reusing manifest from p1 (no file change)
   committing changelog
   updating the branch cache
   reparent to file:/*/$TESTTMP/svn-repo (glob)
--- a/tests/test-convert-svn-move.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-svn-move.t	Mon Oct 22 14:46:06 2018 -0400
@@ -9,9 +9,9 @@
   $ svnadmin load -q svn-repo < "$TESTDIR/svn/move.svndump"
   $ SVNREPOPATH=`pwd`/svn-repo
 #if windows
-  $ SVNREPOURL=file:///`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #else
-  $ SVNREPOURL=file://`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #endif
 
 Convert trunk and branches
--- a/tests/test-convert-svn-sink.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-svn-sink.t	Mon Oct 22 14:46:06 2018 -0400
@@ -10,7 +10,7 @@
   >        if [ $2 -gt 0 ]; then
   >            limit="--limit=$2"
   >        fi
-  >        svn log --xml -v $limit | $PYTHON "$TESTDIR/svnxml.py"
+  >        svn log --xml -v $limit | "$PYTHON" "$TESTDIR/svnxml.py"
   >     )
   > }
 
--- a/tests/test-convert-svn-source.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert-svn-source.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,9 +14,9 @@
   $ svnadmin create svn-repo
   $ SVNREPOPATH=`pwd`/svn-repo
 #if windows
-  $ SVNREPOURL=file:///`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #else
-  $ SVNREPOURL=file://`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #endif
   $ INVALIDREVISIONID=svn:x2147622-4a9f-4db4-a8d3-13562ff547b2/proj%20B/mytrunk@1
   $ VALIDREVISIONID=svn:a2147622-4a9f-4db4-a8d3-13562ff547b2/proj%20B/mytrunk/mytrunk@1
--- a/tests/test-convert.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-convert.t	Mon Oct 22 14:46:06 2018 -0400
@@ -446,7 +446,7 @@
   $ chmod 000 bogusdir
 
   $ hg convert a bogusdir
-  abort: Permission denied: 'bogusdir'
+  abort: Permission denied: *bogusdir* (glob)
   [255]
 
 user permissions should succeed
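
The "(glob)" marker added to the permission-denied line makes the expected
output tolerant of platform differences in how the path is quoted. Roughly,
the test runner treats "*" in a "(glob)"-annotated line as a wildcard;
fnmatch gives a fair approximation of that matching:

    import fnmatch
    import re

    expected = "abort: Permission denied: *bogusdir*"
    pattern = re.compile(fnmatch.translate(expected))
    for line in ("abort: Permission denied: 'bogusdir'",
                 "abort: Permission denied: bogusdir"):
        assert pattern.match(line)
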
@@ -482,7 +482,7 @@
 override $PATH to ensure p4 not visible; use $PYTHON in case we're
 running from a devel copy, not a temp installation
 
-  $ PATH="$BINDIR" $PYTHON "$BINDIR"/hg convert emptydir
+  $ PATH="$BINDIR" "$PYTHON" "$BINDIR"/hg convert emptydir
   assuming destination emptydir-hg
   initializing destination emptydir-hg repository
   emptydir does not look like a CVS checkout
@@ -533,9 +533,11 @@
 
 test bogus URL
 
+#if no-msys
   $ hg convert -q bzr+ssh://foobar@selenic.com/baz baz
   abort: bzr+ssh://foobar@selenic.com/baz: missing or unsupported repository
   [255]
+#endif
 
 test revset converted() lookup
 
--- a/tests/test-copy-move-merge.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-copy-move-merge.t	Mon Oct 22 14:46:06 2018 -0400
@@ -88,7 +88,8 @@
   > c
   > EOF
   rebasing 2:add3f11052fa "other" (tip)
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
 
   $ cat b
--- a/tests/test-copy.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-copy.t	Mon Oct 22 14:46:06 2018 -0400
@@ -101,7 +101,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
   $ cd ..
 
@@ -148,6 +148,7 @@
 copy --after to a nonexistent target filename
   $ hg cp -A foo dummy
   foo: not recording copy - dummy does not exist
+  [1]
 
 dry-run; should show that foo is clean
   $ hg copy --dry-run foo bar
@@ -224,12 +225,14 @@
 Trying to copy on top of an existing file fails,
   $ hg copy -A bar foo
   foo: not overwriting - file already committed
-  (hg copy --after --force to replace the file by recording a copy)
+  ('hg copy --after --force' to replace the file by recording a copy)
+  [1]
 same error without the --after, so the user doesn't have to go through
 two hints:
   $ hg copy bar foo
   foo: not overwriting - file already committed
-  (hg copy --force to replace the file by recording a copy)
+  ('hg copy --force' to replace the file by recording a copy)
+  [1]
 but it's considered modified after a copy --after --force
   $ hg copy -Af bar foo
   $ hg st -AC foo
@@ -240,6 +243,7 @@
   $ touch xyzzy
   $ hg cp bar xyzzy
   xyzzy: not overwriting - file exists
-  (hg copy --after to record the copy)
+  ('hg copy --after' to record the copy)
+  [1]
 
   $ cd ..
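
Several hunks in this file add bare "[1]" lines. In .t tests a trailing "[N]"
asserts the exit status of the preceding command, so hg copy's refusal to
overwrite is now checked, not just its message. The equivalent check in plain
Python, with a stand-in command that exits 1:

    import subprocess
    import sys

    # A command that exits 1, standing in for the refusing 'hg copy' above.
    proc = subprocess.run([sys.executable, '-c', 'raise SystemExit(1)'])
    assert proc.returncode == 1   # exactly what a trailing "[1]" asserts
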
--- a/tests/test-copytrace-heuristics.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-copytrace-heuristics.t	Mon Oct 22 14:46:06 2018 -0400
@@ -86,7 +86,8 @@
 
   $ hg rebase -s . -d 1
   rebasing 2:d526312210b9 "mode a" (tip)
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
@@ -242,7 +243,8 @@
   $ hg rebase -s 2 -d 1 --config experimental.copytrace.movecandidateslimit=0
   rebasing 2:ef716627c70b "mod a" (tip)
   skipping copytracing for 'a', more candidates than the limit: 7
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
@@ -697,7 +699,8 @@
 
   $ hg rebase -s 8b6e13696 -d .
   rebasing 1:8b6e13696c38 "added more things to a"
-  other [source] changed a which local [dest] deleted
+  file 'a' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   unresolved conflicts (see hg resolve, then hg rebase --continue)
   [1]
--- a/tests/test-debugcommands.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-debugcommands.t	Mon Oct 22 14:46:06 2018 -0400
@@ -15,6 +15,39 @@
   adding a
   $ hg ci -Am make-it-full
 #if reporevlogstore
+  $ hg debugrevlog -c
+  format : 1
+  flags  : inline
+  
+  revisions     :   3
+      merges    :   0 ( 0.00%)
+      normal    :   3 (100.00%)
+  revisions     :   3
+      empty     :   0 ( 0.00%)
+                     text  :   0 (100.00%)
+                     delta :   0 (100.00%)
+      snapshot  :   3 (100.00%)
+        lvl-0   :         3 (100.00%)
+      deltas    :   0 ( 0.00%)
+  revision size : 191
+      snapshot  : 191 (100.00%)
+        lvl-0   :       191 (100.00%)
+      deltas    :   0 ( 0.00%)
+  
+  chunks        :   3
+      0x75 (u)  :   3 (100.00%)
+  chunks size   : 191
+      0x75 (u)  : 191 (100.00%)
+  
+  avg chain length  :  0
+  max chain length  :  0
+  max chain reach   : 67
+  compression ratio :  0
+  
+  uncompressed data size (min/max/avg) : 57 / 66 / 62
+  full revision size (min/max/avg)     : 58 / 67 / 63
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
   $ hg debugrevlog -m
   format : 1
   flags  : inline, generaldelta
@@ -23,10 +56,15 @@
       merges    :  0 ( 0.00%)
       normal    :  3 (100.00%)
   revisions     :  3
-      full      :  3 (100.00%)
+      empty     :  1 (33.33%)
+                     text  :  1 (100.00%)
+                     delta :  0 ( 0.00%)
+      snapshot  :  2 (66.67%)
+        lvl-0   :        2 (66.67%)
       deltas    :  0 ( 0.00%)
   revision size : 88
-      full      : 88 (100.00%)
+      snapshot  : 88 (100.00%)
+        lvl-0   :       88 (100.00%)
       deltas    :  0 ( 0.00%)
   
   chunks        :  3
@@ -42,42 +80,120 @@
   compression ratio :  0
   
   uncompressed data size (min/max/avg) : 0 / 43 / 28
-  full revision size (min/max/avg)     : 0 / 44 / 29
+  full revision size (min/max/avg)     : 44 / 44 / 44
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
+  delta size (min/max/avg)             : 0 / 0 / 0
+  $ hg debugrevlog a
+  format : 1
+  flags  : inline, generaldelta
+  
+  revisions     : 1
+      merges    : 0 ( 0.00%)
+      normal    : 1 (100.00%)
+  revisions     : 1
+      empty     : 0 ( 0.00%)
+                     text  : 0 (100.00%)
+                     delta : 0 (100.00%)
+      snapshot  : 1 (100.00%)
+        lvl-0   :       1 (100.00%)
+      deltas    : 0 ( 0.00%)
+  revision size : 3
+      snapshot  : 3 (100.00%)
+        lvl-0   :       3 (100.00%)
+      deltas    : 0 ( 0.00%)
+  
+  chunks        : 1
+      0x75 (u)  : 1 (100.00%)
+  chunks size   : 3
+      0x75 (u)  : 3 (100.00%)
+  
+  avg chain length  : 0
+  max chain length  : 0
+  max chain reach   : 3
+  compression ratio : 0
+  
+  uncompressed data size (min/max/avg) : 2 / 2 / 2
+  full revision size (min/max/avg)     : 3 / 3 / 3
+  inter-snapshot size (min/max/avg)    : 0 / 0 / 0
   delta size (min/max/avg)             : 0 / 0 / 0
 #endif
 
 Test debugindex, with and without the --verbose/--debug flag
-  $ hg debugindex a
+  $ hg debugrevlogindex a
      rev linkrev nodeid       p1           p2
        0       0 b789fdd96dc2 000000000000 000000000000
 
 #if no-reposimplestore
-  $ hg --verbose debugindex a
+  $ hg --verbose debugrevlogindex a
      rev    offset  length linkrev nodeid       p1           p2
        0         0       3       0 b789fdd96dc2 000000000000 000000000000
 
-  $ hg --debug debugindex a
+  $ hg --debug debugrevlogindex a
      rev    offset  length linkrev nodeid                                   p1                                       p2
        0         0       3       0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
 #endif
 
-  $ hg debugindex -f 1 a
+  $ hg debugrevlogindex -f 1 a
      rev flag     size   link     p1     p2       nodeid
        0 0000        2      0     -1     -1 b789fdd96dc2
 
 #if no-reposimplestore
-  $ hg --verbose debugindex -f 1 a
+  $ hg --verbose debugrevlogindex -f 1 a
      rev flag   offset   length     size   link     p1     p2       nodeid
        0 0000        0        3        2      0     -1     -1 b789fdd96dc2
 
-  $ hg --debug debugindex -f 1 a
+  $ hg --debug debugrevlogindex -f 1 a
      rev flag   offset   length     size   link     p1     p2                                   nodeid
        0 0000        0        3        2      0     -1     -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
 #endif
 
+  $ hg debugindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 07f494440405 000000000000 000000000000
+       1       1 8cccb4b5fec2 07f494440405 000000000000
+       2       2 b1e228c512c5 8cccb4b5fec2 000000000000
+  $ hg debugindex -c --debug
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 07f4944404050f47db2e5c5071e0e84e7a27bba9 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 07f4944404050f47db2e5c5071e0e84e7a27bba9 0000000000000000000000000000000000000000
+       2       2 b1e228c512c5d7066d70562ed839c3323a62d6d2 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 0000000000000000000000000000000000000000
+  $ hg debugindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 a0c8bcbbb45c 000000000000 000000000000
+       1       1 57faf8a737ae a0c8bcbbb45c 000000000000
+       2       2 a35b10320954 57faf8a737ae 000000000000
+  $ hg debugindex -m --debug
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 57faf8a737ae7faf490582941a82319ba6529dca a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 0000000000000000000000000000000000000000
+       2       2 a35b103209548032201c16c7688cb2657f037a38 57faf8a737ae7faf490582941a82319ba6529dca 0000000000000000000000000000000000000000
+  $ hg debugindex a
+     rev linkrev nodeid       p1           p2
+       0       0 b789fdd96dc2 000000000000 000000000000
+  $ hg debugindex --debug a
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+
 debugdelta chain basic output
 
-#if reporevlogstore
+#if reporevlogstore pure
+  $ hg debugindexstats
+  abort: debugindexstats only works with native code
+  [255]
+#endif
+#if reporevlogstore no-pure
+  $ hg debugindexstats
+  node trie capacity: 4
+  node trie count: 2
+  node trie depth: 1
+  node trie last rev scanned: -1
+  node trie lookups: 4
+  node trie misses: 1
+  node trie splits: 1
+  revs in memory: 3
+#endif
+
+#if reporevlogstore no-pure
   $ hg debugdeltachain -m
       rev  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
         0       1        1       -1    base         44         43         44   1.02326        44         0    0.00000
@@ -94,7 +210,8 @@
    {
     "chainid": 1,
     "chainlen": 1,
-    "chainratio": 1.02325581395,
+    "chainratio": 1.02325581395, (no-py3 !)
+    "chainratio": 1.0232558139534884, (py3 !)
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -122,7 +239,8 @@
    {
     "chainid": 3,
     "chainlen": 1,
-    "chainratio": 1.02325581395,
+    "chainratio": 1.02325581395, (no-py3 !)
+    "chainratio": 1.0232558139534884, (py3 !)
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -157,7 +275,8 @@
    {
     "chainid": 1,
     "chainlen": 1,
-    "chainratio": 1.02325581395,
+    "chainratio": 1.02325581395, (no-py3 !)
+    "chainratio": 1.0232558139534884, (py3 !)
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -193,7 +312,8 @@
    {
     "chainid": 3,
     "chainlen": 1,
-    "chainratio": 1.02325581395,
+    "chainratio": 1.02325581395, (no-py3 !)
+    "chainratio": 1.0232558139534884, (py3 !)
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -411,6 +531,7 @@
   $ ls -r .hg/cache/*
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/branch2-served
 
 Test debugcolor
@@ -432,29 +553,31 @@
 
   $ cat > debugstacktrace.py << EOF
   > from __future__ import absolute_import
-  > import sys
-  > from mercurial import util
+  > from mercurial import (
+  >     pycompat,
+  >     util,
+  > )
   > def f():
-  >     util.debugstacktrace(f=sys.stdout)
+  >     util.debugstacktrace(f=pycompat.stdout)
   >     g()
   > def g():
-  >     util.dst('hello from g\\n', skip=1)
+  >     util.dst(b'hello from g\\n', skip=1)
   >     h()
   > def h():
-  >     util.dst('hi ...\\nfrom h hidden in g', 1, depth=2)
+  >     util.dst(b'hi ...\\nfrom h hidden in g', 1, depth=2)
   > f()
   > EOF
-  $ $PYTHON debugstacktrace.py
+  $ "$PYTHON" debugstacktrace.py
   stacktrace at:
-   debugstacktrace.py:12 in * (glob)
-   debugstacktrace.py:5  in f
+   debugstacktrace.py:14 in * (glob)
+   debugstacktrace.py:7  in f
   hello from g at:
-   debugstacktrace.py:12 in * (glob)
-   debugstacktrace.py:6  in f
+   debugstacktrace.py:14 in * (glob)
+   debugstacktrace.py:8  in f
   hi ...
   from h hidden in g at:
-   debugstacktrace.py:6 in f
-   debugstacktrace.py:9 in g
+   debugstacktrace.py:8  in f
+   debugstacktrace.py:11 in g
 
 Test debugcapabilities command:
 
@@ -508,8 +631,8 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
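
The paired "(no-py3 !)" / "(py3 !)" chainratio lines earlier in this file are
needed because the divergence matches Python's str() behaviour for floats:
Python 2 rounds to 12 significant digits, while Python 3 prints the shortest
representation that round-trips, so the same 44/43 ratio renders differently:

    ratio = 44 / 43.0
    print(str(ratio))
    # Python 2: 1.02325581395        (12 significant digits)
    # Python 3: 1.0232558139534884   (shortest round-tripping form)
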
--- a/tests/test-debugindexdot.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-debugindexdot.t	Mon Oct 22 14:46:06 2018 -0400
@@ -13,6 +13,24 @@
   $ HGMERGE=true hg merge -q
   $ hg ci -m merge -d '3 0'
 
+  $ hg debugindexdot -c
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	0 -> 2
+  	2 -> 3
+  	1 -> 3
+  }
+
+  $ hg debugindexdot -m
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	0 -> 2
+  	2 -> 3
+  	1 -> 3
+  }
+
   $ hg debugindexdot a
   digraph G {
   	-1 -> 0
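
debugindexdot dumps the revlog DAG as Graphviz DOT: "-1" is the null revision
and each "p -> r" edge points from a parent to its child. The merge built in
this test produces the graphs above; a small sketch that prints the same
shape:

    parents = {0: [-1], 1: [0], 2: [0], 3: [2, 1]}  # rev -> its parent revs

    print('digraph G {')
    for rev in sorted(parents):
        for p in parents[rev]:
            print('\t%d -> %d' % (p, rev))
    print('}')
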
--- a/tests/test-devel-warnings.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-devel-warnings.t	Mon Oct 22 14:46:06 2018 -0400
@@ -342,7 +342,7 @@
   $ cat << EOF > ${TESTTMP}/buggyconfig.py
   > """A small extension that tests our developer warnings for config"""
   > 
-  > from mercurial import registrar, configitems
+  > from mercurial import configitems, registrar
   > 
   > cmdtable = {}
   > command = registrar.command(cmdtable)
--- a/tests/test-diff-binary-file.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-diff-binary-file.t	Mon Oct 22 14:46:06 2018 -0400
@@ -83,7 +83,7 @@
   > path = sys.argv[1]
   > open(path, 'wb').write(b'\x00\x01\x02\x03')
   > EOF
-  $ $PYTHON writebin.py binfile.bin
+  $ "$PYTHON" writebin.py binfile.bin
   $ hg add binfile.bin
   $ hg ci -m 'add binfile.bin'
 
--- a/tests/test-diff-color.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-diff-color.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,7 @@
   > c
   > EOF
   $ hg ci -Am adda
-  adding a
+  \x1b[0;32madding a\x1b[0m (esc)
   $ cat > a <<EOF
   > c
   > c
@@ -57,7 +57,7 @@
   >>> with open('a', 'rb') as f:
   ...     data = f.read()
   >>> with open('a', 'wb') as f:
-  ...     f.write(data.replace('dd', 'dd \r'))
+  ...     f.write(data.replace(b'dd', b'dd \r')) and None
   $ hg diff --nodates
   \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
@@ -218,7 +218,7 @@
   $ hg init sub
   $ echo b > sub/b
   $ hg -R sub commit -Am 'create sub'
-  adding b
+  \x1b[0;32madding b\x1b[0m (esc)
   $ echo 'sub = sub' > .hgsub
   $ hg add .hgsub
   $ hg commit -m 'add subrepo sub'
@@ -396,12 +396,12 @@
 
 multibyte character shouldn't be broken up in word diff:
 
-  $ $PYTHON <<'EOF'
+  $ "$PYTHON" <<'EOF'
   > with open("utf8", "wb") as f:
   >     f.write(b"blah \xe3\x82\xa2 blah\n")
   > EOF
   $ hg ci -Am 'add utf8 char' utf8
-  $ $PYTHON <<'EOF'
+  $ "$PYTHON" <<'EOF'
   > with open("utf8", "wb") as f:
   >     f.write(b"blah \xe3\x82\xa4 blah\n")
   > EOF
--- a/tests/test-diff-newlines.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-diff-newlines.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,6 @@
   $ hg init
 
-  $ $PYTHON -c 'open("a", "wb").write(b"confuse str.splitlines\nembedded\rnewline\n")'
+  $ "$PYTHON" -c 'open("a", "wb").write(b"confuse str.splitlines\nembedded\rnewline\n")'
   $ hg ci -Ama -d '1 0'
   adding a
 
--- a/tests/test-diff-upgrade.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-diff-upgrade.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
 
   $ echo regular > regular
   $ echo rmregular > rmregular
-  $ $PYTHON -c "open('bintoregular', 'wb').write(b'\0')"
+  $ "$PYTHON" -c "open('bintoregular', 'wb').write(b'\0')"
   $ touch rmempty
   $ echo exec > exec
   $ chmod +x exec
@@ -26,7 +26,7 @@
   $ echo unsetexec > unsetexec
   $ chmod +x unsetexec
   $ echo binary > binary
-  $ $PYTHON -c "open('rmbinary', 'wb').write(b'\0')"
+  $ "$PYTHON" -c "open('rmbinary', 'wb').write(b'\0')"
   $ hg ci -Am addfiles
   adding binary
   adding bintoregular
@@ -50,8 +50,8 @@
   $ rm rmexec
   $ chmod +x setexec
   $ chmod -x unsetexec
-  $ $PYTHON -c "open('binary', 'wb').write(b'\0\0')"
-  $ $PYTHON -c "open('newbinary', 'wb').write(b'\0')"
+  $ "$PYTHON" -c "open('binary', 'wb').write(b'\0\0')"
+  $ "$PYTHON" -c "open('newbinary', 'wb').write(b'\0')"
   $ rm rmbinary
   $ hg addremove -s 0
   adding newbinary
--- a/tests/test-dirstate-race.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-dirstate-race.t	Mon Oct 22 14:46:06 2018 -0400
@@ -57,7 +57,7 @@
   >     extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
   > def overridechecklookup(orig, self, files):
   >     # make an update that changes the dirstate from underneath
-  >     self._repo.ui.system(r"sh '$TESTTMP/dirstaterace.sh'",
+  >     self._repo.ui.system(br"sh '$TESTTMP/dirstaterace.sh'",
   >                          cwd=self._repo.root)
   >     return orig(self, files)
   > EOF
--- a/tests/test-dispatch.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-dispatch.t	Mon Oct 22 14:46:06 2018 -0400
@@ -26,6 +26,7 @@
       --decode              apply any matching decode filter
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
+   -T --template TEMPLATE   display with template
   
   (use 'hg cat -h' to show more help)
   [255]
--- a/tests/test-duplicateoptions.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-duplicateoptions.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,6 +8,12 @@
 
 ignore = {b'highlight', b'win32text', b'factotum', b'beautifygraph'}
 
+try:
+    import sqlite3
+    del sqlite3 # unused, just checking that import works
+except ImportError:
+    ignore.add(b'sqlitestore')
+
 if os.name != 'nt':
     ignore.add(b'win32mbcs')
 
--- a/tests/test-empty.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-empty.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,7 +14,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 0 changesets, 0 total revisions
+  checked 0 changesets with 0 changes to 0 files
 
 Check the basic files created:
 
@@ -39,7 +39,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 0 changesets, 0 total revisions
+  checked 0 changesets with 0 changes to 0 files
   $ ls .hg
   00changelog.i
   hgrc
--- a/tests/test-encoding-align.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-encoding-align.t	Mon Oct 22 14:46:06 2018 -0400
@@ -4,7 +4,7 @@
   $ export HGENCODING
   $ hg init t
   $ cd t
-  $ $PYTHON << EOF
+  $ "$PYTHON" << EOF
   > # (byte, width) = (6, 4)
   > s = b"\xe7\x9f\xad\xe5\x90\x8d"
   > # (byte, width) = (7, 7): odd width is good for alignment test
--- a/tests/test-encoding.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-encoding.t	Mon Oct 22 14:46:06 2018 -0400
@@ -10,11 +10,11 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets 1e78a93102a3:0e5b7e3f9c4a
+  new changesets 1e78a93102a3:0e5b7e3f9c4a (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg co
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ $PYTHON << EOF
+  $ "$PYTHON" << EOF
   > f = open('latin-1', 'wb'); f.write(b"latin-1 e' encoded: \xe9"); f.close()
   > f = open('utf-8', 'wb'); f.write(b"utf-8 e' encoded: \xc3\xa9"); f.close()
   > f = open('latin-1-tag', 'wb'); f.write(b"\xe9"); f.close()
@@ -278,9 +278,10 @@
 
 #if hypothesis
 
-  >>> from hypothesishelpers import *
+  >>> import hypothesishelpers
   >>> from mercurial import encoding
-  >>> roundtrips(st.binary(), encoding.fromutf8b, encoding.toutf8b)
+  >>> hypothesishelpers.roundtrips(hypothesishelpers.st.binary(),
+  ...                              encoding.fromutf8b, encoding.toutf8b)
   Round trip OK
 
 #endif
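
The hypothesis block was rewritten to avoid "import *", but the property it
checks is unchanged: arbitrary byte strings must survive a toutf8b/fromutf8b
round trip. Written directly against the hypothesis API (the direction here
is inferred from the function names; the roundtrips helper may wire it
slightly differently):

    from hypothesis import given, strategies as st
    from mercurial import encoding

    @given(st.binary())
    def test_utf8b_roundtrip(data):
        assert encoding.fromutf8b(encoding.toutf8b(data)) == data
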
--- a/tests/test-eol.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-eol.t	Mon Oct 22 14:46:06 2018 -0400
@@ -73,7 +73,7 @@
   >     echo '% a.txt'
   >     cat a.txt
   >     hg diff
-  >     $PYTHON ../switch-eol.py $1 a.txt
+  >     "$PYTHON" ../switch-eol.py $1 a.txt
   >     echo '% hg diff only reports a single changed line:'
   >     hg diff
   >     echo "% reverting back to $1 format"
--- a/tests/test-excessive-merge.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-excessive-merge.t	Mon Oct 22 14:46:06 2018 -0400
@@ -98,4 +98,4 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 4 total revisions
+  checked 5 changesets with 4 changes to 2 files
--- a/tests/test-exchange-obsmarkers-case-A1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-A1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -103,7 +103,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   1 new obsolescence markers
-  new changesets f5bc6836db60
+  new changesets f5bc6836db60 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -145,7 +145,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   1 new obsolescence markers
-  new changesets f5bc6836db60
+  new changesets f5bc6836db60 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -246,7 +246,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
-  new changesets f5bc6836db60:f6fbb35d8ac9
+  new changesets f5bc6836db60:f6fbb35d8ac9 (2 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -288,7 +288,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
-  new changesets f5bc6836db60:f6fbb35d8ac9
+  new changesets f5bc6836db60:f6fbb35d8ac9 (2 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-A2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-A2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -111,7 +111,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   1 new obsolescence markers
-  new changesets f5bc6836db60
+  new changesets f5bc6836db60 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-A3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-A3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -131,7 +131,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   1 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -238,7 +238,7 @@
   1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-A4.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-A4.t	Mon Oct 22 14:46:06 2018 -0400
@@ -118,7 +118,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
-  new changesets 28b51eb45704:06055a7959d4
+  new changesets 28b51eb45704:06055a7959d4 (2 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-A5.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-A5.t	Mon Oct 22 14:46:06 2018 -0400
@@ -126,7 +126,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   1 new obsolescence markers
-  new changesets f6298a8ac3a4
+  new changesets f6298a8ac3a4 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-B3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-B3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -106,7 +106,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets f5bc6836db60
+  new changesets f5bc6836db60 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-B5.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-B5.t	Mon Oct 22 14:46:06 2018 -0400
@@ -138,7 +138,7 @@
   adding file changes
   added 3 changesets with 3 changes to 3 files
   1 new obsolescence markers
-  new changesets 28b51eb45704:1d0f3cd25300
+  new changesets 28b51eb45704:1d0f3cd25300 (3 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-C2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-C2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -119,7 +119,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   2 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -167,7 +167,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   2 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-D1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-D1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -118,7 +118,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   2 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
@@ -166,7 +166,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   2 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-exchange-obsmarkers-case-D4.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-exchange-obsmarkers-case-D4.t	Mon Oct 22 14:46:06 2018 -0400
@@ -126,7 +126,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   2 new obsolescence markers
-  new changesets e5ea8f9c7314
+  new changesets e5ea8f9c7314 (1 drafts)
   (run 'hg update' to get a working copy)
   ## post pull state
   # obstore: main
--- a/tests/test-extdiff.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-extdiff.t	Mon Oct 22 14:46:06 2018 -0400
@@ -265,7 +265,7 @@
 
 #if windows
   $ cat > 'diff tool.bat' << EOF
-  > @$PYTHON "`pwd`/diff tool.py"
+  > @"$PYTHON" "`pwd`/diff tool.py"
   > EOF
   $ hg extdiff -p "`pwd`/diff tool.bat"
   [1]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-extension-timing.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,96 @@
+Test basic extension support
+
+  $ cat > foobar.py <<EOF
+  > import os
+  > from mercurial import commands, registrar
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > configtable = {}
+  > configitem = registrar.configitem(configtable)
+  > configitem(b'tests', b'foo', default=b"Foo")
+  > def uisetup(ui):
+  >     ui.debug(b"uisetup called [debug]\\n")
+  >     ui.write(b"uisetup called\\n")
+  >     ui.status(b"uisetup called [status]\\n")
+  >     ui.flush()
+  > def reposetup(ui, repo):
+  >     ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
+  >     ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
+  >     ui.flush()
+  > @command(b'foo', [], b'hg foo')
+  > def foo(ui, *args, **kwargs):
+  >     foo = ui.config(b'tests', b'foo')
+  >     ui.write(foo)
+  >     ui.write(b"\\n")
+  > @command(b'bar', [], b'hg bar', norepo=True)
+  > def bar(ui, *args, **kwargs):
+  >     ui.write(b"Bar\\n")
+  > EOF
+  $ abspath=`pwd`/foobar.py
+
+  $ mkdir barfoo
+  $ cp foobar.py barfoo/__init__.py
+  $ barfoopath=`pwd`/barfoo
+
+  $ hg init a
+  $ cd a
+  $ echo foo > file
+  $ hg add file
+  $ hg commit -m 'add file'
+
+  $ echo '[extensions]' >> $HGRCPATH
+  $ echo "foobar = $abspath" >> $HGRCPATH
+
+Test extension setup timings
+
+  $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1
+  debug.extensions: loading extensions
+  debug.extensions: - processing 1 entries
+  debug.extensions:   - loading extension: 'foobar'
+  debug.extensions:   > 'foobar' extension loaded in * (glob)
+  debug.extensions:     - validating extension tables: 'foobar'
+  debug.extensions:     - invoking registered callbacks: 'foobar'
+  debug.extensions:     > callbacks completed in * (glob)
+  debug.extensions: > loaded 1 extensions, total time * (glob)
+  debug.extensions: - loading configtable attributes
+  debug.extensions: - executing uisetup hooks
+  debug.extensions:   - running uisetup for 'foobar'
+  uisetup called [debug]
+  uisetup called
+  uisetup called [status]
+  debug.extensions:   > uisetup for 'foobar' took * (glob)
+  debug.extensions: > all uisetup took * (glob)
+  debug.extensions: - executing extsetup hooks
+  debug.extensions:   - running extsetup for 'foobar'
+  debug.extensions:   > extsetup for 'foobar' took * (glob)
+  debug.extensions: > all extsetup took * (glob)
+  debug.extensions: - executing remaining aftercallbacks
+  debug.extensions: > remaining aftercallbacks completed in * (glob)
+  debug.extensions: - loading extension registration objects
+  debug.extensions: > extension registration object loading took * (glob)
+  debug.extensions: > extension foobar take a total of * to load (glob)
+  debug.extensions: extension loading complete
+  debug.extensions: loading additional extensions
+  debug.extensions: - processing 1 entries
+  debug.extensions: > loaded 0 extensions, total time * (glob)
+  debug.extensions: - loading configtable attributes
+  debug.extensions: - executing uisetup hooks
+  debug.extensions: > all uisetup took * (glob)
+  debug.extensions: - executing extsetup hooks
+  debug.extensions: > all extsetup took * (glob)
+  debug.extensions: - executing remaining aftercallbacks
+  debug.extensions: > remaining aftercallbacks completed in * (glob)
+  debug.extensions: - loading extension registration objects
+  debug.extensions: > extension registration object loading took * (glob)
+  debug.extensions: extension loading complete
+  debug.extensions: - executing reposetup hooks
+  debug.extensions:   - running reposetup for foobar
+  reposetup called for a
+  ui == repo.ui
+  debug.extensions:   > reposetup for 'foobar' took * (glob)
+  debug.extensions: > all reposetup took * (glob)
+  Foo
+
+  $ cd ..
+
+  $ echo 'foobar = !' >> $HGRCPATH
--- a/tests/test-extension.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-extension.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,4 +1,15 @@
 Test basic extension support
+  $ cat > unflush.py <<EOF
+  > import sys
+  > from mercurial import pycompat
+  > if pycompat.ispy3:
+  >     # no changes required
+  >     sys.exit(0)
+  > with open(sys.argv[1], 'rb') as f:
+  >     data = f.read()
+  > with open(sys.argv[1], 'wb') as f:
+  >     f.write(data.replace(b', flush=True', b''))
+  > EOF
 
   $ cat > foobar.py <<EOF
   > import os
@@ -95,26 +106,29 @@
 Check that extensions are loaded in phases:
 
   $ cat > foo.py <<EOF
+  > from __future__ import print_function
   > import os
   > name = os.path.basename(__file__).rsplit('.', 1)[0]
-  > print("1) %s imported" % name)
+  > print("1) %s imported" % name, flush=True)
   > def uisetup(ui):
-  >     print("2) %s uisetup" % name)
+  >     print("2) %s uisetup" % name, flush=True)
   > def extsetup():
-  >     print("3) %s extsetup" % name)
+  >     print("3) %s extsetup" % name, flush=True)
   > def reposetup(ui, repo):
-  >    print("4) %s reposetup" % name)
+  >    print("4) %s reposetup" % name, flush=True)
   > 
+  > bytesname = name.encode('utf-8')
   > # custom predicate to check registration of functions at loading
   > from mercurial import (
   >     registrar,
   >     smartset,
   > )
   > revsetpredicate = registrar.revsetpredicate()
-  > @revsetpredicate(name, safe=True) # safe=True for query via hgweb
+  > @revsetpredicate(bytesname, safe=True) # safe=True for query via hgweb
   > def custompredicate(repo, subset, x):
   >     return smartset.baseset([r for r in subset if r in {0}])
   > EOF
+  $ $PYTHON $TESTTMP/unflush.py foo.py
 
   $ cp foo.py bar.py
   $ echo 'foo = foo.py' >> $HGRCPATH
@@ -140,12 +154,12 @@
   > from mercurial import demandimport; demandimport.enable()
   > from mercurial.hgweb import hgweb
   > from mercurial.hgweb import wsgicgi
-  > application = hgweb('.', 'test repo')
+  > application = hgweb(b'.', b'test repo')
   > wsgicgi.launch(application)
   > EOF
   $ . "$TESTDIR/cgienv"
 
-  $ PATH_INFO='/' SCRIPT_NAME='' $PYTHON hgweb.cgi \
+  $ PATH_INFO='/' SCRIPT_NAME='' "$PYTHON" hgweb.cgi \
   >    | grep '^[0-9]) ' # ignores HTML output
   1) foo imported
   1) bar imported
@@ -164,7 +178,7 @@
   $ PATH_INFO='/shortlog'
 #endif
   $ export PATH_INFO
-  $ SCRIPT_NAME='' QUERY_STRING='rev=foo() and bar()' $PYTHON hgweb.cgi \
+  $ SCRIPT_NAME='' QUERY_STRING='rev=foo() and bar()' "$PYTHON" hgweb.cgi \
   >     | grep '<a href="/rev/[0-9a-z]*">'
      <a href="/rev/c24b9ac61126">add file</a>
 
@@ -173,6 +187,13 @@
 
 Check "from __future__ import absolute_import" support for external libraries
 
+(import-checker.py reports issues for some of heredoc python code
+fragments below, because import-checker.py does not know test specific
+package hierarchy. NO_CHECK_* should be used as a limit mark of
+heredoc, in order to make import-checker.py ignore them. For
+simplicity, all python code fragments below are generated with such
+limit mark, regardless of importing module or not.)
+
 #if windows
   $ PATHSEP=";"
 #else
@@ -186,30 +207,33 @@
   $ touch $TESTTMP/libroot/mod/__init__.py
   $ echo "s = 'libroot/mod/ambig.py'" > $TESTTMP/libroot/mod/ambig.py
 
-  $ cat > $TESTTMP/libroot/mod/ambigabs.py <<EOF
-  > from __future__ import absolute_import
+  $ cat > $TESTTMP/libroot/mod/ambigabs.py <<NO_CHECK_EOF
+  > from __future__ import absolute_import, print_function
   > import ambig # should load "libroot/ambig.py"
   > s = ambig.s
-  > EOF
-  $ cat > loadabs.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > loadabs.py <<NO_CHECK_EOF
   > import mod.ambigabs as ambigabs
   > def extsetup():
-  >     print('ambigabs.s=%s' % ambigabs.s)
-  > EOF
+  >     print('ambigabs.s=%s' % ambigabs.s, flush=True)
+  > NO_CHECK_EOF
+  $ $PYTHON $TESTTMP/unflush.py loadabs.py
   $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadabs=loadabs.py root)
   ambigabs.s=libroot/ambig.py
   $TESTTMP/a
 
-#if no-py3k
-  $ cat > $TESTTMP/libroot/mod/ambigrel.py <<EOF
+#if no-py3
+  $ cat > $TESTTMP/libroot/mod/ambigrel.py <<NO_CHECK_EOF
+  > from __future__ import print_function
   > import ambig # should load "libroot/mod/ambig.py"
   > s = ambig.s
-  > EOF
-  $ cat > loadrel.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > loadrel.py <<NO_CHECK_EOF
   > import mod.ambigrel as ambigrel
   > def extsetup():
-  >     print('ambigrel.s=%s' % ambigrel.s)
-  > EOF
+  >     print('ambigrel.s=%s' % ambigrel.s, flush=True)
+  > NO_CHECK_EOF
+  $ $PYTHON $TESTTMP/unflush.py loadrel.py
   $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadrel=loadrel.py root)
   ambigrel.s=libroot/mod/ambig.py
   $TESTTMP/a
@@ -218,45 +242,46 @@
 Check absolute/relative import of extension specific modules
 
   $ mkdir $TESTTMP/extroot
-  $ cat > $TESTTMP/extroot/bar.py <<EOF
-  > s = 'this is extroot.bar'
-  > EOF
+  $ cat > $TESTTMP/extroot/bar.py <<NO_CHECK_EOF
+  > s = b'this is extroot.bar'
+  > NO_CHECK_EOF
   $ mkdir $TESTTMP/extroot/sub1
-  $ cat > $TESTTMP/extroot/sub1/__init__.py <<EOF
-  > s = 'this is extroot.sub1.__init__'
-  > EOF
-  $ cat > $TESTTMP/extroot/sub1/baz.py <<EOF
-  > s = 'this is extroot.sub1.baz'
-  > EOF
-  $ cat > $TESTTMP/extroot/__init__.py <<EOF
-  > s = 'this is extroot.__init__'
-  > import foo
+  $ cat > $TESTTMP/extroot/sub1/__init__.py <<NO_CHECK_EOF
+  > s = b'this is extroot.sub1.__init__'
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extroot/sub1/baz.py <<NO_CHECK_EOF
+  > s = b'this is extroot.sub1.baz'
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extroot/__init__.py <<NO_CHECK_EOF
+  > from __future__ import absolute_import
+  > s = b'this is extroot.__init__'
+  > from . import foo
   > def extsetup(ui):
-  >     ui.write('(extroot) ', foo.func(), '\n')
+  >     ui.write(b'(extroot) ', foo.func(), b'\n')
   >     ui.flush()
-  > EOF
+  > NO_CHECK_EOF
 
-  $ cat > $TESTTMP/extroot/foo.py <<EOF
+  $ cat > $TESTTMP/extroot/foo.py <<NO_CHECK_EOF
   > # test absolute import
   > buf = []
   > def func():
   >     # "not locals" case
   >     import extroot.bar
-  >     buf.append('import extroot.bar in func(): %s' % extroot.bar.s)
-  >     return '\n(extroot) '.join(buf)
-  > # "fromlist == ('*',)" case
+  >     buf.append(b'import extroot.bar in func(): %s' % extroot.bar.s)
+  >     return b'\n(extroot) '.join(buf)
+  > # b"fromlist == ('*',)" case
   > from extroot.bar import *
-  > buf.append('from extroot.bar import *: %s' % s)
+  > buf.append(b'from extroot.bar import *: %s' % s)
   > # "not fromlist" and "if '.' in name" case
   > import extroot.sub1.baz
-  > buf.append('import extroot.sub1.baz: %s' % extroot.sub1.baz.s)
+  > buf.append(b'import extroot.sub1.baz: %s' % extroot.sub1.baz.s)
   > # "not fromlist" and NOT "if '.' in name" case
   > import extroot
-  > buf.append('import extroot: %s' % extroot.s)
+  > buf.append(b'import extroot: %s' % extroot.s)
   > # NOT "not fromlist" and NOT "level != -1" case
   > from extroot.bar import s
-  > buf.append('from extroot.bar import s: %s' % s)
-  > EOF
+  > buf.append(b'from extroot.bar import s: %s' % s)
+  > NO_CHECK_EOF
   $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.extroot=$TESTTMP/extroot root)
   (extroot) from extroot.bar import *: this is extroot.bar
   (extroot) import extroot.sub1.baz: this is extroot.sub1.baz
@@ -265,10 +290,10 @@
   (extroot) import extroot.bar in func(): this is extroot.bar
   $TESTTMP/a
 
-#if no-py3k
+#if no-py3
   $ rm "$TESTTMP"/extroot/foo.*
   $ rm -Rf "$TESTTMP/extroot/__pycache__"
-  $ cat > $TESTTMP/extroot/foo.py <<EOF
+  $ cat > $TESTTMP/extroot/foo.py <<NO_CHECK_EOF
   > # test relative import
   > buf = []
   > def func():
@@ -288,7 +313,7 @@
   > # NOT "not fromlist" and NOT "level != -1" case
   > from bar import s
   > buf.append('from bar import s: %s' % s)
-  > EOF
+  > NO_CHECK_EOF
   $ hg --config extensions.extroot=$TESTTMP/extroot root
   (extroot) from bar import *: this is extroot.bar
   (extroot) import sub1.baz: this is extroot.sub1.baz
@@ -320,16 +345,16 @@
   $ touch $TESTTMP/extlibroot/lsub1/__init__.py
   $ touch $TESTTMP/extlibroot/lsub1/lsub2/__init__.py
 
-  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/called.py <<EOF
+  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/called.py <<NO_CHECK_EOF
   > def func():
-  >     return "this is extlibroot.lsub1.lsub2.called.func()"
-  > EOF
-  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/unused.py <<EOF
+  >     return b"this is extlibroot.lsub1.lsub2.called.func()"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/unused.py <<NO_CHECK_EOF
   > raise Exception("extlibroot.lsub1.lsub2.unused is loaded unintentionally")
-  > EOF
-  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/used.py <<EOF
-  > detail = "this is extlibroot.lsub1.lsub2.used"
-  > EOF
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/lsub1/lsub2/used.py <<NO_CHECK_EOF
+  > detail = b"this is extlibroot.lsub1.lsub2.used"
+  > NO_CHECK_EOF
 
 Setup sub-package of "external library", which causes instantiation of
 demandmod in "recurse down the module chain" code path. Relative
@@ -337,45 +362,45 @@
 >=1 " doesn't cause instantiation of demandmod.
 
   $ mkdir -p $TESTTMP/extlibroot/recursedown/abs
-  $ cat > $TESTTMP/extlibroot/recursedown/abs/used.py <<EOF
-  > detail = "this is extlibroot.recursedown.abs.used"
-  > EOF
-  $ cat > $TESTTMP/extlibroot/recursedown/abs/__init__.py <<EOF
+  $ cat > $TESTTMP/extlibroot/recursedown/abs/used.py <<NO_CHECK_EOF
+  > detail = b"this is extlibroot.recursedown.abs.used"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/recursedown/abs/__init__.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
   > from extlibroot.recursedown.abs.used import detail
-  > EOF
+  > NO_CHECK_EOF
 
   $ mkdir -p $TESTTMP/extlibroot/recursedown/legacy
-  $ cat > $TESTTMP/extlibroot/recursedown/legacy/used.py <<EOF
-  > detail = "this is extlibroot.recursedown.legacy.used"
-  > EOF
-  $ cat > $TESTTMP/extlibroot/recursedown/legacy/__init__.py <<EOF
+  $ cat > $TESTTMP/extlibroot/recursedown/legacy/used.py <<NO_CHECK_EOF
+  > detail = b"this is extlibroot.recursedown.legacy.used"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/recursedown/legacy/__init__.py <<NO_CHECK_EOF
   > # legacy style (level == -1) import
   > from extlibroot.recursedown.legacy.used import detail
-  > EOF
+  > NO_CHECK_EOF
 
-  $ cat > $TESTTMP/extlibroot/recursedown/__init__.py <<EOF
+  $ cat > $TESTTMP/extlibroot/recursedown/__init__.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
   > from extlibroot.recursedown.abs import detail as absdetail
   > from .legacy import detail as legacydetail
-  > EOF
+  > NO_CHECK_EOF
 
 Setup package that re-exports an attribute of its submodule as the same
 name. This leaves 'shadowing.used' pointing to 'used.detail', but still
 the submodule 'used' should be somehow accessible. (issue5617)
 
   $ mkdir -p $TESTTMP/extlibroot/shadowing
-  $ cat > $TESTTMP/extlibroot/shadowing/used.py <<EOF
-  > detail = "this is extlibroot.shadowing.used"
-  > EOF
-  $ cat > $TESTTMP/extlibroot/shadowing/proxied.py <<EOF
+  $ cat > $TESTTMP/extlibroot/shadowing/used.py <<NO_CHECK_EOF
+  > detail = b"this is extlibroot.shadowing.used"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/shadowing/proxied.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
   > from extlibroot.shadowing.used import detail
-  > EOF
-  $ cat > $TESTTMP/extlibroot/shadowing/__init__.py <<EOF
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/extlibroot/shadowing/__init__.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
   > from .used import detail as used
-  > EOF
+  > NO_CHECK_EOF
 
 Setup extension local modules to be imported with "absolute_import"
 feature.
@@ -384,33 +409,35 @@
   $ touch $TESTTMP/absextroot/xsub1/__init__.py
   $ touch $TESTTMP/absextroot/xsub1/xsub2/__init__.py
 
-  $ cat > $TESTTMP/absextroot/xsub1/xsub2/called.py <<EOF
+  $ cat > $TESTTMP/absextroot/xsub1/xsub2/called.py <<NO_CHECK_EOF
   > def func():
-  >     return "this is absextroot.xsub1.xsub2.called.func()"
-  > EOF
-  $ cat > $TESTTMP/absextroot/xsub1/xsub2/unused.py <<EOF
+  >     return b"this is absextroot.xsub1.xsub2.called.func()"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/absextroot/xsub1/xsub2/unused.py <<NO_CHECK_EOF
   > raise Exception("absextroot.xsub1.xsub2.unused is loaded unintentionally")
-  > EOF
-  $ cat > $TESTTMP/absextroot/xsub1/xsub2/used.py <<EOF
-  > detail = "this is absextroot.xsub1.xsub2.used"
-  > EOF
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/absextroot/xsub1/xsub2/used.py <<NO_CHECK_EOF
+  > detail = b"this is absextroot.xsub1.xsub2.used"
+  > NO_CHECK_EOF
 
 Setup extension local modules to examine whether demand importing
 works as expected in "level > 1" case.
 
-  $ cat > $TESTTMP/absextroot/relimportee.py <<EOF
-  > detail = "this is absextroot.relimportee"
-  > EOF
-  $ cat > $TESTTMP/absextroot/xsub1/xsub2/relimporter.py <<EOF
+  $ cat > $TESTTMP/absextroot/relimportee.py <<NO_CHECK_EOF
+  > detail = b"this is absextroot.relimportee"
+  > NO_CHECK_EOF
+  $ cat > $TESTTMP/absextroot/xsub1/xsub2/relimporter.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
+  > from mercurial import pycompat
   > from ... import relimportee
-  > detail = "this relimporter imports %r" % (relimportee.detail)
-  > EOF
+  > detail = b"this relimporter imports %r" % (
+  >     pycompat.bytestr(relimportee.detail))
+  > NO_CHECK_EOF
 
 Setup modules, which actually import extension local modules at
 runtime.
 
-  $ cat > $TESTTMP/absextroot/absolute.py << EOF
+  $ cat > $TESTTMP/absextroot/absolute.py << NO_CHECK_EOF
   > from __future__ import absolute_import
   > 
   > # import extension local modules absolutely (level = 0)
@@ -422,9 +449,9 @@
   >     result.append(used.detail)
   >     result.append(func())
   >     return result
-  > EOF
+  > NO_CHECK_EOF
 
-  $ cat > $TESTTMP/absextroot/relative.py << EOF
+  $ cat > $TESTTMP/absextroot/relative.py << NO_CHECK_EOF
   > from __future__ import absolute_import
   > 
   > # import extension local modules relatively (level == 1)
@@ -440,11 +467,11 @@
   >     result.append(func())
   >     result.append(relimporter.detail)
   >     return result
-  > EOF
+  > NO_CHECK_EOF
 
 Setup main procedure of extension.
 
-  $ cat > $TESTTMP/absextroot/__init__.py <<EOF
+  $ cat > $TESTTMP/absextroot/__init__.py <<NO_CHECK_EOF
   > from __future__ import absolute_import
   > from mercurial import registrar
   > cmdtable = {}
@@ -458,12 +485,12 @@
   > @command(b'showabsolute', [], norepo=True)
   > def showabsolute(ui, *args, **opts):
   >     from absextroot import absolute
-  >     ui.write(b'ABS: %s\n' % '\nABS: '.join(absolute.getresult()))
+  >     ui.write(b'ABS: %s\n' % b'\nABS: '.join(absolute.getresult()))
   > 
   > @command(b'showrelative', [], norepo=True)
   > def showrelative(ui, *args, **opts):
   >     from . import relative
-  >     ui.write(b'REL: %s\n' % '\nREL: '.join(relative.getresult()))
+  >     ui.write(b'REL: %s\n' % b'\nREL: '.join(relative.getresult()))
   > 
   > # import modules from external library
   > from extlibroot.lsub1.lsub2 import used as lused, unused as lunused
@@ -478,8 +505,8 @@
   >     result.append(absdetail)
   >     result.append(legacydetail)
   >     result.append(proxied.detail)
-  >     ui.write(b'LIB: %s\n' % '\nLIB: '.join(result))
-  > EOF
+  >     ui.write(b'LIB: %s\n' % b'\nLIB: '.join(result))
+  > NO_CHECK_EOF
 
 Examine module importing.
 
@@ -509,11 +536,11 @@
   $ f -q $TESTTMP/extlibroot/lsub1/lsub2/notexist.py
   $TESTTMP/extlibroot/lsub1/lsub2/notexist.py: file not found
 
-  $ cat > $TESTTMP/notexist.py <<EOF
+  $ cat > $TESTTMP/notexist.py <<NO_CHECK_EOF
   > text = 'notexist.py at root is loaded unintentionally\n'
-  > EOF
+  > NO_CHECK_EOF
 
-  $ cat > $TESTTMP/checkrelativity.py <<EOF
+  $ cat > $TESTTMP/checkrelativity.py <<NO_CHECK_EOF
   > from mercurial import registrar
   > cmdtable = {}
   > command = registrar.command(cmdtable)
@@ -528,12 +555,16 @@
   >         return 1 # unintentional success
   >     except ImportError:
   >         pass # intentional failure
-  > EOF
+  > NO_CHECK_EOF
 
   $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity)
 
 #endif
 
+(Here, module importing tests are finished. Therefore, use other than
+NO_CHECK_* limit mark for heredoc python files, in order to apply
+import-checker.py or so on their contents)
+
 Make sure a broken uisetup doesn't globally break hg:
   $ cat > $TESTTMP/baduisetup.py <<EOF
   > def uisetup(ui):
@@ -547,8 +578,8 @@
       uisetup(ui)
     File "$TESTTMP/baduisetup.py", line 2, in uisetup
       1/0
-  ZeroDivisionError: integer division or modulo by zero
-  *** failed to set up extension baduisetup: integer division or modulo by zero
+  ZeroDivisionError: * by zero (glob)
+  *** failed to set up extension baduisetup: * by zero (glob)
   Mercurial Distributed SCM (version *) (glob)
   (see https://mercurial-scm.org for more information)
   
@@ -603,6 +634,8 @@
   
   options:
   
+   -T --template TEMPLATE display with template
+  
   (some details hidden, use --verbose to show complete help)
 
 
@@ -613,7 +646,7 @@
   
   options:
   
-   -T --template TEMPLATE display with template (EXPERIMENTAL)
+   -T --template TEMPLATE display with template
   
   global options ([+] can be repeated):
   
@@ -652,7 +685,7 @@
   
   options:
   
-   -T --template TEMPLATE display with template (EXPERIMENTAL)
+   -T --template TEMPLATE display with template
   
   global options ([+] can be repeated):
   
@@ -1229,11 +1262,12 @@
 
   $ mkdir hgext
   $ echo > hgext/__init__.py
-  $ cat > hgext/broken.py <<EOF
+  $ cat > hgext/broken.py <<NO_CHECK_EOF
   > "broken extension'
-  > EOF
+  > NO_CHECK_EOF
   $ cat > path.py <<EOF
-  > import os, sys
+  > import os
+  > import sys
   > sys.path.insert(0, os.environ['HGEXTPATH'])
   > EOF
   $ HGEXTPATH=`pwd`
@@ -1254,7 +1288,7 @@
   > def g():
   >     pass
   > EOF
-  $ hg --config extensions.path=./path.py help foo > /dev/null
+  $ hg --config extensions.path=./path.py help foo
   abort: no such help topic: foo
   (try 'hg help --keyword foo')
   [255]
@@ -1540,6 +1574,7 @@
   reposetup() for $TESTTMP/reposetup-test/src
   reposetup() for $TESTTMP/reposetup-test/src (chg !)
 
+#if no-extraextensions
   $ hg --cwd src debugextensions
   reposetup() for $TESTTMP/reposetup-test/src
   dodo (untested!)
@@ -1547,6 +1582,7 @@
   mq
   reposetuptest (untested!)
   strip
+#endif
 
   $ hg clone -U src clone-dst1
   reposetup() for $TESTTMP/reposetup-test/src
@@ -1683,6 +1719,7 @@
   *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo
   *** (use @command decorator to register 'deprecatedcmd')
   hg: unknown command 'deprecatedcmd'
+  (use 'hg help' for a list of commands)
   [255]
 
  the extension shouldn't be loaded at all so the mq works:
@@ -1733,19 +1770,21 @@
   $ hg init $TESTTMP/opt-unicode-default
 
   $ cat > $TESTTMP/test_unicode_default_value.py << EOF
+  > from __future__ import print_function
   > from mercurial import registrar
   > cmdtable = {}
   > command = registrar.command(cmdtable)
   > @command(b'dummy', [(b'', b'opt', u'value', u'help')], 'ext [OPTIONS]')
   > def ext(*args, **opts):
-  >     print(opts[b'opt'])
+  >     print(opts[b'opt'], flush=True)
   > EOF
+  $ $PYTHON $TESTTMP/unflush.py $TESTTMP/test_unicode_default_value.py
   $ cat > $TESTTMP/opt-unicode-default/.hg/hgrc << EOF
   > [extensions]
   > test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
   > EOF
   $ hg -R $TESTTMP/opt-unicode-default dummy
-  *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy
+  *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode *'value' found in cmdtable.dummy (glob)
   *** (use b'' to make it byte string)
   hg: unknown command 'dummy'
   (did you mean summary?)
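
The unflush.py helper added at the top of this file exists because flush=True
only became a valid print() keyword argument in Python 3.3; on Python 2 the
tests rewrite their generated extensions to drop it. The same idea as a
standalone function:

    import sys

    def unflush(path):
        if sys.version_info[0] >= 3:
            return  # print(..., flush=True) is fine; nothing to strip
        with open(path, 'rb') as f:
            data = f.read()
        with open(path, 'wb') as f:
            f.write(data.replace(b', flush=True', b''))
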
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-corrupt.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,83 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+  $ for i in 0 1 2 3 4; do
+  >   echo $i >> a
+  >   echo $i >> b
+  >   hg commit -A -m $i a b
+  > done
+
+use the "debugbuildannotatecache" command to build annotate cache at rev 0
+
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=0
+  fastannotate: a: 1 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"debugbuildannotatecache" should work with broken cache (and other files would
+be built without being affected). note: linelog being broken is only noticed
+when we try to append to it.
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=1
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 2 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ echo 'CANNOT REUSE!' > .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=2
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 3 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 4 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=3
+  $ hg debugbuildannotatecache --debug --config fastannotate.mainbranch=4
+  fastannotate: a: rebuilding broken cache
+  fastannotate: a: 5 new changesets in the main branch
+  fastannotate: b: 1 new changesets in the main branch
+
+"fastannotate" should deal with file corruption as well
+
+  $ rm -rf .hg/fastannotate
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' >> .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 0 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 1 new changesets in the main branch
+  0: 0
+
+  $ echo 'CORRUPT!' > .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.l
+  $ hg fastannotate --debug -r 1 a
+  fastannotate: a: using fast path (resolved fctx: True)
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 2 new changesets in the main branch
+  0: 0
+  1: 1
+
+  $ rm .hg/fastannotate/default/a.m
+  $ hg fastannotate --debug -r 2 a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 0
+  1: 1
+  2: 2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-diffopts.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,33 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+changes to whitespaces
+
+  $ cat >> a << EOF
+  > 1
+  > 
+  >  
+  >  2
+  > EOF
+  $ hg commit -qAm '1'
+  $ cat > a << EOF
+  >  1
+  > 
+  > 2
+  > 
+  > 
+  > 3
+  > EOF
+  $ hg commit -m 2
+  $ hg fastannotate -wB a
+  0:  1
+  0: 
+  1: 2
+  0: 
+  1: 
+  1: 3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-hg.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,764 @@
+(this file is backported from core hg tests/test-annotate.t)
+
+  $ cat >> $HGRCPATH << EOF
+  > [diff]
+  > git=1
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > modes=fctx
+  > forcefollow=False
+  > mainbranch=.
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+init
+
+  $ hg init repo
+  $ cd repo
+
+commit
+
+  $ echo 'a' > a
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding a
+
+annotate -c
+
+  $ hg annotate -c a
+  8435f90966e4: a
+
+annotate -cl
+
+  $ hg annotate -cl a
+  8435f90966e4:1: a
+
+annotate -d
+
+  $ hg annotate -d a
+  Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -n
+
+  $ hg annotate -n a
+  0: a
+
+annotate -nl
+
+  $ hg annotate -nl a
+  0:1: a
+
+annotate -u
+
+  $ hg annotate -u a
+  nobody: a
+
+annotate -cdnu
+
+  $ hg annotate -cdnu a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
+
+annotate -cdnul
+
+  $ hg annotate -cdnul a
+  nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
+
+annotate (JSON)
+
+  $ hg annotate -Tjson a
+  [
+   {
+    "lines": [{"line": "a\n", "rev": 0}],
+    "path": "a"
+   }
+  ]
+
+  $ hg annotate -Tjson -cdfnul a
+  [
+   {
+    "lines": [{"date": [1.0, 0], "line": "a\n", "lineno": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "path": "a", "rev": 0, "user": "nobody"}],
+    "path": "a"
+   }
+  ]
+
+  $ cat <<EOF >>a
+  > a
+  > a
+  > EOF
+  $ hg ci -ma1 -d '1 0'
+  $ hg cp a b
+  $ hg ci -mb -d '1 0'
+  $ cat <<EOF >> b
+  > b4
+  > b5
+  > b6
+  > EOF
+  $ hg ci -mb2 -d '2 0'
+
+annotate -n b
+
+  $ hg annotate -n b
+  0: a
+  1: a
+  1: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate --no-follow b
+
+  $ hg annotate --no-follow b
+  2: a
+  2: a
+  2: a
+  3: b4
+  3: b5
+  3: b6
+
+annotate -nl b
+
+  $ hg annotate -nl b
+  0:1: a
+  1:2: a
+  1:3: a
+  3:4: b4
+  3:5: b5
+  3:6: b6
+
+annotate -nf b
+
+  $ hg annotate -nf b
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  3 b: b5
+  3 b: b6
+
+annotate -nlf b
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  3 b:5: b5
+  3 b:6: b6
+
+  $ hg up -C 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ hg ci -mb2.1 -d '2 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci -mmergeb -d '3 0'
+
+annotate after merge
+(note: the first one falls back to the vanilla annotate which does not use linelog)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: rebuilding broken cache
+  fastannotate: b: 5 new changesets in the main branch
+  0 a: a
+  1 a: a
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+
+(difference explained below)
+
+  $ hg annotate -nf b --debug
+  fastannotate: b: using fast path (resolved fctx: False)
+  0 a: a
+  1 a: a
+  1 a: a
+  4 b: b4
+  4 b: c
+  4 b: b5
+
+annotate after merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '2..5'
+  @    5:64afcdf8e29e063c635be123d8d2fb160af00f7e
+  |\
+  | o  4:5fbdc1152d97597717021ad9e063061b200f146bdiff --git a/b b/b
+  | |  --- a/b
+  | |  +++ b/b
+  | |  @@ -1,3 +1,6 @@
+  | |   a
+  | |   a
+  | |   a
+  | |  +b4
+  | |  +c
+  | |  +b5
+  | |
+  o |  3:37ec9f5c3d1f99572d7075971cb4876e2139b52fdiff --git a/b b/b
+  |/   --- a/b
+  |    +++ b/b
+  |    @@ -1,3 +1,6 @@
+  |     a
+  |     a
+  |     a
+  |    +b4
+  |    +b5
+  |    +b6
+  |
+  o  2:3086dbafde1ce745abfc8d2d367847280aabae9ddiff --git a/a b/b
+  |  copy from a
+  ~  copy to b
+  
+
+(in this case, "b4", "b5" could be considered introduced by either rev 3, or rev 4.
+ and that causes the rev number difference)
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+
+  $ hg annotate -nlf b
+  0 a:1: a
+  1 a:2: a
+  1 a:3: a
+  4 b:4: b4
+  4 b:5: c
+  4 b:6: b5
+
+  $ hg up -C 1
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg cp a b
+  $ cat <<EOF > b
+  > a
+  > z
+  > a
+  > EOF
+  $ hg ci -mc -d '3 0'
+  created new head
+  $ hg merge
+  merging b
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ cat <<EOF >> b
+  > b4
+  > c
+  > b5
+  > EOF
+  $ echo d >> b
+  $ hg ci -mmerge2 -d '4 0'
+
+annotate after rename merge
+
+  $ hg annotate -nf b
+  0 a: a
+  6 b: z
+  1 a: a
+  3 b: b4
+  4 b: c
+  3 b: b5
+  7 b: d
+
+annotate after rename merge with -l
+(fastannotate differs from annotate)
+
+  $ hg log -Gp -T '{rev}:{node}' -r '0+1+6+7'
+  @    7:6284bb6c38fef984a929862a53bbc71ce9eafa81diff --git a/b b/b
+  |\   --- a/b
+  | :  +++ b/b
+  | :  @@ -1,3 +1,7 @@
+  | :   a
+  | :   z
+  | :   a
+  | :  +b4
+  | :  +c
+  | :  +b5
+  | :  +d
+  | :
+  o :  6:b80e3e32f75a6a67cd4ac85496a11511e9112816diff --git a/a b/b
+  :/   copy from a
+  :    copy to b
+  :    --- a/a
+  :    +++ b/b
+  :    @@ -1,3 +1,3 @@
+  :    -a (?)
+  :     a
+  :    +z
+  :     a
+  :    -a (?)
+  :
+  o  1:762f04898e6684ff713415f7b8a8d53d33f96c92diff --git a/a b/a
+  |  --- a/a
+  |  +++ b/a
+  |  @@ -1,1 +1,3 @@
+  |   a
+  |  +a
+  |  +a
+  |
+  o  0:8435f90966e442695d2ded29fdade2bac5ad8065diff --git a/a b/a
+     new file mode 100644
+     --- /dev/null
+     +++ b/a
+     @@ -0,0 +1,1 @@
+     +a
+  
+
+(note on question marks:
+ the upstream bdiff change (96f2f50d923f+3633403888ae+8c0c75aa3ff4+5c4e2636c1a9
+ +38ed54888617) alters the output so deletion is not always at the end of the
+ output. for example:
+ | a | b | old | new | # old: e1d6aa0e4c3a, new: 8836f13e3c5b
+ |-------------------|
+ | a | a |  a  | -a  |
+ | a | z | +z  |  a  |
+ | a | a |  a  | +z  |
+ |   |   | -a  |  a  |
+ |-------------------|
+ | a | a |     a     |
+ | a | a |     a     |
+ | a |   |    -a     |
+ this leads to more question marks below)
+
+(rev 1 adds two "a"s and rev 6 deletes one "a".
+ the "a" that rev 6 deletes could be either the first or the second "a" of those two "a"s added by rev 1.
+ and that causes the line number difference)
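+
+(a tiny illustration, not part of the test: both edit scripts below are valid
+ways to turn ["a", "a"] into ["a"], and whichever one a diff algorithm picks
+determines which line gets blamed; in Python:
+
+a1 = ["a", "a"]; del a1[0]  # script 1: delete the first "a"
+a2 = ["a", "a"]; del a2[1]  # script 2: delete the second "a"
+assert a1 == a2 == ["a"]    # same result, different blame
+
+since both scripts are correct, annotate and fastannotate may legitimately
+disagree about which of the identical lines survived)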
+
+  $ hg annotate -nlf b --config fastannotate.modes=
+  0 a:1: a
+  6 b:2: z
+  1 a:3: a
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+  $ hg annotate -nlf b
+  0 a:1: a (?)
+  1 a:2: a (?)
+  6 b:2: z
+  1 a:2: a (?)
+  1 a:3: a (?)
+  3 b:4: b4
+  4 b:5: c
+  3 b:5: b5
+  7 b:7: d
+
+Issue2807: alignment of line numbers with -l
+(fastannotate differs from annotate, same reason as above)
+
+  $ echo more >> b
+  $ hg ci -mmore -d '5 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '6 0'
+  $ echo more >> b
+  $ hg ci -mmore -d '7 0'
+  $ hg annotate -nlf b
+   0 a: 1: a (?)
+   1 a: 2: a (?)
+   6 b: 2: z
+   1 a: 2: a (?)
+   1 a: 3: a (?)
+   3 b: 4: b4
+   4 b: 5: c
+   3 b: 5: b5
+   7 b: 7: d
+   8 b: 8: more
+   9 b: 9: more
+  10 b:10: more
+
+linkrev vs rev
+
+  $ hg annotate -r tip -n a
+  0: a
+  1: a
+  1: a
+
+linkrev vs rev with -l
+
+  $ hg annotate -r tip -nl a
+  0:1: a
+  1:2: a
+  1:3: a
+
+Issue589: "undelete" sequence leads to crash
+
+annotate was crashing when trying to --follow something like A -> B -> A
+
+generate ABA rename configuration
+
+  $ echo foo > foo
+  $ hg add foo
+  $ hg ci -m addfoo
+  $ hg rename foo bar
+  $ hg ci -m renamefoo
+  $ hg rename bar foo
+  $ hg ci -m renamebar
+
+annotate after ABA with follow
+
+  $ hg annotate --follow foo
+  foo: foo
+
+missing file
+
+  $ hg ann nosuchfile
+  abort: nosuchfile: no such file in rev e9e6b4fa872f
+  [255]
+
+annotate file without '\n' on last line
+
+  $ printf "" > c
+  $ hg ci -A -m test -u nobody -d '1 0'
+  adding c
+  $ hg annotate c
+  $ printf "a\nb" > c
+  $ hg ci -m test
+  $ hg annotate c
+  [0-9]+: a (re)
+  [0-9]+: b (re)
+
+Issue3841: check annotation of a file whose filelog includes a merge
+between the revision and its ancestor
+
+to reproduce the situation with recent Mercurial, this script uses (1)
+"hg debugsetparents" to merge without the ancestor check done by "hg merge",
+and (2) an extension that allows filelog merging between the revision
+and its ancestor by overriding "repo._filecommit".
+
+  $ cat > ../legacyrepo.py <<EOF
+  > from mercurial import error, node
+  > def reposetup(ui, repo):
+  >     class legacyrepo(repo.__class__):
+  >         def _filecommit(self, fctx, manifest1, manifest2,
+  >                         linkrev, tr, changelist):
+  >             fname = fctx.path()
+  >             text = fctx.data()
+  >             flog = self.file(fname)
+  >             fparent1 = manifest1.get(fname, node.nullid)
+  >             fparent2 = manifest2.get(fname, node.nullid)
+  >             meta = {}
+  >             copy = fctx.renamed()
+  >             if copy and copy[0] != fname:
+  >                 raise error.Abort('copying is not supported')
+  >             if fparent2 != node.nullid:
+  >                 changelist.append(fname)
+  >                 return flog.add(text, meta, tr, linkrev,
+  >                                 fparent1, fparent2)
+  >             raise error.Abort('only merging is supported')
+  >     repo.__class__ = legacyrepo
+  > EOF
+
+  $ cat > baz <<EOF
+  > 1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg add baz
+  $ hg commit -m "baz:0"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:1"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3
+  > 4
+  > 5
+  > EOF
+  $ hg debugsetparents 17 17
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:2"
+  $ hg debugindexdot baz
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  16: 3
+  16: 4
+  16: 5
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4
+  > 5
+  > EOF
+  $ hg commit -m "baz:3"
+
+  $ cat > baz <<EOF
+  > 1 baz:1
+  > 2 baz:2
+  > 3 baz:3
+  > 4 baz:4
+  > 5
+  > EOF
+  $ hg debugsetparents 19 18
+  $ hg --config extensions.legacyrepo=../legacyrepo.py  commit -m "baz:4"
+  $ hg debugindexdot baz
+  digraph G {
+  	-1 -> 0
+  	0 -> 1
+  	1 -> 2
+  	1 -> 2
+  	2 -> 3
+  	3 -> 4
+  	2 -> 4
+  }
+  $ hg annotate baz
+  17: 1 baz:1
+  18: 2 baz:2
+  19: 3 baz:3
+  20: 4 baz:4
+  16: 5
+
+annotate clean file
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+
+annotate modified file
+
+  $ echo foofoo >> foo
+  $ hg annotate -r "wdir()" foo
+  11 : foo
+  20+: foofoo
+
+  $ hg annotate -cr "wdir()" foo
+  472b18db256d : foo
+  b6bedd5477e7+: foofoo
+
+  $ hg annotate -ncr "wdir()" foo
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+  $ hg annotate --debug -ncr "wdir()" foo
+  11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
+  20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
+
+  $ hg annotate -udr "wdir()" foo
+  test Thu Jan 01 00:00:00 1970 +0000: foo
+  test [A-Za-z0-9:+ ]+: foofoo (re)
+
+  $ hg annotate -ncr "wdir()" -Tjson foo
+  [
+   {
+    "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": "ffffffffffffffffffffffffffffffffffffffff", "rev": 2147483647}],
+    "path": "foo"
+   }
+  ]
+
+annotate added file
+
+  $ echo bar > bar
+  $ hg add bar
+  $ hg annotate -ncr "wdir()" bar
+  20 b6bedd5477e7+: bar
+
+annotate renamed file
+
+  $ hg rename foo renamefoo2
+  $ hg annotate -ncr "wdir()" renamefoo2
+  11 472b18db256d : foo
+  20 b6bedd5477e7+: foofoo
+
+annotate missing file
+
+  $ rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+annotate removed file
+
+  $ hg rm baz
+  $ hg annotate -ncr "wdir()" baz
+  abort: $TESTTMP/repo/baz: $ENOENT$ (windows !)
+  abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
+  [255]
+
+Test annotate with whitespace options
+
+  $ cd ..
+  $ hg init repo-ws
+  $ cd repo-ws
+  $ cat > a <<EOF
+  > aa
+  > 
+  > b b
+  > EOF
+  $ hg ci -Am "adda"
+  adding a
+  $ sed 's/EOL$//g' > a <<EOF
+  > a  a
+  > 
+  >  EOL
+  > b  b
+  > EOF
+  $ hg ci -m "changea"
+
+Annotate with no option
+
+  $ hg annotate a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+Annotate with --ignore-space-change
+
+  $ hg annotate --ignore-space-change a
+  1: a  a
+  1: 
+  0:  
+  0: b  b
+
+Annotate with --ignore-all-space
+
+  $ hg annotate --ignore-all-space a
+  0: a  a
+  0: 
+  1:  
+  0: b  b
+
+Annotate with --ignore-blank-lines (similar to no options case)
+
+  $ hg annotate --ignore-blank-lines a
+  1: a  a
+  0: 
+  1:  
+  1: b  b
+
+  $ cd ..
+
+Annotate with linkrev pointing to another branch
+------------------------------------------------
+
+create history with a filerev whose linkrev points to another branch
+
+  $ hg init branchedlinkrev
+  $ cd branchedlinkrev
+  $ echo A > a
+  $ hg commit -Am 'contentA'
+  adding a
+  $ echo B >> a
+  $ hg commit -m 'contentB'
+  $ hg up --rev 'desc(contentA)'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo unrelated > unrelated
+  $ hg commit -Am 'unrelated'
+  adding unrelated
+  created new head
+  $ hg graft -r 'desc(contentB)'
+  grafting 1:fd27c222e3e6 "contentB"
+  $ echo C >> a
+  $ hg commit -m 'contentC'
+  $ echo W >> a
+  $ hg log -G
+  @  changeset:   4:072f1e8df249
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentC
+  |
+  o  changeset:   3:ff38df03cc4b
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     contentB
+  |
+  o  changeset:   2:62aaf3f6fc06
+  |  parent:      0:f0932f74827e
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     unrelated
+  |
+  | o  changeset:   1:fd27c222e3e6
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     contentB
+  |
+  o  changeset:   0:f0932f74827e
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     contentA
+  
+
+Annotate should list ancestors of the starting revision only
+
+  $ hg annotate a
+  0: A
+  3: B
+  4: C
+
+  $ hg annotate a -r 'wdir()'
+  0 : A
+  3 : B
+  4 : C
+  4+: W
+
+Even when the starting revision is the linkrev-shadowed one:
+
+  $ hg annotate a -r 3
+  0: A
+  3: B
+
+  $ cd ..
+
+Issue5360: Deleted chunk in p1 of a merge changeset
+
+  $ hg init repo-5360
+  $ cd repo-5360
+  $ echo 1 > a
+  $ hg commit -A a -m 1
+  $ echo 2 >> a
+  $ hg commit -m 2
+  $ echo a > a
+  $ hg commit -m a
+  $ hg update '.^' -q
+  $ echo 3 >> a
+  $ hg commit -m 3 -q
+  $ hg merge 2 -q
+  $ cat > a << EOF
+  > b
+  > 1
+  > 2
+  > 3
+  > a
+  > EOF
+  $ hg resolve --mark -q
+  $ hg commit -m m
+  $ hg annotate a
+  4: b
+  0: 1
+  1: 2
+  3: 3
+  2: a
+
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-perfhack.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,182 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > perfhack=1
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+file and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-protocol.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,211 @@
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=@
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+setup the server repo
+
+  $ hg init repo-server
+  $ cd repo-server
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > server=1
+  > EOF
+  $ for i in 1 2 3 4; do
+  >   echo $i >> a
+  >   hg commit -A -m $i a
+  > done
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg bookmark @
+  $ cd ..
+
+setup the local repo
+
+  $ hg clone 'ssh://user@dummy/repo-server' repo-local -q
+  $ cd repo-local
+  $ cat >> .hg/hgrc << EOF
+  > [fastannotate]
+  > client=1
+  > clientfetchthreshold=0
+  > EOF
+  $ [ -d .hg/fastannotate ]
+  [1]
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+the cache can be reused and no download is necessary
+
+  $ hg fastannotate a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+
+if the client agrees on where the head of the master branch is, no
+re-download happens even if the client has more commits
+
+  $ echo 5 >> a
+  $ hg commit -m 5
+  $ hg bookmark -r 3 @ -f
+  $ hg fastannotate a --debug
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" (head of the master branch) and "@" is ahead
+of the server, the server can detect things are unchanged and does not return
+full contents (not that there is no "writing ... to fastannotate"), but the
+client can also build things up on its own (causing diverge)
+
+  $ hg bookmark -r 4 @ -f
+  $ hg fastannotate a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the client has a different "@" which is behind the server. no download is
+necessary
+
+  $ hg fastannotate a --debug --config fastannotate.mainbranch=2
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+define fastannotate on-disk paths
+
+  $ p1=.hg/fastannotate/default
+  $ p2=../repo-server/.hg/fastannotate/default
+
+revert bookmark change so the client is behind the server
+
+  $ hg bookmark -r 2 @ -f
+
+in the "fctx" mode with the "annotate" command, the client also downloads the
+cache. but not in the (default) "fastannotate" mode.
+
+  $ rm $p1/a.l $p1/a.m
+  $ hg annotate a --debug | grep 'fastannotate: writing'
+  [1]
+  $ hg annotate a --config fastannotate.modes=fctx --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+
+the fastannotate caches (built server-side, downloaded client-side) in the two
+repos have the same content (because the client downloads from the server)
+
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+in the "fctx" mode, the client could also build the cache locally
+
+  $ hg annotate a --config fastannotate.modes=fctx --debug --config fastannotate.mainbranch=4 | grep fastannotate
+  fastannotate: requesting 1 files
+  fastannotate: a: 1 new changesets in the main branch
+
+the server will rebuild a broken cache automatically
+
+  $ cp $p2/a.m $p2/a.m.bak
+  $ echo BROKEN1 > $p1/a.m
+  $ echo BROKEN2 > $p2/a.m
+  $ hg fastannotate a --debug | grep 'fastannotate: writing' | sort
+  fastannotate: writing 112 bytes to fastannotate/default/a.l
+  fastannotate: writing 94 bytes to fastannotate/default/a.m
+  $ diff $p1/a.m $p2/a.m
+  $ diff $p2/a.m $p2/a.m.bak
+
+use the "debugbuildannotatecache" command to build annotate cache
+
+  $ rm -rf $p1 $p2
+  $ hg --cwd ../repo-server debugbuildannotatecache a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  $ hg --cwd ../repo-local debugbuildannotatecache a --debug
+  running * (glob)
+  sending hello command
+  sending between command
+  remote: * (glob) (?)
+  remote: capabilities: * (glob)
+  remote: * (glob) (?)
+  sending protocaps command
+  fastannotate: requesting 1 files
+  sending getannotate command
+  fastannotate: writing * (glob)
+  fastannotate: writing * (glob)
+  $ diff $p1/a.l $p2/a.l
+  $ diff $p1/a.m $p2/a.m
+
+with the clientfetchthreshold config option, the client can build up the cache
+without downloading from the server
+
+  $ rm -rf $p1
+  $ hg fastannotate a --debug --config fastannotate.clientfetchthreshold=10
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+if the fastannotate directory is not writable, the fctx mode still works
+
+  $ rm -rf $p1
+  $ touch $p1
+  $ hg annotate a --debug --traceback --config fastannotate.modes=fctx
+  fastannotate: a: cache broken and deleted
+  fastannotate: prefetch failed: * (glob)
+  fastannotate: a: cache broken and deleted
+  fastannotate: falling back to the vanilla annotate: * (glob)
+  0: 1
+  1: 2
+  2: 3
+  3: 4
+  4: 5
+
+with serverbuildondemand=False, the server will not build anything
+
+  $ cat >> ../repo-server/.hg/hgrc <<EOF
+  > [fastannotate]
+  > serverbuildondemand=False
+  > EOF
+  $ rm -rf $p1 $p2
+  $ hg fastannotate a --debug | grep 'fastannotate: writing'
+  [1]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-renames.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,168 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > [fastannotate]
+  > mainbranch=main
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+add or rename files on top of the master branch
+
+  $ echo a1 > a
+  $ echo b1 > b
+  $ hg commit -qAm 1
+  $ hg bookmark -i main
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  0 b: b1
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: 1 new changesets in the main branch
+  0 a: a1
+  $ echo a2 >> a
+  $ cat > b << EOF
+  > b0
+  > b1
+  > EOF
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names'
+
+existing linelogs are not helpful with such renames in side branches
+
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 a: a1
+  1 b: a2
+
+move the main branch forward; a rebuild should happen
+
+  $ hg bookmark -i main -r . -q
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 2 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+
+for rev 0, the existing linelog is still useful for a, but not for b
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf b -r 0
+  fastannotate: b: linelog cannot help in annotating this revision
+  0 b: b1
+
+a rebuild can also be triggered if "the main branch last time" mismatches
+
+  $ echo a3 >> a
+  $ hg commit -m a3
+  $ cat >> b << EOF
+  > b3
+  > b4
+  > EOF
+  $ hg commit -m b4
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+linelog can be updated without being helpful
+
+  $ hg mv a t
+  $ hg mv b a
+  $ hg mv t b
+  $ hg commit -m 'swap names again'
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: 1 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+move the main branch forward again; rebuilds are one-time
+
+  $ hg bookmark -i main -q
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 4 new changesets in the main branch
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: cache broken and deleted
+  fastannotate: b: 4 new changesets in the main branch
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+  $ hg fastannotate --debug -nf b
+  fastannotate: b: using fast path (resolved fctx: True)
+  1 a: b0
+  0 b: b1
+  2 a: a3
+
+list changeset hashes to improve readability
+
+  $ hg log -T '{rev}:{node}\n'
+  4:980e1ab8c516350172928fba95b49ede3b643dca
+  3:14e123fedad9f491f5dde0beca2a767625a0a93a
+  2:96495c41e4c12218766f78cdf244e768d7718b0f
+  1:35c2b781234c994896aba36bd3245d3104e023df
+  0:653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+
+annotate a revision not in the linelog. linelog cannot be used, but does not get rebuilt either
+
+  $ hg fastannotate --debug -nf a -r 96495c41e4c12218766f78cdf244e768d7718b0f
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r 2
+  fastannotate: a: linelog cannot help in annotating this revision
+  1 a: b0
+  0 b: b1
+  2 a: a3
+  $ hg fastannotate --debug -nf a -r .
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  1 b: a2
+  3 b: b3
+  3 b: b4
+
+annotate an ancient revision where the path matches. linelog can be used
+
+  $ hg fastannotate --debug -nf a -r 0
+  fastannotate: a: using fast path (resolved fctx: True)
+  0 a: a1
+  $ hg fastannotate --debug -nf a -r 653e95416ebb5dbcc25bbc7f75568c9e01f7bd2f
+  fastannotate: a: using fast path (resolved fctx: False)
+  0 a: a1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate-revmap.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,198 @@
+from __future__ import absolute_import, print_function
+
+import os
+import tempfile
+
+from mercurial import (
+    pycompat,
+    util,
+)
+
+from hgext.fastannotate import error, revmap
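+
+# the revmap maps small integer revision numbers (as used by linelog) to
+# 20-byte changeset hashes, optionally recording a path and a side-branch
+# flag per revision; these tests exercise that on-disk structure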
+
+if pycompat.ispy3:
+    xrange = range
+
+def genhsh(i):
+    # bytechr works on both py2 and py3 (chr() would return str on py3)
+    return pycompat.bytechr(i) + b'\0' * 19
+
+def gettemppath():
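+    # reserve a unique temporary path, then delete the file so revmap can
+    # create it from scratch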
+    fd, path = tempfile.mkstemp()
+    os.close(fd)
+    os.unlink(path)
+    return path
+
+def ensure(condition):
+    if not condition:
+        raise RuntimeError('Unexpected')
+
+def testbasicreadwrite():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 0)
+    for i in xrange(5):
+        ensure(rm.rev2hsh(i) is None)
+    ensure(rm.hsh2rev(b'\0' * 20) is None)
+
+    paths = ['', 'a', None, 'b', 'b', 'c', 'c', None, 'a', 'b', 'a', 'a']
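+    # a None path appears to mean "unchanged from the previous revision";
+    # the fallback is asserted via rev2path below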
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
+
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+
+    # re-load and verify
+    rm.flush()
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 4)
+    for i in xrange(1, 5):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    # append without calling save() explicitly
+    for i in xrange(5, 12):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i],
+                         flush=True) == i)
+
+    # re-load and verify
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 11)
+    for i in xrange(1, 12):
+        ensure(rm.hsh2rev(genhsh(i)) == i)
+        ensure(rm.rev2hsh(i) == genhsh(i))
+        ensure(rm.rev2path(i) == (paths[i] or paths[i - 1]))
+        ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
+
+    os.unlink(path)
+
+    # missing keys
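+    # (the file was unlinked above, but the in-memory revmap still answers)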
+    ensure(rm.rev2hsh(12) is None)
+    ensure(rm.rev2hsh(0) is None)
+    ensure(rm.rev2hsh(-1) is None)
+    ensure(rm.rev2flag(12) is None)
+    ensure(rm.rev2path(12) is None)
+    ensure(rm.hsh2rev(b'\1' * 20) is None)
+
+    # illformed hash (not 20 bytes)
+    try:
+        rm.append(b'\0')
+        ensure(False)
+    except Exception:
+        pass
+
+def testcorruptformat():
+    path = gettemppath()
+
+    # incorrect header
+    with open(path, 'wb') as f:
+        f.write(b'NOT A VALID HEADER')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # rewrite the file
+    os.unlink(path)
+    rm = revmap.revmap(path)
+    rm.append(genhsh(0), flush=True)
+
+    rm = revmap.revmap(path)
+    ensure(rm.maxrev == 1)
+
+    # corrupt the file by appending a byte
+    size = os.stat(path).st_size
+    with open(path, 'ab') as f:
+        f.write(b'\xff')
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    # corrupt the file by removing the last byte
+    ensure(size > 0)
+    with open(path, 'r+b') as f:  # 'w' would empty the file before truncating
+        f.truncate(size - 1)
+    try:
+        revmap.revmap(path)
+        ensure(False)
+    except error.CorruptedFileError:
+        pass
+
+    os.unlink(path)
+
+def testcopyfrom():
+    path = gettemppath()
+    rm = revmap.revmap(path)
+    for i in xrange(1, 10):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=str(i // 3)) == i)
+    rm.flush()
+
+    # copy rm to rm2
+    rm2 = revmap.revmap()
+    rm2.copyfrom(rm)
+    path2 = gettemppath()
+    rm2.path = path2
+    rm2.flush()
+
+    # two files should be the same
+    ensure(len(set(util.readfile(p) for p in [path, path2])) == 1)
+
+    os.unlink(path)
+    os.unlink(path2)
+
+class fakefctx(object):
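+    # minimal stand-in for a filectx; revmap containment only needs node()
+    # and path()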
+    def __init__(self, node, path=None):
+        self._node = node
+        self._path = path
+
+    def node(self):
+        return self._node
+
+    def path(self):
+        return self._path
+
+def testcontains():
+    path = gettemppath()
+
+    rm = revmap.revmap(path)
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), sidebranch=(i & 1)) == i)
+
+    for i in xrange(1, 5):
+        ensure(((genhsh(i), None) in rm) == ((i & 1) == 0))
+        ensure((fakefctx(genhsh(i)) in rm) == ((i & 1) == 0))
+    for i in xrange(5, 10):
+        ensure(fakefctx(genhsh(i)) not in rm)
+        ensure((genhsh(i), None) not in rm)
+
+    # "contains" checks paths
+    rm = revmap.revmap()
+    for i in xrange(1, 5):
+        ensure(rm.append(genhsh(i), path=str(i // 2)) == i)
+    for i in xrange(1, 5):
+        ensure(fakefctx(genhsh(i), path=str(i // 2)) in rm)
+        ensure(fakefctx(genhsh(i), path='a') not in rm)
+
+def testlastnode():
+    path = gettemppath()
+    ensure(revmap.getlastnode(path) is None)
+    rm = revmap.revmap(path)
+    ensure(revmap.getlastnode(path) is None)
+    for i in xrange(1, 10):
+        hsh = genhsh(i)
+        rm.append(hsh, path=str(i // 2), flush=True)
+        ensure(revmap.getlastnode(path) == hsh)
+        rm2 = revmap.revmap(path)
+        ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
+
+testbasicreadwrite()
+testcorruptformat()
+testcopyfrom()
+testcontains()
+testlastnode()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-fastannotate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,263 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fastannotate=
+  > EOF
+
+  $ HGMERGE=true; export HGMERGE
+
+  $ hg init repo
+  $ cd repo
+
+a simple merge case
+
+  $ echo 1 > a
+  $ hg commit -qAm 'append 1'
+  $ echo 2 >> a
+  $ hg commit -m 'append 2'
+  $ echo 3 >> a
+  $ hg commit -m 'append 3'
+  $ hg up 1 -q
+  $ cat > a << EOF
+  > 0
+  > 1
+  > 2
+  > EOF
+  $ hg commit -qm 'insert 0'
+  $ hg merge 2 -q
+  $ echo 4 >> a
+  $ hg commit -m merge
+  $ hg log -G -T '{rev}: {desc}'
+  @    4: merge
+  |\
+  | o  3: insert 0
+  | |
+  o |  2: append 3
+  |/
+  o  1: append 2
+  |
+  o  0: append 1
+  
+  $ hg fastannotate a
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 0 a
+  0: 1
+  $ hg fastannotate -r 1 a
+  0: 1
+  1: 2
+  $ hg fastannotate -udnclf a
+  test 3 d641cb51f61e Thu Jan 01 00:00:00 1970 +0000 a:1: 0
+  test 0 4994017376d3 Thu Jan 01 00:00:00 1970 +0000 a:1: 1
+  test 1 e940cb6d9a06 Thu Jan 01 00:00:00 1970 +0000 a:2: 2
+  test 2 26162a884ba6 Thu Jan 01 00:00:00 1970 +0000 a:3: 3
+  test 4 3ad7bcd2815f Thu Jan 01 00:00:00 1970 +0000 a:5: 4
+  $ hg fastannotate --linear a
+  3: 0
+  0: 1
+  1: 2
+  4: 3
+  4: 4
+
+incrementally updating
+
+  $ hg fastannotate -r 0 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  $ hg fastannotate -r 0 a --debug --rebuild
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 3 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: 1 new changesets in the main branch
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+  $ hg fastannotate -r 1 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  0: 1
+  1: 2
+
+rebuild happens automatically if unable to update
+
+  $ hg fastannotate -r 2 a --debug
+  fastannotate: a: cache broken and deleted
+  fastannotate: a: 3 new changesets in the main branch
+  0: 1
+  1: 2
+  2: 3
+
+config option "fastannotate.mainbranch"
+
+  $ hg fastannotate -r 1 --rebuild --config fastannotate.mainbranch=tip a --debug
+  fastannotate: a: 4 new changesets in the main branch
+  0: 1
+  1: 2
+  $ hg fastannotate -r 4 a --debug
+  fastannotate: a: using fast path (resolved fctx: True)
+  3: 0
+  0: 1
+  1: 2
+  2: 3
+  4: 4
+
+config option "fastannotate.modes"
+
+  $ hg annotate -r 1 --debug a
+  0: 1
+  1: 2
+  $ hg annotate --config fastannotate.modes=fctx -r 1 --debug a
+  fastannotate: a: using fast path (resolved fctx: False)
+  0: 1
+  1: 2
+  $ hg fastannotate --config fastannotate.modes=fctx -h -q
+  hg: unknown command 'fastannotate'
+  (did you mean *) (glob)
+  [255]
+
+rename
+
+  $ hg mv a b
+  $ cat > b << EOF
+  > 0
+  > 11
+  > 3
+  > 44
+  > EOF
+  $ hg commit -m b -q
+  $ hg fastannotate -ncf --long-hash b
+  3 d641cb51f61e331c44654104301f8154d7865c89 a: 0
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 11
+  2 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a: 3
+  5 d44dade239915bc82b91e4556b1257323f8e5824 b: 44
+  $ hg fastannotate -r 26162a884ba60e8c87bf4e0d6bb8efcc6f711a4e a
+  0: 1
+  1: 2
+  2: 3
+
+fastannotate --deleted
+
+  $ hg fastannotate --deleted -nf b
+  3 a:  0
+  5 b:  11
+  0 a: -1
+  1 a: -2
+  2 a:  3
+  5 b:  44
+  4 a: -4
+  $ hg fastannotate --deleted -r 3 -nf a
+  3 a:  0
+  0 a:  1
+  1 a:  2
+
+file and directories with ".l", ".m" suffixes
+
+  $ cd ..
+  $ hg init repo2
+  $ cd repo2
+
+  $ mkdir a.l b.m c.lock a.l.hg b.hg
+  $ for i in a b c d d.l d.m a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a; do
+  >   echo $i > $i
+  > done
+  $ hg add . -q
+  $ hg commit -m init
+  $ hg fastannotate a.l/a b.m/a c.lock/a a.l.hg/a b.hg/a d.l d.m a b c d
+  0: a
+  0: a.l.hg/a
+  0: a.l/a
+  0: b
+  0: b.hg/a
+  0: b.m/a
+  0: c
+  0: c.lock/a
+  0: d
+  0: d.l
+  0: d.m
+
+empty file
+
+  $ touch empty
+  $ hg commit -A empty -m empty
+  $ hg fastannotate empty
+
+json format
+
+  $ hg fastannotate -Tjson -cludn b a empty
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [0.0, 0],
+    "line": "b\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   }
+  ]
+
+  $ hg fastannotate -Tjson -cludn empty
+  [
+  ]
+  $ hg fastannotate -Tjson --no-content -n a
+  [
+   {
+    "rev": 0
+   }
+  ]
+
+working copy
+
+  $ echo a >> a
+  $ hg fastannotate -r 'wdir()' a
+  abort: cannot update linelog to wdir()
+  (set fastannotate.mainbranch)
+  [255]
+  $ cat >> $HGRCPATH << EOF
+  > [fastannotate]
+  > mainbranch = .
+  > EOF
+  $ hg fastannotate -r 'wdir()' a
+  0 : a
+  1+: a
+  $ hg fastannotate -cludn -r 'wdir()' a
+  test 0 1fd620b16252  Thu Jan 01 00:00:00 1970 +0000:1: a
+  test 1 720582f5bdb6+ *:2: a (glob)
+  $ hg fastannotate -cludn -r 'wdir()' -Tjson a
+  [
+   {
+    "date": [0.0, 0],
+    "line": "a\n",
+    "line_number": 1,
+    "node": "1fd620b16252aecb54c6aa530dff5ed6e6ec3d21",
+    "rev": 0,
+    "user": "test"
+   },
+   {
+    "date": [*, 0], (glob)
+    "line": "a\n",
+    "line_number": 2,
+    "node": null,
+    "rev": null,
+    "user": "test"
+   }
+  ]
--- a/tests/test-filebranch.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-filebranch.t	Mon Oct 22 14:46:06 2018 -0400
@@ -41,7 +41,7 @@
 
 We shouldn't have anything but n state here:
 
-  $ hg debugstate --nodates | grep -v "^n"
+  $ hg debugstate --no-dates | grep -v "^n"
   [1]
 
 Merging:
@@ -141,6 +141,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 4 changesets, 10 total revisions
+  checked 4 changesets with 10 changes to 4 files
 
   $ cd ..
--- a/tests/test-fileset-generated.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-fileset-generated.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2,15 +2,15 @@
 
 Set up history and working copy
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 1
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 1
   $ hg addremove -q --similarity 0
   $ hg commit -m first
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 2
   $ hg addremove -q --similarity 0
   $ hg commit -m second
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 wc
   $ hg addremove -q --similarity 0
   $ hg forget *_*_*-untracked
   $ rm *_*_missing-*
@@ -187,11 +187,11 @@
   undeleting missing_content2_missing-untracked
 
   $ hg revert 'set:deleted()'
+  forgetting content1_missing_missing-tracked
+  forgetting missing_missing_missing-tracked
   reverting content1_content1_missing-tracked
   reverting content1_content2_missing-tracked
-  forgetting content1_missing_missing-tracked
   reverting missing_content2_missing-tracked
-  forgetting missing_missing_missing-tracked
 
   $ hg revert 'set:unknown()'
 
--- a/tests/test-fileset.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-fileset.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,13 +18,19 @@
 
   $ fileset -v a1
   (symbol 'a1')
+  * matcher:
+  <patternmatcher patterns='(?:a1$)'>
   a1
   $ fileset -v 'a*'
   (symbol 'a*')
+  * matcher:
+  <patternmatcher patterns='(?:a[^/]*$)'>
   a1
   a2
   $ fileset -v '"re:a\d"'
   (string 're:a\\d')
+  * matcher:
+  <patternmatcher patterns='(?:a\\d)'>
   a1
   a2
   $ fileset -v '!re:"a\d"'
@@ -32,6 +38,10 @@
     (kindpat
       (symbol 're')
       (string 'a\\d')))
+  * matcher:
+  <predicatenmatcher
+    pred=<not
+      <patternmatcher patterns='(?:a\\d)'>>>
   b1
   b2
   $ fileset -v 'path:a1 or glob:b?'
@@ -42,10 +52,12 @@
     (kindpat
       (symbol 'glob')
       (symbol 'b?')))
+  * matcher:
+  <patternmatcher patterns='(?:a1(?:/|$)|b.$)'>
   a1
   b1
   b2
-  $ fileset -v 'a1 or a2'
+  $ fileset -v --no-show-matcher 'a1 or a2'
   (or
     (symbol 'a1')
     (symbol 'a2'))
@@ -97,6 +109,15 @@
       None))
   hg: parse error: can't use negate operator in this context
   [255]
+  $ fileset -p parsed 'a, b, c'
+  * parsed:
+  (list
+    (symbol 'a')
+    (symbol 'b')
+    (symbol 'c'))
+  hg: parse error: can't use a list in this context
+  (see 'hg help "filesets.x or y"')
+  [255]
 
   $ fileset '"path":.'
   hg: parse error: not a symbol
@@ -114,6 +135,183 @@
   hg: parse error: invalid pattern kind: foo
   [255]
 
+Show parsed tree at stages:
+
+  $ fileset -p unknown a
+  abort: invalid stage name: unknown
+  [255]
+
+  $ fileset -p parsed 'path:a1 or glob:b?'
+  * parsed:
+  (or
+    (kindpat
+      (symbol 'path')
+      (symbol 'a1'))
+    (kindpat
+      (symbol 'glob')
+      (symbol 'b?')))
+  a1
+  b1
+  b2
+
+  $ fileset -p all -s 'a1 or a2 or (grep("b") & clean())'
+  * parsed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (group
+      (and
+        (func
+          (symbol 'grep')
+          (string 'b'))
+        (func
+          (symbol 'clean')
+          None))))
+  * analyzed:
+  (or
+    (symbol 'a1')
+    (symbol 'a2')
+    (and
+      (func
+        (symbol 'grep')
+        (string 'b'))
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))))
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'a2'))
+    (and
+      (withstatus
+        (func
+          (symbol 'clean')
+          None)
+        (string 'clean'))
+      (func
+        (symbol 'grep')
+        (string 'b'))))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|a2$)'>,
+    <intersectionmatcher
+      m1=<predicatenmatcher pred=clean>,
+      m2=<predicatenmatcher pred=grep('b')>>]>
+  a1
+  a2
+  b1
+  b2
+
+Union of basic patterns:
+
+  $ fileset -p optimized -s -r. 'a1 or a2 or path:b1'
+  * optimized:
+  (patterns
+    (symbol 'a1')
+    (symbol 'a2')
+    (kindpat
+      (symbol 'path')
+      (symbol 'b1')))
+  * matcher:
+  <patternmatcher patterns='(?:a1$|a2$|b1(?:/|$))'>
+  a1
+  a2
+  b1
+
+OR expression should be reordered by weight:
+
+  $ fileset -p optimized -s -r. 'grep("a") or a1 or grep("b") or b2'
+  * optimized:
+  (or
+    (patterns
+      (symbol 'a1')
+      (symbol 'b2'))
+    (func
+      (symbol 'grep')
+      (string 'a'))
+    (func
+      (symbol 'grep')
+      (string 'b')))
+  * matcher:
+  <unionmatcher matchers=[
+    <patternmatcher patterns='(?:a1$|b2$)'>,
+    <predicatenmatcher pred=grep('a')>,
+    <predicatenmatcher pred=grep('b')>]>
+  a1
+  a2
+  b1
+  b2
+
+Use differencematcher for 'x and not y':
+
+  $ fileset -p optimized -s 'a* and not a1'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p optimized -s '!binary() and a*'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  a1
+  a2
+
+'x - y' is rewritten to 'x and not y' first so the operands can be reordered:
+
+  $ fileset -p analyzed -p optimized -s 'a* - a1'
+  * analyzed:
+  (and
+    (symbol 'a*')
+    (not
+      (symbol 'a1')))
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (symbol 'a1'))
+  * matcher:
+  <differencematcher
+    m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+    m2=<patternmatcher patterns='(?:a1$)'>>
+  a2
+
+  $ fileset -p analyzed -p optimized -s 'binary() - a*'
+  * analyzed:
+  (and
+    (func
+      (symbol 'binary')
+      None)
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<predicatenmatcher
+      pred=<not
+        <patternmatcher patterns='(?:a[^/]*$)'>>>,
+    m2=<predicatenmatcher pred=binary>>
+
 Test files status
 
   $ rm a1
@@ -180,6 +378,156 @@
   b2
   c1
 
+Test insertion of status hints
+
+  $ fileset -p optimized 'added()'
+  * optimized:
+  (withstatus
+    (func
+      (symbol 'added')
+      None)
+    (string 'added'))
+  c1
+
+  $ fileset -p optimized 'a* & removed()'
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a2
+
+  $ fileset -p optimized 'a* - removed()'
+  * optimized:
+  (minus
+    (symbol 'a*')
+    (withstatus
+      (func
+        (symbol 'removed')
+        None)
+      (string 'removed')))
+  a1
+
+  $ fileset -p analyzed -p optimized '(added() + removed()) - a*'
+  * analyzed:
+  (and
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed'))
+    (not
+      (symbol 'a*')))
+  * optimized:
+  (and
+    (not
+      (symbol 'a*'))
+    (withstatus
+      (or
+        (func
+          (symbol 'added')
+          None)
+        (func
+          (symbol 'removed')
+          None))
+      (string 'added removed')))
+  c1
+
+  $ fileset -p optimized 'a* + b* + added() + unknown()'
+  * optimized:
+  (withstatus
+    (or
+      (patterns
+        (symbol 'a*')
+        (symbol 'b*'))
+      (func
+        (symbol 'added')
+        None)
+      (func
+        (symbol 'unknown')
+        None))
+    (string 'added unknown'))
+  a1
+  a2
+  b1
+  b2
+  c1
+  c3
+
+  $ fileset -p analyzed -p optimized 'removed() & missing() & a*'
+  * analyzed:
+  (and
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing'))
+    (symbol 'a*'))
+  * optimized:
+  (and
+    (symbol 'a*')
+    (withstatus
+      (and
+        (func
+          (symbol 'removed')
+          None)
+        (func
+          (symbol 'missing')
+          None))
+      (string 'removed missing')))
+
+  $ fileset -p optimized 'clean() & revs(0, added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'revs')
+      (list
+        (symbol '0')
+        (withstatus
+          (func
+            (symbol 'added')
+            None)
+          (string 'added')))))
+  b1
+
+  $ fileset -p optimized 'clean() & status(null, 0, b* & added())'
+  * optimized:
+  (and
+    (withstatus
+      (func
+        (symbol 'clean')
+        None)
+      (string 'clean'))
+    (func
+      (symbol 'status')
+      (list
+        (symbol 'null')
+        (symbol '0')
+        (and
+          (symbol 'b*')
+          (withstatus
+            (func
+              (symbol 'added')
+              None)
+            (string 'added'))))))
+  b1
+
 Test files properties
 
   >>> open('bin', 'wb').write(b'\0a') and None
@@ -194,6 +542,19 @@
   $ fileset 'binary()'
   bin
 
+  $ fileset -p optimized -s 'binary() and b*'
+  * optimized:
+  (and
+    (symbol 'b*')
+    (func
+      (symbol 'binary')
+      None))
+  * matcher:
+  <intersectionmatcher
+    m1=<patternmatcher patterns='(?:b[^/]*$)'>,
+    m2=<predicatenmatcher pred=binary>>
+  bin
+
   $ fileset 'grep("b{1}")'
   .hgignore
   b1
@@ -231,7 +592,7 @@
   [255]
   $ fileset '(1k, 2k)'
   hg: parse error: can't use a list in this context
-  (see hg help "filesets.x or y")
+  (see 'hg help "filesets.x or y"')
   [255]
   $ fileset 'size(1k)'
   1k
--- a/tests/test-fix-topology.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-fix-topology.t	Mon Oct 22 14:46:06 2018 -0400
@@ -9,7 +9,7 @@
   > sys.stdout.write(sys.stdin.read().upper())
   > EOF
   $ TESTLINES="foo\nbar\nbaz\n"
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY
   FOO
   BAR
   BAZ
@@ -22,7 +22,7 @@
   > [extensions]
   > fix =
   > [fix]
-  > uppercase-whole-file:command=$PYTHON $UPPERCASEPY
+  > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY
   > uppercase-whole-file:fileset=set:**
   > EOF
 
--- a/tests/test-fix.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-fix.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,32 +22,32 @@
   >     sys.stdout.write(line)
   > EOF
   $ TESTLINES="foo\nbar\nbaz\nqux\n"
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY
   foo
   bar
   baz
   qux
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY all
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY all
   FOO
   BAR
   BAZ
   QUX
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY 1-1
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 1-1
   FOO
   bar
   baz
   qux
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY 1-2
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 1-2
   FOO
   BAR
   baz
   qux
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY 2-3
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 2-3
   foo
   BAR
   BAZ
   qux
-  $ printf $TESTLINES | $PYTHON $UPPERCASEPY 2-2 4-4
+  $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 2-2 4-4
   foo
   BAR
   baz
@@ -65,9 +65,9 @@
   > evolution.createmarkers=True
   > evolution.allowunstable=True
   > [fix]
-  > uppercase-whole-file:command=$PYTHON $UPPERCASEPY all
+  > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY all
   > uppercase-whole-file:fileset=set:**.whole
-  > uppercase-changed-lines:command=$PYTHON $UPPERCASEPY
+  > uppercase-changed-lines:command="$PYTHON" $UPPERCASEPY
   > uppercase-changed-lines:linerange={first}-{last}
   > uppercase-changed-lines:fileset=set:**.changed
   > EOF
@@ -502,12 +502,13 @@
 
   $ cd ..
 
-When a fixer prints to stderr, we assume that it has failed. We should show the
-error messages to the user, and we should not let the failing fixer affect the
-file it was fixing (many code formatters might emit error messages on stderr
-and nothing on stdout, which would cause us the clear the file). We show the
-user which fixer failed and which revision, but we assume that the fixer will
-print the filename if it is relevant.
+When a fixer prints to stderr, we don't assume that it has failed. We show the
+error messages to the user, and we still let the fixer affect the file it was
+fixing if its exit code is zero. Some code formatters might emit error messages
+on stderr and nothing on stdout, which would cause us to clear the file,
+except that they also exit with a non-zero code. We show the user which fixer
+emitted the stderr, and which revision, but we assume that the fixer will print
+the filename if it is relevant (since the issue may be non-specific).
 
   $ hg init showstderr
   $ cd showstderr
@@ -515,17 +516,37 @@
   $ printf "hello\n" > hello.txt
   $ hg add
   adding hello.txt
-  $ cat >> $TESTTMP/cmd.sh <<'EOF'
+  $ cat > $TESTTMP/fail.sh <<'EOF'
   > printf 'HELLO\n'
   > printf "$@: some\nerror" >&2
+  > exit 0 # success despite the stderr output
   > EOF
-  $ hg --config "fix.fail:command=sh $TESTTMP/cmd.sh {rootpath}" \
+  $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
   >    --config "fix.fail:fileset=hello.txt" \
   >    fix --working-dir
   [wdir] fail: hello.txt: some
   [wdir] fail: error
   $ cat hello.txt
-  hello
+  HELLO
+
+  $ printf "goodbye\n" > hello.txt
+  $ cat > $TESTTMP/work.sh <<'EOF'
+  > printf 'GOODBYE\n'
+  > printf "$@: some\nerror\n" >&2
+  > exit 42 # failure despite the stdout output
+  > EOF
+  $ hg --config "fix.fail:command=sh $TESTTMP/work.sh {rootpath}" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: hello.txt: some
+  [wdir] fail: error
+  $ cat hello.txt
+  goodbye
+
+  $ hg --config "fix.fail:command=exit 42" \
+  >    --config "fix.fail:fileset=hello.txt" \
+  >    fix --working-dir
+  [wdir] fail: exited with status 42
 
   $ cd ..
 
@@ -830,9 +851,9 @@
   
   $ hg fix -r 0:2
   $ hg log --graph --template '{node|shortest} {files}'
-  o  3801 bar.whole
+  o  b4e2 bar.whole
   |
-  o  38cc
+  o  59f4
   |
   | @  bc05 bar.whole
   | |
--- a/tests/test-flagprocessor.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-flagprocessor.t	Mon Oct 22 14:46:06 2018 -0400
@@ -206,6 +206,8 @@
     File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
       validatehash,
     File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
+      _insertflagprocessor(flag, processor, _flagprocessors)
+    File "*/mercurial/revlog.py", line *, in _insertflagprocessor (glob)
       raise error.Abort(msg)
   Abort: cannot register multiple processors on flag '0x8'.
   *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
--- a/tests/test-fncache.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-fncache.t	Mon Oct 22 14:46:06 2018 -0400
@@ -41,7 +41,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 3 files
 
   $ rm .hg/store/fncache
 
@@ -53,7 +53,7 @@
    warning: revlog 'data/a.i' not in fncache!
    warning: revlog 'data/a.i.hg/c.i' not in fncache!
    warning: revlog 'data/a.i/b.i' not in fncache!
-  3 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 3 files
   3 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
 
@@ -70,7 +70,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 3 files
 
   $ cd ..
 
@@ -88,6 +88,7 @@
   .hg/00manifest.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/data
@@ -121,6 +122,7 @@
   .hg/00changelog.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache (reporevlogstore !)
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/dirstate
@@ -303,13 +305,13 @@
   > def trwrapper(orig, self, *args, **kwargs):
   >     tr = orig(self, *args, **kwargs)
   >     def fail(tr):
-  >         raise error.Abort("forced transaction failure")
+  >         raise error.Abort(b"forced transaction failure")
   >     # zzz prefix to ensure it sorted after store.write
-  >     tr.addfinalize('zzz-forcefails', fail)
+  >     tr.addfinalize(b'zzz-forcefails', fail)
   >     return tr
   > 
   > def abortwrapper(orig, self, *args, **kwargs):
-  >     raise error.Abort("forced transaction failure")
+  >     raise error.Abort(b"forced transaction failure")
   > 
   > def uisetup(ui):
   >     extensions.wrapfunction(localrepo.localrepository, 'transaction',
@@ -338,7 +340,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   $ cat .hg/store/fncache
   data/y.i
 
@@ -446,20 +448,20 @@
 
   $ cat > fncacheloadwarn.py << EOF
   > from __future__ import absolute_import
-  > from mercurial import extensions, store
+  > from mercurial import extensions, localrepo
   > 
   > def extsetup(ui):
   >     def wrapstore(orig, requirements, *args):
   >         store = orig(requirements, *args)
-  >         if 'store' in requirements and 'fncache' in requirements:
+  >         if b'store' in requirements and b'fncache' in requirements:
   >             instrumentfncachestore(store, ui)
   >         return store
-  >     extensions.wrapfunction(store, 'store', wrapstore)
+  >     extensions.wrapfunction(localrepo, 'makestore', wrapstore)
   > 
   > def instrumentfncachestore(fncachestore, ui):
   >     class instrumentedfncache(type(fncachestore.fncache)):
   >         def _load(self):
-  >             ui.warn('fncache load triggered!\n')
+  >             ui.warn(b'fncache load triggered!\n')
   >             super(instrumentedfncache, self)._load()
   >     fncachestore.fncache.__class__ = instrumentedfncache
   > EOF
--- a/tests/test-gendoc.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-gendoc.t	Mon Oct 22 14:46:06 2018 -0400
@@ -8,7 +8,7 @@
   $ { echo C; ls "$TESTDIR/../i18n"/*.po | sort; } | while read PO; do
   >     LOCALE=`basename "$PO" .po`
   >     echo "% extracting documentation from $LOCALE"
-  >     LANGUAGE=$LOCALE $PYTHON "$TESTDIR/../doc/gendoc.py" >> gendoc-$LOCALE.txt 2> /dev/null || exit
+  >     LANGUAGE=$LOCALE "$PYTHON" "$TESTDIR/../doc/gendoc.py" >> gendoc-$LOCALE.txt 2> /dev/null || exit
   > 
   >     if [ $LOCALE != C ]; then
   >         if [ ! -f $TESTDIR/test-gendoc-$LOCALE.t ]; then
--- a/tests/test-generaldelta.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-generaldelta.t	Mon Oct 22 14:46:06 2018 -0400
@@ -267,7 +267,7 @@
        51       4        3       50    prev        356        594        611   1.02862       611         0    0.00000
        52       4        4       51      p1         58        640        669   1.04531       669         0    0.00000
        53       5        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       5        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       6        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -279,61 +279,61 @@
   14 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R relax-chain debugdeltachain -m
       rev  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0       1        1       -1    base         46         45         46   1.02222        46         0    0.00000
-        1       1        2        0      p1         57         90        103   1.14444       103         0    0.00000
-        2       1        3        1      p1         57        135        160   1.18519       160         0    0.00000
-        3       1        4        2      p1         57        180        217   1.20556       217         0    0.00000
-        4       1        5        3      p1         57        225        274   1.21778       274         0    0.00000
-        5       1        6        4      p1         57        270        331   1.22593       331         0    0.00000
-        6       2        1       -1    base         46         45         46   1.02222        46         0    0.00000
-        7       2        2        6      p1         57         90        103   1.14444       103         0    0.00000
-        8       2        3        7      p1         57        135        160   1.18519       160         0    0.00000
-        9       2        4        8      p1         57        180        217   1.20556       217         0    0.00000
-       10       2        5        9      p1         58        226        275   1.21681       275         0    0.00000
-       11       2        6       10      p1         58        272        333   1.22426       333         0    0.00000
-       12       2        7       11      p1         58        318        391   1.22956       391         0    0.00000
-       13       2        8       12      p1         58        364        449   1.23352       449         0    0.00000
-       14       2        9       13      p1         58        410        507   1.23659       507         0    0.00000
-       15       2       10       14      p1         58        456        565   1.23904       565         0    0.00000
-       16       2       11       15      p1         58        502        623   1.24104       623         0    0.00000
-       17       2       12       16      p1         58        548        681   1.24270       681         0    0.00000
-       18       3        1       -1    base         47         46         47   1.02174        47         0    0.00000
-       19       3        2       18      p1         58         92        105   1.14130       105         0    0.00000
-       20       3        3       19      p1         58        138        163   1.18116       163         0    0.00000
-       21       3        4       20      p1         58        184        221   1.20109       221         0    0.00000
-       22       3        5       21      p1         58        230        279   1.21304       279         0    0.00000
-       23       3        6       22      p1         58        276        337   1.22101       337         0    0.00000
-       24       3        7       23      p1         58        322        395   1.22671       395         0    0.00000
-       25       3        8       24      p1         58        368        453   1.23098       453         0    0.00000
-       26       3        9       25      p1         58        414        511   1.23430       511         0    0.00000
-       27       3       10       26      p1         58        460        569   1.23696       569         0    0.00000
-       28       3       11       27      p1         58        506        627   1.23913       627         0    0.00000
-       29       3       12       28      p1         58        552        685   1.24094       685         0    0.00000
-       30       3       13       29      p1         58        598        743   1.24247       743         0    0.00000
-       31       3       14       30      p1         58        644        801   1.24379       801         0    0.00000
-       32       3       15       31      p1         58        690        859   1.24493       859         0    0.00000
-       33       3       16       32      p1         58        736        917   1.24592       917         0    0.00000
-       34       3       17       33      p1         58        782        975   1.24680       975         0    0.00000
-       35       3       18       34      p1         58        828       1033   1.24758      1033         0    0.00000
-       36       3       19       35      p1         58        874       1091   1.24828      1091         0    0.00000
-       37       3       20       36      p1         58        920       1149   1.24891      1149         0    0.00000
-       38       3       21       37      p1         58        966       1207   1.24948      1207         0    0.00000
-       39       3       22       38      p1         58       1012       1265   1.25000      1265         0    0.00000
-       40       3       23       39      p1         58       1058       1323   1.25047      1323         0    0.00000
-       41       3       24       40      p1         58       1104       1381   1.25091      1381         0    0.00000
-       42       3       25       41      p1         58       1150       1439   1.25130      1439         0    0.00000
-       43       3       26       42      p1         58       1196       1497   1.25167      1497         0    0.00000
-       44       3       27       43      p1         58       1242       1555   1.25201      1555         0    0.00000
-       45       3       28       44      p1         58       1288       1613   1.25233      1613         0    0.00000
-       46       3       29       45      p1         58       1334       1671   1.25262      1671         0    0.00000
-       47       3       30       46      p1         58       1380       1729   1.25290      1729         0    0.00000
-       48       3       31       47      p1         58       1426       1787   1.25316      1787         0    0.00000
-       49       4        1       -1    base        197        316        197   0.62342       197         0    0.00000
-       50       4        2       49      p1         58        362        255   0.70442       255         0    0.00000
-       51       2       13       17      p1         58        594        739   1.24411      2781      2042    2.76319
-       52       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
-       53       6        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       6        2       53      p1        376        640        376   0.58750       376         0    0.00000
+        0       1        1       -1    base         47         46         47   1.02174        47         0    0.00000
+        1       1        2        0      p1         58         92        105   1.14130       105         0    0.00000
+        2       1        3        1      p1         58        138        163   1.18116       163         0    0.00000
+        3       1        4        2      p1         58        184        221   1.20109       221         0    0.00000
+        4       1        5        3      p1         58        230        279   1.21304       279         0    0.00000
+        5       1        6        4      p1         58        276        337   1.22101       337         0    0.00000
+        6       1        7        5      p1         58        322        395   1.22671       395         0    0.00000
+        7       1        8        6      p1         58        368        453   1.23098       453         0    0.00000
+        8       1        9        7      p1         58        414        511   1.23430       511         0    0.00000
+        9       1       10        8      p1         58        460        569   1.23696       569         0    0.00000
+       10       1       11        9      p1         58        506        627   1.23913       627         0    0.00000
+       11       1       12       10      p1         58        552        685   1.24094       685         0    0.00000
+       12       1       13       11      p1         58        598        743   1.24247       743         0    0.00000
+       13       1       14       12      p1         58        644        801   1.24379       801         0    0.00000
+       14       1       15       13      p1         58        690        859   1.24493       859         0    0.00000
+       15       1       16       14      p1         58        736        917   1.24592       917         0    0.00000
+       16       1       17       15      p1         58        782        975   1.24680       975         0    0.00000
+       17       1       18       16      p1         58        828       1033   1.24758      1033         0    0.00000
+       18       1       19       17      p1         58        874       1091   1.24828      1091         0    0.00000
+       19       1       20       18      p1         58        920       1149   1.24891      1149         0    0.00000
+       20       1       21       19      p1         58        966       1207   1.24948      1207         0    0.00000
+       21       1       22       20      p1         58       1012       1265   1.25000      1265         0    0.00000
+       22       1       23       21      p1         58       1058       1323   1.25047      1323         0    0.00000
+       23       1       24       22      p1         58       1104       1381   1.25091      1381         0    0.00000
+       24       1       25       23      p1         58       1150       1439   1.25130      1439         0    0.00000
+       25       1       26       24      p1         58       1196       1497   1.25167      1497         0    0.00000
+       26       1       27       25      p1         58       1242       1555   1.25201      1555         0    0.00000
+       27       1       28       26      p1         58       1288       1613   1.25233      1613         0    0.00000
+       28       1       29       27      p1         58       1334       1671   1.25262      1671         0    0.00000
+       29       1       30       28      p1         58       1380       1729   1.25290      1729         0    0.00000
+       30       1       31       29      p1         58       1426       1787   1.25316      1787         0    0.00000
+       31       2        1       -1    base         46         45         46   1.02222        46         0    0.00000
+       32       2        2       31      p1         57         90        103   1.14444       103         0    0.00000
+       33       2        3       32      p1         57        135        160   1.18519       160         0    0.00000
+       34       2        4       33      p1         57        180        217   1.20556       217         0    0.00000
+       35       2        5       34      p1         57        225        274   1.21778       274         0    0.00000
+       36       2        6       35      p1         57        270        331   1.22593       331         0    0.00000
+       37       2        7       36      p1         58        316        389   1.23101       389         0    0.00000
+       38       2        8       37      p1         58        362        447   1.23481       447         0    0.00000
+       39       3        1       -1    base         46         45         46   1.02222        46         0    0.00000
+       40       3        2       39      p1         57         90        103   1.14444       103         0    0.00000
+       41       3        3       40      p1         57        135        160   1.18519       160         0    0.00000
+       42       3        4       41      p1         57        180        217   1.20556       217         0    0.00000
+       43       3        5       42      p1         58        226        275   1.21681       275         0    0.00000
+       44       3        6       43      p1         58        272        333   1.22426       333         0    0.00000
+       45       3        7       44      p1         58        318        391   1.22956       391         0    0.00000
+       46       3        8       45      p1         58        364        449   1.23352       449         0    0.00000
+       47       3        9       46      p1         58        410        507   1.23659       507         0    0.00000
+       48       3       10       47      p1         58        456        565   1.23904       565         0    0.00000
+       49       3       11       48      p1         58        502        623   1.24104       623         0    0.00000
+       50       3       12       49      p1         58        548        681   1.24270       681         0    0.00000
+       51       3       13       50      p1         58        594        739   1.24411       739         0    0.00000
+       52       3       14       51      p1         58        640        797   1.24531       797         0    0.00000
+       53       4        1       -1    base          0          0          0   0.00000         0         0    0.00000
+       54       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
   $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes
   requesting all changes
   adding changesets
@@ -345,58 +345,58 @@
   14 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R noconst-chain debugdeltachain -m
       rev  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0       1        1       -1    base         46         45         46   1.02222        46         0    0.00000
-        1       1        2        0      p1         57         90        103   1.14444       103         0    0.00000
-        2       1        3        1      p1         57        135        160   1.18519       160         0    0.00000
-        3       1        4        2      p1         57        180        217   1.20556       217         0    0.00000
-        4       1        5        3      p1         57        225        274   1.21778       274         0    0.00000
-        5       1        6        4      p1         57        270        331   1.22593       331         0    0.00000
-        6       2        1       -1    base         46         45         46   1.02222        46         0    0.00000
-        7       2        2        6      p1         57         90        103   1.14444       103         0    0.00000
-        8       2        3        7      p1         57        135        160   1.18519       160         0    0.00000
-        9       2        4        8      p1         57        180        217   1.20556       217         0    0.00000
-       10       2        5        9      p1         58        226        275   1.21681       275         0    0.00000
-       11       2        6       10      p1         58        272        333   1.22426       333         0    0.00000
-       12       2        7       11      p1         58        318        391   1.22956       391         0    0.00000
-       13       2        8       12      p1         58        364        449   1.23352       449         0    0.00000
-       14       2        9       13      p1         58        410        507   1.23659       507         0    0.00000
-       15       2       10       14      p1         58        456        565   1.23904       565         0    0.00000
-       16       2       11       15      p1         58        502        623   1.24104       623         0    0.00000
-       17       2       12       16      p1         58        548        681   1.24270       681         0    0.00000
-       18       3        1       -1    base         47         46         47   1.02174        47         0    0.00000
-       19       3        2       18      p1         58         92        105   1.14130       105         0    0.00000
-       20       3        3       19      p1         58        138        163   1.18116       163         0    0.00000
-       21       3        4       20      p1         58        184        221   1.20109       221         0    0.00000
-       22       3        5       21      p1         58        230        279   1.21304       279         0    0.00000
-       23       3        6       22      p1         58        276        337   1.22101       337         0    0.00000
-       24       3        7       23      p1         58        322        395   1.22671       395         0    0.00000
-       25       3        8       24      p1         58        368        453   1.23098       453         0    0.00000
-       26       3        9       25      p1         58        414        511   1.23430       511         0    0.00000
-       27       3       10       26      p1         58        460        569   1.23696       569         0    0.00000
-       28       3       11       27      p1         58        506        627   1.23913       627         0    0.00000
-       29       3       12       28      p1         58        552        685   1.24094       685         0    0.00000
-       30       3       13       29      p1         58        598        743   1.24247       743         0    0.00000
-       31       3       14       30      p1         58        644        801   1.24379       801         0    0.00000
-       32       3       15       31      p1         58        690        859   1.24493       859         0    0.00000
-       33       3       16       32      p1         58        736        917   1.24592       917         0    0.00000
-       34       3       17       33      p1         58        782        975   1.24680       975         0    0.00000
-       35       3       18       34      p1         58        828       1033   1.24758      1033         0    0.00000
-       36       3       19       35      p1         58        874       1091   1.24828      1091         0    0.00000
-       37       3       20       36      p1         58        920       1149   1.24891      1149         0    0.00000
-       38       3       21       37      p1         58        966       1207   1.24948      1207         0    0.00000
-       39       3       22       38      p1         58       1012       1265   1.25000      1265         0    0.00000
-       40       3       23       39      p1         58       1058       1323   1.25047      1323         0    0.00000
-       41       3       24       40      p1         58       1104       1381   1.25091      1381         0    0.00000
-       42       3       25       41      p1         58       1150       1439   1.25130      1439         0    0.00000
-       43       3       26       42      p1         58       1196       1497   1.25167      1497         0    0.00000
-       44       3       27       43      p1         58       1242       1555   1.25201      1555         0    0.00000
-       45       3       28       44      p1         58       1288       1613   1.25233      1613         0    0.00000
-       46       3       29       45      p1         58       1334       1671   1.25262      1671         0    0.00000
-       47       3       30       46      p1         58       1380       1729   1.25290      1729         0    0.00000
-       48       3       31       47      p1         58       1426       1787   1.25316      1787         0    0.00000
-       49       1        7        5      p1         58        316        389   1.23101      2857      2468    6.34447
-       50       1        8       49      p1         58        362        447   1.23481      2915      2468    5.52125
-       51       2       13       17      p1         58        594        739   1.24411      2642      1903    2.57510
-       52       2       14       51      p1         58        640        797   1.24531      2700      1903    2.38770
+        0       1        1       -1    base         47         46         47   1.02174        47         0    0.00000
+        1       1        2        0      p1         58         92        105   1.14130       105         0    0.00000
+        2       1        3        1      p1         58        138        163   1.18116       163         0    0.00000
+        3       1        4        2      p1         58        184        221   1.20109       221         0    0.00000
+        4       1        5        3      p1         58        230        279   1.21304       279         0    0.00000
+        5       1        6        4      p1         58        276        337   1.22101       337         0    0.00000
+        6       1        7        5      p1         58        322        395   1.22671       395         0    0.00000
+        7       1        8        6      p1         58        368        453   1.23098       453         0    0.00000
+        8       1        9        7      p1         58        414        511   1.23430       511         0    0.00000
+        9       1       10        8      p1         58        460        569   1.23696       569         0    0.00000
+       10       1       11        9      p1         58        506        627   1.23913       627         0    0.00000
+       11       1       12       10      p1         58        552        685   1.24094       685         0    0.00000
+       12       1       13       11      p1         58        598        743   1.24247       743         0    0.00000
+       13       1       14       12      p1         58        644        801   1.24379       801         0    0.00000
+       14       1       15       13      p1         58        690        859   1.24493       859         0    0.00000
+       15       1       16       14      p1         58        736        917   1.24592       917         0    0.00000
+       16       1       17       15      p1         58        782        975   1.24680       975         0    0.00000
+       17       1       18       16      p1         58        828       1033   1.24758      1033         0    0.00000
+       18       1       19       17      p1         58        874       1091   1.24828      1091         0    0.00000
+       19       1       20       18      p1         58        920       1149   1.24891      1149         0    0.00000
+       20       1       21       19      p1         58        966       1207   1.24948      1207         0    0.00000
+       21       1       22       20      p1         58       1012       1265   1.25000      1265         0    0.00000
+       22       1       23       21      p1         58       1058       1323   1.25047      1323         0    0.00000
+       23       1       24       22      p1         58       1104       1381   1.25091      1381         0    0.00000
+       24       1       25       23      p1         58       1150       1439   1.25130      1439         0    0.00000
+       25       1       26       24      p1         58       1196       1497   1.25167      1497         0    0.00000
+       26       1       27       25      p1         58       1242       1555   1.25201      1555         0    0.00000
+       27       1       28       26      p1         58       1288       1613   1.25233      1613         0    0.00000
+       28       1       29       27      p1         58       1334       1671   1.25262      1671         0    0.00000
+       29       1       30       28      p1         58       1380       1729   1.25290      1729         0    0.00000
+       30       1       31       29      p1         58       1426       1787   1.25316      1787         0    0.00000
+       31       2        1       -1    base         46         45         46   1.02222        46         0    0.00000
+       32       2        2       31      p1         57         90        103   1.14444       103         0    0.00000
+       33       2        3       32      p1         57        135        160   1.18519       160         0    0.00000
+       34       2        4       33      p1         57        180        217   1.20556       217         0    0.00000
+       35       2        5       34      p1         57        225        274   1.21778       274         0    0.00000
+       36       2        6       35      p1         57        270        331   1.22593       331         0    0.00000
+       37       2        7       36      p1         58        316        389   1.23101       389         0    0.00000
+       38       2        8       37      p1         58        362        447   1.23481       447         0    0.00000
+       39       3        1       -1    base         46         45         46   1.02222        46         0    0.00000
+       40       3        2       39      p1         57         90        103   1.14444       103         0    0.00000
+       41       3        3       40      p1         57        135        160   1.18519       160         0    0.00000
+       42       3        4       41      p1         57        180        217   1.20556       217         0    0.00000
+       43       3        5       42      p1         58        226        275   1.21681       275         0    0.00000
+       44       3        6       43      p1         58        272        333   1.22426       333         0    0.00000
+       45       3        7       44      p1         58        318        391   1.22956       391         0    0.00000
+       46       3        8       45      p1         58        364        449   1.23352       449         0    0.00000
+       47       3        9       46      p1         58        410        507   1.23659       507         0    0.00000
+       48       3       10       47      p1         58        456        565   1.23904       565         0    0.00000
+       49       3       11       48      p1         58        502        623   1.24104       623         0    0.00000
+       50       3       12       49      p1         58        548        681   1.24270       681         0    0.00000
+       51       3       13       50      p1         58        594        739   1.24411       739         0    0.00000
+       52       3       14       51      p1         58        640        797   1.24531       797         0    0.00000
        53       4        1       -1    base          0          0          0   0.00000         0         0    0.00000
-       54       4        2       53      p1        376        640        376   0.58750       376         0    0.00000
+       54       5        1       -1    base        369        640        369   0.57656       369         0    0.00000
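
The derived columns in the debugdeltachain tables above appear consistent with
ratio = chainsize / rawsize and extraratio = extradist / chainsize (both shown
as 0.00000 when the denominator is zero, as in the empty base rows). A small
standalone check, with row values copied from the old noconst-chain table:

    def derived(rawsize, chainsize, extradist):
        # Guard the zero-size base rows (e.g. rev 53 above).
        ratio = chainsize / rawsize if rawsize else 0.0
        extraratio = extradist / chainsize if chainsize else 0.0
        return '%0.5f %0.5f' % (ratio, extraratio)

    # Old rev 51: rawsize=594, chainsize=739, extradist=1903
    # -> "1.24411 2.57510", matching the ratio/extraratio columns shown.
    print(derived(594, 739, 1903))
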
--- a/tests/test-globalopts.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-globalopts.t	Mon Oct 22 14:46:06 2018 -0400
@@ -267,6 +267,7 @@
 #if no-chg
   $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
   Traceback (most recent call last):
+  Traceback (most recent call last): (py3 !)
 #else
 Traceback for '--config' errors not supported with chg.
   $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
@@ -296,82 +297,128 @@
   
   list of commands:
   
-   add           add the specified files on the next commit
-   addremove     add all new files, delete all missing files
-   annotate      show changeset information by line for each file
-   archive       create an unversioned archive of a repository revision
+  Repository creation:
+  
+   clone         make a copy of an existing repository
+   init          create a new repository in the given directory
+  
+  Remote repository management:
+  
+   incoming      show new changesets found in source
+   outgoing      show changesets not found in the destination
+   paths         show aliases for remote repositories
+   pull          pull changes from the specified source
+   push          push changes to the specified destination
+   serve         start stand-alone webserver
+  
+  Change creation:
+  
+   commit        commit the specified files or all outstanding changes
+  
+  Change manipulation:
+  
    backout       reverse effect of earlier changeset
-   bisect        subdivision search of changesets
+   graft         copy changes from other branches onto the current branch
+   merge         merge another revision into working directory
+  
+  Change organization:
+  
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a bundle file
+   phase         set or show the current phase name
+   tag           add one or more tags for the current or given revision
+   tags          list repository tags
+  
+  File content management:
+  
+   annotate      show changeset information by line for each file
    cat           output the current or given revision of files
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
+   grep          search revision history for a pattern in specified files
+  
+  Change navigation:
+  
+   bisect        subdivision search of changesets
+   heads         show branch heads
+   identify      identify the working directory or specified revision
+   log           show revision history of entire repository or files
+  
+  Working directory management:
+  
+   add           add the specified files on the next commit
+   addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
-   graft         copy changes from other branches onto the current branch
-   grep          search revision history for a pattern in specified files
-   heads         show branch heads
-   help          show help for a given topic or a help overview
-   identify      identify the working directory or specified revision
-   import        import an ordered set of patches
-   incoming      show new changesets found in source
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   manifest      output the current or given revision of the project manifest
-   merge         merge another revision into working directory
-   outgoing      show changesets not found in the destination
-   paths         show aliases for remote repositories
-   phase         set or show the current phase name
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   recover       roll back an interrupted transaction
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
-   serve         start stand-alone webserver
    status        show changed files in the working directory
    summary       summarize working directory state
-   tag           add one or more tags for the current or given revision
-   tags          list repository tags
+   update        update working directory (or switch revisions)
+  
+  Change import/export:
+  
+   archive       create an unversioned archive of a repository revision
+   bundle        create a bundle file
+   export        dump the header and diffs for one or more changesets
+   import        import an ordered set of patches
    unbundle      apply one or more bundle files
-   update        update working directory (or switch revisions)
+  
+  Repository maintenance:
+  
+   manifest      output the current or given revision of the project manifest
+   recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
+  
+  Help:
+  
+   config        show combined config settings from all hgrc files
+   help          show help for a given topic or a help overview
    version       output version and copyright information
   
   additional help topics:
   
-   bundlespec    Bundle File Formats
+  Mercurial identifiers:
+  
+   filesets      Specifying File Sets
+   hgignore      Syntax for Mercurial Ignore Files
+   patterns      File Name Patterns
+   revisions     Specifying Revisions
+   urls          URL Paths
+  
+  Mercurial output:
+  
    color         Colorizing Outputs
+   dates         Date Formats
+   diffs         Diff Formats
+   templating    Template Usage
+  
+  Mercurial configuration:
+  
    config        Configuration Files
-   dates         Date Formats
-   deprecated    Deprecated Features
-   diffs         Diff Formats
    environment   Environment Variables
    extensions    Using Additional Features
-   filesets      Specifying File Sets
    flags         Command-line flags
-   glossary      Glossary
-   hgignore      Syntax for Mercurial Ignore Files
    hgweb         Configuring hgweb
-   internals     Technical implementation topics
    merge-tools   Merge Tools
    pager         Pager Support
-   patterns      File Name Patterns
+  
+  Concepts:
+  
+   bundlespec    Bundle File Formats
+   glossary      Glossary
    phases        Working with Phases
-   revisions     Specifying Revisions
+   subrepos      Subrepositories
+  
+  Miscellaneous:
+  
+   deprecated    Deprecated Features
+   internals     Technical implementation topics
    scripting     Using Mercurial from scripts and automation
-   subrepos      Subrepositories
-   templating    Template Usage
-   urls          URL Paths
   
   (use 'hg help -v' to show built-in aliases and global options)
 
@@ -380,82 +427,128 @@
   
   list of commands:
   
-   add           add the specified files on the next commit
-   addremove     add all new files, delete all missing files
-   annotate      show changeset information by line for each file
-   archive       create an unversioned archive of a repository revision
+  Repository creation:
+  
+   clone         make a copy of an existing repository
+   init          create a new repository in the given directory
+  
+  Remote repository management:
+  
+   incoming      show new changesets found in source
+   outgoing      show changesets not found in the destination
+   paths         show aliases for remote repositories
+   pull          pull changes from the specified source
+   push          push changes to the specified destination
+   serve         start stand-alone webserver
+  
+  Change creation:
+  
+   commit        commit the specified files or all outstanding changes
+  
+  Change manipulation:
+  
    backout       reverse effect of earlier changeset
-   bisect        subdivision search of changesets
+   graft         copy changes from other branches onto the current branch
+   merge         merge another revision into working directory
+  
+  Change organization:
+  
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a bundle file
+   phase         set or show the current phase name
+   tag           add one or more tags for the current or given revision
+   tags          list repository tags
+  
+  File content management:
+  
+   annotate      show changeset information by line for each file
    cat           output the current or given revision of files
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
+   grep          search revision history for a pattern in specified files
+  
+  Change navigation:
+  
+   bisect        subdivision search of changesets
+   heads         show branch heads
+   identify      identify the working directory or specified revision
+   log           show revision history of entire repository or files
+  
+  Working directory management:
+  
+   add           add the specified files on the next commit
+   addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
-   graft         copy changes from other branches onto the current branch
-   grep          search revision history for a pattern in specified files
-   heads         show branch heads
-   help          show help for a given topic or a help overview
-   identify      identify the working directory or specified revision
-   import        import an ordered set of patches
-   incoming      show new changesets found in source
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   manifest      output the current or given revision of the project manifest
-   merge         merge another revision into working directory
-   outgoing      show changesets not found in the destination
-   paths         show aliases for remote repositories
-   phase         set or show the current phase name
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   recover       roll back an interrupted transaction
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
-   serve         start stand-alone webserver
    status        show changed files in the working directory
    summary       summarize working directory state
-   tag           add one or more tags for the current or given revision
-   tags          list repository tags
+   update        update working directory (or switch revisions)
+  
+  Change import/export:
+  
+   archive       create an unversioned archive of a repository revision
+   bundle        create a bundle file
+   export        dump the header and diffs for one or more changesets
+   import        import an ordered set of patches
    unbundle      apply one or more bundle files
-   update        update working directory (or switch revisions)
+  
+  Repository maintenance:
+  
+   manifest      output the current or given revision of the project manifest
+   recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
+  
+  Help:
+  
+   config        show combined config settings from all hgrc files
+   help          show help for a given topic or a help overview
    version       output version and copyright information
   
   additional help topics:
   
-   bundlespec    Bundle File Formats
+  Mercurial identifiers:
+  
+   filesets      Specifying File Sets
+   hgignore      Syntax for Mercurial Ignore Files
+   patterns      File Name Patterns
+   revisions     Specifying Revisions
+   urls          URL Paths
+  
+  Mercurial output:
+  
    color         Colorizing Outputs
+   dates         Date Formats
+   diffs         Diff Formats
+   templating    Template Usage
+  
+  Mercurial configuration:
+  
    config        Configuration Files
-   dates         Date Formats
-   deprecated    Deprecated Features
-   diffs         Diff Formats
    environment   Environment Variables
    extensions    Using Additional Features
-   filesets      Specifying File Sets
    flags         Command-line flags
-   glossary      Glossary
-   hgignore      Syntax for Mercurial Ignore Files
    hgweb         Configuring hgweb
-   internals     Technical implementation topics
    merge-tools   Merge Tools
    pager         Pager Support
-   patterns      File Name Patterns
+  
+  Concepts:
+  
+   bundlespec    Bundle File Formats
+   glossary      Glossary
    phases        Working with Phases
-   revisions     Specifying Revisions
+   subrepos      Subrepositories
+  
+  Miscellaneous:
+  
+   deprecated    Deprecated Features
+   internals     Technical implementation topics
    scripting     Using Mercurial from scripts and automation
-   subrepos      Subrepositories
-   templating    Template Usage
-   urls          URL Paths
   
   (use 'hg help -v' to show built-in aliases and global options)
 
--- a/tests/test-glog-beautifygraph.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-glog-beautifygraph.t	Mon Oct 22 14:46:06 2018 -0400
@@ -80,52 +80,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > 
-  > from mercurial.utils import (
-  >   stringutil,
-  > )
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
   $ echo "beautifygraph=" >> $HGRCPATH
 
 Set a default of narrow-text UTF-8.
@@ -1853,7 +1809,7 @@
 
 Test glob expansion of pats
 
-  $ expandglobs=`$PYTHON -c "import mercurial.util; \
+  $ expandglobs=`"$PYTHON" -c "import mercurial.util; \
   >   print(mercurial.util.expandglobs and 'true' or 'false')"`
   $ if [ $expandglobs = "true" ]; then
   >    testlog 'a*';
@@ -2043,7 +1999,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-glog-topological.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-glog-topological.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg log -G
--- a/tests/test-glog.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-glog.t	Mon Oct 22 14:46:06 2018 -0400
@@ -81,49 +81,8 @@
   >   hg commit -Aqd "$rev 0" -m "($rev) $msg"
   > }
 
-  $ cat > printrevset.py <<EOF
-  > from __future__ import absolute_import
-  > from mercurial import (
-  >   cmdutil,
-  >   commands,
-  >   extensions,
-  >   logcmdutil,
-  >   revsetlang,
-  >   smartset,
-  > )
-  > from mercurial.utils import stringutil
-  > 
-  > def logrevset(repo, pats, opts):
-  >     revs = logcmdutil._initialrevs(repo, opts)
-  >     if not revs:
-  >         return None
-  >     match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
-  >     return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
-  > 
-  > def uisetup(ui):
-  >     def printrevset(orig, repo, pats, opts):
-  >         revs, filematcher = orig(repo, pats, opts)
-  >         if opts.get(b'print_revset'):
-  >             expr = logrevset(repo, pats, opts)
-  >             if expr:
-  >                 tree = revsetlang.parse(expr)
-  >                 tree = revsetlang.analyze(tree)
-  >             else:
-  >                 tree = []
-  >             ui = repo.ui
-  >             ui.write(b'%r\n' % (opts.get(b'rev', []),))
-  >             ui.write(revsetlang.prettyformat(tree) + b'\n')
-  >             ui.write(stringutil.prettyrepr(revs) + b'\n')
-  >             revs = smartset.baseset()  # display no revisions
-  >         return revs, filematcher
-  >     extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
-  >     aliases, entry = cmdutil.findcmd(b'log', commands.table)
-  >     entry[1].append((b'', b'print-revset', False,
-  >                      b'print generated revset and exit (DEPRECATED)'))
-  > EOF
-
   $ echo "[extensions]" >> $HGRCPATH
-  $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH
+  $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH
 
   $ hg init repo
   $ cd repo
@@ -1700,7 +1659,7 @@
 
 Test glob expansion of pats
 
-  $ expandglobs=`$PYTHON -c "import mercurial.util; \
+  $ expandglobs=`"$PYTHON" -c "import mercurial.util; \
   >   print(mercurial.util.expandglobs and 'true' or 'false')"`
   $ if [ $expandglobs = "true" ]; then
   >    testlog 'a*';
@@ -1890,7 +1849,7 @@
     <spanset- 0:7>,
     <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>>
   $ testlog -r "sort(file('set:copied()'), -rev)"
-  ["sort(file('set:copied()'), -rev)"]
+  ['sort(file(\'set:copied()\'), -rev)']
   []
   <filteredset
     <fullreposet- 0:7>,
--- a/tests/test-graft.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-graft.t	Mon Oct 22 14:46:06 2018 -0400
@@ -237,7 +237,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
   
   # To continue:    hg graft --continue
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg graft --abort
   
 
 Commit while interrupted should fail:
@@ -699,8 +699,24 @@
   summary:     2
   
 ... grafts of grafts unfortunately can't
-  $ hg graft -q 13
+  $ hg graft -q 13 --debug
+  scanning for duplicate grafts
+  grafting 13:7a4785234d87 "2"
+    searching for copies back to rev 12
+    unmatched files in other (from topological common ancestor):
+     g
+    unmatched files new in both:
+     b
+  resolving manifests
+   branchmerge: True, force: True, partial: False
+   ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87
+  starting 4 threads for background file closing (?)
+  committing files:
+  b
   warning: can't find ancestor for 'b' copied from 'a'!
+  reusing manifest from p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
   $ hg log -r 'destination(13)'
 All copies of a cset
   $ hg log -r 'origin(13) or destination(origin(13))'
@@ -731,7 +747,7 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     2
   
-  changeset:   22:d1cb6591fa4b
+  changeset:   22:3a4e92d81b97
   branch:      dev
   tag:         tip
   user:        foo
@@ -743,8 +759,8 @@
 
   $ hg graft 'origin(13) or destination(origin(13))'
   skipping ancestor revision 21:7e61b508e709
-  skipping ancestor revision 22:d1cb6591fa4b
-  skipping revision 2:5c095ad7e90f (already grafted to 22:d1cb6591fa4b)
+  skipping ancestor revision 22:3a4e92d81b97
+  skipping revision 2:5c095ad7e90f (already grafted to 22:3a4e92d81b97)
   grafting 7:ef0ef43d49e7 "2"
   warning: can't find ancestor for 'b' copied from 'a'!
   grafting 13:7a4785234d87 "2"
@@ -758,7 +774,7 @@
   $ hg graft 19 0 6
   skipping ungraftable merge revision 6
   skipping ancestor revision 0:68795b066622
-  skipping already grafted revision 19:9627f653b421 (22:d1cb6591fa4b also has origin 2:5c095ad7e90f)
+  skipping already grafted revision 19:9627f653b421 (22:3a4e92d81b97 also has origin 2:5c095ad7e90f)
   [255]
   $ hg graft 19 0 6 --force
   skipping ungraftable merge revision 6
@@ -773,12 +789,12 @@
   $ hg ci -m 28
   $ hg backout 28
   reverting a
-  changeset 29:53177ba928f6 backs out changeset 28:50a516bb8b57
+  changeset 29:9d95e865b00c backs out changeset 28:cc20d29aec8d
   $ hg graft 28
-  skipping ancestor revision 28:50a516bb8b57
+  skipping ancestor revision 28:cc20d29aec8d
   [255]
   $ hg graft 28 --force
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   merging a
   $ cat a
   abc
@@ -788,7 +804,7 @@
   $ echo def > a
   $ hg ci -m 31
   $ hg graft 28 --force --tool internal:fail
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   abort: unresolved conflicts, can't continue
   (use 'hg resolve' and 'hg graft --continue')
   [255]
@@ -801,7 +817,7 @@
   (no more unresolved files)
   continue: hg graft --continue
   $ hg graft -c
-  grafting 28:50a516bb8b57 "28"
+  grafting 28:cc20d29aec8d "28"
   $ cat a
   abc
 
@@ -822,8 +838,8 @@
   $ hg tag -f something
   $ hg graft -qr 27
   $ hg graft -f 27
-  grafting 27:ed6c7e54e319 "28"
-  note: graft of 27:ed6c7e54e319 created no changes to commit
+  grafting 27:17d42b8f5d50 "28"
+  note: graft of 27:17d42b8f5d50 created no changes to commit
 
   $ cd ..
 
@@ -1863,7 +1879,7 @@
   adding manifests
   adding file changes
   added 11 changesets with 9 changes to 8 files (+4 heads)
-  new changesets 9092f1db7931:6b98ff0062dd
+  new changesets 9092f1db7931:6b98ff0062dd (6 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 9
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -1878,7 +1894,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 311dfc6cf3bf
+  new changesets 311dfc6cf3bf (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
   $ hg graft --abort
--- a/tests/test-grep.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-grep.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,7 +18,7 @@
 pattern error
 
   $ hg grep '**test**'
-  grep: invalid match pattern: nothing to repeat
+  grep: invalid match pattern: nothing to repeat* (glob)
   [1]
 
 simple
@@ -43,17 +43,17 @@
 simple templated
 
   $ hg grep port -r tip:0 \
-  > -T '{file}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
+  > -T '{path}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
   port:4:914fa752cdea:exPORT
   port:4:914fa752cdea:vaPORTight
   port:4:914fa752cdea:imPORT/exPORT
 
-  $ hg grep port -r tip:0 -T '{file}:{rev}:{texts}\n'
+  $ hg grep port -r tip:0 -T '{path}:{rev}:{texts}\n'
   port:4:export
   port:4:vaportight
   port:4:import/export
 
-  $ hg grep port -r tip:0 -T '{file}:{tags}:{texts}\n'
+  $ hg grep port -r tip:0 -T '{path}:{tags}:{texts}\n'
   port:tip:export
   port:tip:vaportight
   port:tip:import/export
@@ -64,27 +64,27 @@
   [
    {
     "date": [4, 0],
-    "file": "port",
-    "line_number": 1,
+    "lineno": 1,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
    },
    {
     "date": [4, 0],
-    "file": "port",
-    "line_number": 2,
+    "lineno": 2,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
     "user": "spam"
    },
    {
     "date": [4, 0],
-    "file": "port",
-    "line_number": 3,
+    "lineno": 3,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -97,9 +97,9 @@
   [
    {
     "date": [4, 0],
-    "file": "port",
-    "line_number": 1,
+    "lineno": 1,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "user": "spam"
    }
@@ -125,9 +125,9 @@
    {
     "change": "-",
     "date": [4, 0],
-    "file": "port",
-    "line_number": 4,
+    "lineno": 4,
     "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
+    "path": "port",
     "rev": 4,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -135,9 +135,9 @@
    {
     "change": "+",
     "date": [3, 0],
-    "file": "port",
-    "line_number": 4,
+    "lineno": 4,
     "node": "95040cfd017d658c536071c6290230a613c4c2a6",
+    "path": "port",
     "rev": 3,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "eggs"
@@ -145,9 +145,9 @@
    {
     "change": "-",
     "date": [2, 0],
-    "file": "port",
-    "line_number": 1,
+    "lineno": 1,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -155,9 +155,9 @@
    {
     "change": "-",
     "date": [2, 0],
-    "file": "port",
-    "line_number": 2,
+    "lineno": 2,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -165,9 +165,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
-    "line_number": 1,
+    "lineno": 1,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -175,9 +175,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
-    "line_number": 2,
+    "lineno": 2,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
     "user": "spam"
@@ -185,9 +185,9 @@
    {
     "change": "+",
     "date": [2, 0],
-    "file": "port",
-    "line_number": 3,
+    "lineno": 3,
     "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
+    "path": "port",
     "rev": 2,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -195,9 +195,9 @@
    {
     "change": "+",
     "date": [1, 0],
-    "file": "port",
-    "line_number": 2,
+    "lineno": 2,
     "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
+    "path": "port",
     "rev": 1,
     "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
     "user": "eggs"
@@ -205,9 +205,9 @@
    {
     "change": "+",
     "date": [0, 0],
-    "file": "port",
-    "line_number": 1,
+    "lineno": 1,
     "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
+    "path": "port",
     "rev": 0,
     "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
     "user": "spam"
@@ -300,7 +300,7 @@
 
 match in last "line" without newline
 
-  $ $PYTHON -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();'
+  $ "$PYTHON" -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();'
   $ hg ci -Amnoeol
   adding noeol
   $ hg grep -r tip:0 loop
@@ -481,9 +481,9 @@
   [
    {
     "date": [0, 0],
-    "file": "file2",
-    "line_number": 1,
+    "lineno": 1,
     "node": "ffffffffffffffffffffffffffffffffffffffff",
+    "path": "file2",
     "rev": 2147483647,
     "texts": [{"matched": true, "text": "some"}, {"matched": false, "text": " text"}],
     "user": "test"
@@ -491,3 +491,17 @@
   ]
 
   $ cd ..
+
+test -rMULTIREV with --all-files
+
+  $ cd sng
+  $ hg rm um
+  $ hg commit -m "deletes um"
+  $ hg grep -r "0:2" "unmod" --all-files
+  um:0:unmod
+  um:1:unmod
+  $ hg grep -r "0:2" "unmod" --all-files um
+  um:0:unmod
+  um:1:unmod
+  $ cd ..
+
--- a/tests/test-hardlinks.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hardlinks.t	Mon Oct 22 14:46:06 2018 -0400
@@ -11,7 +11,7 @@
 
   $ nlinksdir()
   > {
-  >     find "$@" -type f | $PYTHON $TESTTMP/nlinks.py
+  >     find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
   > }
 
 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
@@ -19,13 +19,14 @@
   $ cat > linkcp.py <<EOF
   > from __future__ import absolute_import
   > import sys
-  > from mercurial import util
-  > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
+  > from mercurial import pycompat, util
+  > util.copyfiles(pycompat.fsencode(sys.argv[1]),
+  >                pycompat.fsencode(sys.argv[2]), hardlink=True)
   > EOF
 
   $ linkcp()
   > {
-  >     $PYTHON $TESTTMP/linkcp.py $1 $2
+  >     "$PYTHON" $TESTTMP/linkcp.py $1 $2
   > }
 
 Prepare repo r1:
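
The nlinks.py helper that nlinksdir pipes into is not shown in this diff.
A plausible stand-in (an assumption, not the test's actual code) reads
paths from stdin and prints each file's hardlink count, matching the
`<count> <path>` lines expected later in this test:

    # Hypothetical $TESTTMP/nlinks.py: report st_nlink for each path
    # piped in by the nlinksdir() shell helper.
    from __future__ import absolute_import, print_function
    import os
    import sys

    for line in sys.stdin:
        path = line.strip()
        print('%d %s' % (os.stat(path).st_nlink, path))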
@@ -151,7 +152,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
   $ cd r3
   $ hg push
@@ -181,7 +182,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
 
   $ cd r1
@@ -241,6 +242,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   ? r4/.hg/cache/checklink-target (glob) (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/dirstate
@@ -291,6 +293,7 @@
   2 r4/.hg/cache/checkisexec (execbit !)
   2 r4/.hg/cache/checklink-target (symlink !)
   2 r4/.hg/cache/checknoexec (execbit !)
+  2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   1 r4/.hg/dirstate
--- a/tests/test-help.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-help.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,162 +53,254 @@
   
   list of commands:
   
-   add           add the specified files on the next commit
-   addremove     add all new files, delete all missing files
-   annotate      show changeset information by line for each file
-   archive       create an unversioned archive of a repository revision
+  Repository creation:
+  
+   clone         make a copy of an existing repository
+   init          create a new repository in the given directory
+  
+  Remote repository management:
+  
+   incoming      show new changesets found in source
+   outgoing      show changesets not found in the destination
+   paths         show aliases for remote repositories
+   pull          pull changes from the specified source
+   push          push changes to the specified destination
+   serve         start stand-alone webserver
+  
+  Change creation:
+  
+   commit        commit the specified files or all outstanding changes
+  
+  Change manipulation:
+  
    backout       reverse effect of earlier changeset
-   bisect        subdivision search of changesets
+   graft         copy changes from other branches onto the current branch
+   merge         merge another revision into working directory
+  
+  Change organization:
+  
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a bundle file
+   phase         set or show the current phase name
+   tag           add one or more tags for the current or given revision
+   tags          list repository tags
+  
+  File content management:
+  
+   annotate      show changeset information by line for each file
    cat           output the current or given revision of files
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
+   grep          search revision history for a pattern in specified files
+  
+  Change navigation:
+  
+   bisect        subdivision search of changesets
+   heads         show branch heads
+   identify      identify the working directory or specified revision
+   log           show revision history of entire repository or files
+  
+  Working directory management:
+  
+   add           add the specified files on the next commit
+   addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
-   graft         copy changes from other branches onto the current branch
-   grep          search revision history for a pattern in specified files
-   heads         show branch heads
-   help          show help for a given topic or a help overview
-   identify      identify the working directory or specified revision
-   import        import an ordered set of patches
-   incoming      show new changesets found in source
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   manifest      output the current or given revision of the project manifest
-   merge         merge another revision into working directory
-   outgoing      show changesets not found in the destination
-   paths         show aliases for remote repositories
-   phase         set or show the current phase name
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   recover       roll back an interrupted transaction
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
-   serve         start stand-alone webserver
    status        show changed files in the working directory
    summary       summarize working directory state
-   tag           add one or more tags for the current or given revision
-   tags          list repository tags
+   update        update working directory (or switch revisions)
+  
+  Change import/export:
+  
+   archive       create an unversioned archive of a repository revision
+   bundle        create a bundle file
+   export        dump the header and diffs for one or more changesets
+   import        import an ordered set of patches
    unbundle      apply one or more bundle files
-   update        update working directory (or switch revisions)
+  
+  Repository maintenance:
+  
+   manifest      output the current or given revision of the project manifest
+   recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
+  
+  Help:
+  
+   config        show combined config settings from all hgrc files
+   help          show help for a given topic or a help overview
    version       output version and copyright information
   
   additional help topics:
   
-   bundlespec    Bundle File Formats
+  Mercurial identifiers:
+  
+   filesets      Specifying File Sets
+   hgignore      Syntax for Mercurial Ignore Files
+   patterns      File Name Patterns
+   revisions     Specifying Revisions
+   urls          URL Paths
+  
+  Mercurial output:
+  
    color         Colorizing Outputs
+   dates         Date Formats
+   diffs         Diff Formats
+   templating    Template Usage
+  
+  Mercurial configuration:
+  
    config        Configuration Files
-   dates         Date Formats
-   deprecated    Deprecated Features
-   diffs         Diff Formats
    environment   Environment Variables
    extensions    Using Additional Features
-   filesets      Specifying File Sets
    flags         Command-line flags
-   glossary      Glossary
-   hgignore      Syntax for Mercurial Ignore Files
    hgweb         Configuring hgweb
-   internals     Technical implementation topics
    merge-tools   Merge Tools
    pager         Pager Support
-   patterns      File Name Patterns
+  
+  Concepts:
+  
+   bundlespec    Bundle File Formats
+   glossary      Glossary
    phases        Working with Phases
-   revisions     Specifying Revisions
+   subrepos      Subrepositories
+  
+  Miscellaneous:
+  
+   deprecated    Deprecated Features
+   internals     Technical implementation topics
    scripting     Using Mercurial from scripts and automation
-   subrepos      Subrepositories
-   templating    Template Usage
-   urls          URL Paths
   
   (use 'hg help -v' to show built-in aliases and global options)
 
   $ hg -q help
-   add           add the specified files on the next commit
-   addremove     add all new files, delete all missing files
-   annotate      show changeset information by line for each file
-   archive       create an unversioned archive of a repository revision
+  Repository creation:
+  
+   clone         make a copy of an existing repository
+   init          create a new repository in the given directory
+  
+  Remote repository management:
+  
+   incoming      show new changesets found in source
+   outgoing      show changesets not found in the destination
+   paths         show aliases for remote repositories
+   pull          pull changes from the specified source
+   push          push changes to the specified destination
+   serve         start stand-alone webserver
+  
+  Change creation:
+  
+   commit        commit the specified files or all outstanding changes
+  
+  Change manipulation:
+  
    backout       reverse effect of earlier changeset
-   bisect        subdivision search of changesets
+   graft         copy changes from other branches onto the current branch
+   merge         merge another revision into working directory
+  
+  Change organization:
+  
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a bundle file
+   phase         set or show the current phase name
+   tag           add one or more tags for the current or given revision
+   tags          list repository tags
+  
+  File content management:
+  
+   annotate      show changeset information by line for each file
    cat           output the current or given revision of files
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
+   grep          search revision history for a pattern in specified files
+  
+  Change navigation:
+  
+   bisect        subdivision search of changesets
+   heads         show branch heads
+   identify      identify the working directory or specified revision
+   log           show revision history of entire repository or files
+  
+  Working directory management:
+  
+   add           add the specified files on the next commit
+   addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
-   graft         copy changes from other branches onto the current branch
-   grep          search revision history for a pattern in specified files
-   heads         show branch heads
-   help          show help for a given topic or a help overview
-   identify      identify the working directory or specified revision
-   import        import an ordered set of patches
-   incoming      show new changesets found in source
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   manifest      output the current or given revision of the project manifest
-   merge         merge another revision into working directory
-   outgoing      show changesets not found in the destination
-   paths         show aliases for remote repositories
-   phase         set or show the current phase name
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   recover       roll back an interrupted transaction
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
-   serve         start stand-alone webserver
    status        show changed files in the working directory
    summary       summarize working directory state
-   tag           add one or more tags for the current or given revision
-   tags          list repository tags
+   update        update working directory (or switch revisions)
+  
+  Change import/export:
+  
+   archive       create an unversioned archive of a repository revision
+   bundle        create a bundle file
+   export        dump the header and diffs for one or more changesets
+   import        import an ordered set of patches
    unbundle      apply one or more bundle files
-   update        update working directory (or switch revisions)
+  
+  Repository maintenance:
+  
+   manifest      output the current or given revision of the project manifest
+   recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
+  
+  Help:
+  
+   config        show combined config settings from all hgrc files
+   help          show help for a given topic or a help overview
    version       output version and copyright information
   
   additional help topics:
   
-   bundlespec    Bundle File Formats
+  Mercurial identifiers:
+  
+   filesets      Specifying File Sets
+   hgignore      Syntax for Mercurial Ignore Files
+   patterns      File Name Patterns
+   revisions     Specifying Revisions
+   urls          URL Paths
+  
+  Mercurial output:
+  
    color         Colorizing Outputs
+   dates         Date Formats
+   diffs         Diff Formats
+   templating    Template Usage
+  
+  Mercurial configuration:
+  
    config        Configuration Files
-   dates         Date Formats
-   deprecated    Deprecated Features
-   diffs         Diff Formats
    environment   Environment Variables
    extensions    Using Additional Features
-   filesets      Specifying File Sets
    flags         Command-line flags
-   glossary      Glossary
-   hgignore      Syntax for Mercurial Ignore Files
    hgweb         Configuring hgweb
-   internals     Technical implementation topics
    merge-tools   Merge Tools
    pager         Pager Support
-   patterns      File Name Patterns
+  
+  Concepts:
+  
+   bundlespec    Bundle File Formats
+   glossary      Glossary
    phases        Working with Phases
-   revisions     Specifying Revisions
+   subrepos      Subrepositories
+  
+  Miscellaneous:
+  
+   deprecated    Deprecated Features
+   internals     Technical implementation topics
    scripting     Using Mercurial from scripts and automation
-   subrepos      Subrepositories
-   templating    Template Usage
-   urls          URL Paths
 
 Test extension help:
   $ hg help extensions --config extensions.rebase= --config extensions.children=
@@ -262,6 +354,7 @@
        censor        erase file content at a given revision
        churn         command to display statistics about repository history
        clonebundles  advertise pre-generated bundles to seed clones
+       closehead     close arbitrary heads without checking them out first
        convert       import revisions from foreign VCS repositories into
                      Mercurial
        eol           automatically manage newlines in repository files
@@ -637,6 +730,7 @@
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
    -S --subrepos            recurse into subrepositories
+   -T --template TEMPLATE   display with template
   
   (some details hidden, use --verbose to show complete help)
 
@@ -652,29 +746,7 @@
 
   $ hg skjdfks
   hg: unknown command 'skjdfks'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
 
 Typoed command gives suggestion
@@ -830,55 +902,87 @@
   
   list of commands:
   
-   add           add the specified files on the next commit
-   addremove     add all new files, delete all missing files
-   annotate      show changeset information by line for each file
-   archive       create an unversioned archive of a repository revision
+  Repository creation:
+  
+   clone         make a copy of an existing repository
+   init          create a new repository in the given directory
+  
+  Remote repository management:
+  
+   incoming      show new changesets found in source
+   outgoing      show changesets not found in the destination
+   paths         show aliases for remote repositories
+   pull          pull changes from the specified source
+   push          push changes to the specified destination
+   serve         start stand-alone webserver
+  
+  Change creation:
+  
+   commit        commit the specified files or all outstanding changes
+  
+  Change manipulation:
+  
    backout       reverse effect of earlier changeset
-   bisect        subdivision search of changesets
+   graft         copy changes from other branches onto the current branch
+   merge         merge another revision into working directory
+  
+  Change organization:
+  
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a bundle file
+   phase         set or show the current phase name
+   tag           add one or more tags for the current or given revision
+   tags          list repository tags
+  
+  File content management:
+  
+   annotate      show changeset information by line for each file
    cat           output the current or given revision of files
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   config        show combined config settings from all hgrc files
    copy          mark files as copied for the next commit
    diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
+   grep          search revision history for a pattern in specified files
+  
+  Change navigation:
+  
+   bisect        subdivision search of changesets
+   heads         show branch heads
+   identify      identify the working directory or specified revision
+   log           show revision history of entire repository or files
+  
+  Working directory management:
+  
+   add           add the specified files on the next commit
+   addremove     add all new files, delete all missing files
    files         list tracked files
    forget        forget the specified files on the next commit
-   graft         copy changes from other branches onto the current branch
-   grep          search revision history for a pattern in specified files
-   heads         show branch heads
-   help          show help for a given topic or a help overview
-   identify      identify the working directory or specified revision
-   import        import an ordered set of patches
-   incoming      show new changesets found in source
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   manifest      output the current or given revision of the project manifest
-   merge         merge another revision into working directory
-   outgoing      show changesets not found in the destination
-   paths         show aliases for remote repositories
-   phase         set or show the current phase name
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   recover       roll back an interrupted transaction
    remove        remove the specified files on the next commit
    rename        rename files; equivalent of copy + remove
    resolve       redo merges or set/view the merge status of files
    revert        restore files to their checkout state
    root          print the root (top) of the current working directory
-   serve         start stand-alone webserver
    status        show changed files in the working directory
    summary       summarize working directory state
-   tag           add one or more tags for the current or given revision
-   tags          list repository tags
+   update        update working directory (or switch revisions)
+  
+  Change import/export:
+  
+   archive       create an unversioned archive of a repository revision
+   bundle        create a bundle file
+   export        dump the header and diffs for one or more changesets
+   import        import an ordered set of patches
    unbundle      apply one or more bundle files
-   update        update working directory (or switch revisions)
+  
+  Repository maintenance:
+  
+   manifest      output the current or given revision of the project manifest
+   recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
+  
+  Help:
+  
+   config        show combined config settings from all hgrc files
+   help          show help for a given topic or a help overview
    version       output version and copyright information
   
   enabled extensions:
@@ -887,29 +991,43 @@
   
   additional help topics:
   
-   bundlespec    Bundle File Formats
+  Mercurial identifiers:
+  
+   filesets      Specifying File Sets
+   hgignore      Syntax for Mercurial Ignore Files
+   patterns      File Name Patterns
+   revisions     Specifying Revisions
+   urls          URL Paths
+  
+  Mercurial output:
+  
    color         Colorizing Outputs
+   dates         Date Formats
+   diffs         Diff Formats
+   templating    Template Usage
+  
+  Mercurial configuration:
+  
    config        Configuration Files
-   dates         Date Formats
-   deprecated    Deprecated Features
-   diffs         Diff Formats
    environment   Environment Variables
    extensions    Using Additional Features
-   filesets      Specifying File Sets
    flags         Command-line flags
-   glossary      Glossary
-   hgignore      Syntax for Mercurial Ignore Files
    hgweb         Configuring hgweb
-   internals     Technical implementation topics
    merge-tools   Merge Tools
    pager         Pager Support
-   patterns      File Name Patterns
+  
+  Concepts:
+  
+   bundlespec    Bundle File Formats
+   glossary      Glossary
    phases        Working with Phases
-   revisions     Specifying Revisions
+   subrepos      Subrepositories
+  
+  Miscellaneous:
+  
+   deprecated    Deprecated Features
+   internals     Technical implementation topics
    scripting     Using Mercurial from scripts and automation
-   subrepos      Subrepositories
-   templating    Template Usage
-   urls          URL Paths
   
   (use 'hg help -v' to show built-in aliases and global options)
 
@@ -960,12 +1078,17 @@
                  retrieves a bundle from a repo
    debugignore   display the combined ignore pattern and information about
                  ignored files
-   debugindex    dump the contents of an index file
+   debugindex    dump index data for a storage primitive
    debugindexdot
                  dump an index DAG as a graphviz dot file
+   debugindexstats
+                 show stats related to the changelog index
    debuginstall  test Mercurial installation
    debugknown    test whether node ids are known to a repo
    debuglocks    show or modify state of locks
+   debugmanifestfulltextcache
+                 show, clear or amend the contents of the manifest fulltext
+                 cache
    debugmergestate
                  print merge state
    debugnamecomplete
@@ -989,6 +1112,8 @@
                  rebuild the fncache file
    debugrename   dump rename information
    debugrevlog   show data and statistics about a revlog
+   debugrevlogindex
+                 dump the contents of a revlog index
    debugrevspec  parse and apply a revision specification
    debugserve    run a server with advanced settings
    debugsetparents
@@ -1027,12 +1152,17 @@
   
        bundle2       Bundle2
        bundles       Bundles
+       cbor          CBOR
        censor        Censor
        changegroups  Changegroups
        config        Config Registrar
        requirements  Repository Requirements
        revlogs       Revision Logs
        wireprotocol  Wire Protocol
+       wireprotocolrpc
+                     Wire Protocol RPC
+       wireprotocolv2
+                     Wire Protocol Version 2
 
 sub-topics can be accessed
 
@@ -1046,8 +1176,8 @@
   
       There are 3 versions of changegroups: "1", "2", and "3". From a high-
       level, versions "1" and "2" are almost exactly the same, with the only
-      difference being an additional item in the *delta header*.  Version "3"
-      adds support for revlog flags in the *delta header* and optionally
+      difference being an additional item in the *delta header*. Version "3"
+      adds support for storage flags in the *delta header* and optionally
       exchanging treemanifests (enabled by setting an option on the
       "changegroup" part in the bundle2).
   
@@ -1170,6 +1300,27 @@
       changegroup. This allows the delta to be expressed against any parent,
       which can result in smaller deltas and more efficient encoding of data.
   
+      The *flags* field holds bitwise flags affecting the processing of revision
+      data. The following flags are defined:
+  
+      32768
+         Censored revision. The revision's fulltext has been replaced by censor
+         metadata. May only occur on file revisions.
+  
+      16384
+         Ellipsis revision. Revision hash does not match data (likely due to
+         rewritten parents).
+  
+      8192
+         Externally stored. The revision fulltext contains "key:value" "\n"
+         delimited metadata defining an object stored elsewhere. Used by the LFS
+         extension.
+  
+      For historical reasons, the integer values are identical to revlog version
+      1 per-revision storage flags and correspond to bits being set in this
+      2-byte field. Bits were allocated starting from the most-significant bit,
+      hence the reverse ordering and allocation of these flags.
+  
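
The three flag values above are single bits (bits 15, 14, and 13 of the
2-byte field). A small decoding sketch; the constant and function names
are illustrative, not Mercurial's internal API:

    # Storage flags from the changegroup delta header, as documented above.
    CENSORED = 1 << 15   # 32768: fulltext replaced by censor metadata
    ELLIPSIS = 1 << 14   # 16384: revision hash does not match data
    EXTSTORED = 1 << 13  # 8192: fulltext points at external storage (LFS)

    def describeflags(flags):
        """Return the names of the storage flags set in *flags*."""
        return [name for bit, name in ((CENSORED, 'censored'),
                                       (ELLIPSIS, 'ellipsis'),
                                       (EXTSTORED, 'extstored'))
                if flags & bit]

    assert describeflags(32768 | 8192) == ['censored', 'extstored']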
       Changeset Segment
       =================
   
@@ -1357,6 +1508,55 @@
       "smtp.host"
           Host name of mail server, e.g. "mail.example.com".
   
+
+Test section name with dot
+
+  $ hg help config.ui.username
+      "ui.username"
+          The committer of a changeset created when running "commit". Typically
+          a person's name and email address, e.g. "Fred Widget
+          <fred@example.com>". Environment variables in the username are
+          expanded.
+  
+          (default: "$EMAIL" or "username@hostname". If the username in hgrc is
+          empty, e.g. if the system admin set "username =" in the system hgrc,
+          it has to be specified manually or in a different hgrc file)
+  
+
+  $ hg help config.annotate.git
+  abort: help section not found: config.annotate.git
+  [255]
+
+  $ hg help config.update.check
+      "commands.update.check"
+          Determines what level of checking 'hg update' will perform before
+          moving to a destination revision. Valid values are "abort", "none",
+          "linear", and "noconflict". "abort" always fails if the working
+          directory has uncommitted changes. "none" performs no checking, and
+          may result in a merge with uncommitted changes. "linear" allows any
+          update as long as it follows a straight line in the revision history,
+          and may trigger a merge with uncommitted changes. "noconflict" will
+          allow any update which would not trigger a merge with uncommitted
+          changes, if any are present. (default: "linear")
+  
+
+  $ hg help config.commands.update.check
+      "commands.update.check"
+          Determines what level of checking 'hg update' will perform before
+          moving to a destination revision. Valid values are "abort", "none",
+          "linear", and "noconflict". "abort" always fails if the working
+          directory has uncommitted changes. "none" performs no checking, and
+          may result in a merge with uncommitted changes. "linear" allows any
+          update as long as it follows a straight line in the revision history,
+          and may trigger a merge with uncommitted changes. "noconflict" will
+          allow any update which would not trigger a merge with uncommitted
+          changes, if any are present. (default: "linear")
+  
+
+  $ hg help config.ommands.update.check
+  abort: help section not found: config.ommands.update.check
+  [255]
+
 Unrelated trailing paragraphs shouldn't be included
 
   $ hg help config.extramsg | grep '^$'
@@ -1377,6 +1577,14 @@
   $ hg help config.type | egrep '^$'|wc -l
   \s*3 (re)
 
+  $ hg help config.profiling.type.ls
+          "profiling.type.ls"
+            Use Python's built-in instrumenting profiler. This profiler works on
+            all platforms, but each line number it reports is the first line of
+            a function. This restriction makes it difficult to identify the
+            expensive parts of a non-trivial function.
+  
+
 Separate sections from subsections
 
   $ hg help config.format | egrep '^    ("|-)|^\s*$' | uniq
@@ -1433,19 +1641,19 @@
   > from mercurial import help
   > 
   > def rewrite(ui, topic, doc):
-  >     return doc + '\nhelphook1\n'
+  >     return doc + b'\nhelphook1\n'
   > 
   > def extsetup(ui):
-  >     help.addtopichook('revisions', rewrite)
+  >     help.addtopichook(b'revisions', rewrite)
   > EOF
   $ cat > helphook2.py <<EOF
   > from mercurial import help
   > 
   > def rewrite(ui, topic, doc):
-  >     return doc + '\nhelphook2\n'
+  >     return doc + b'\nhelphook2\n'
   > 
   > def extsetup(ui):
-  >     help.addtopichook('revisions', rewrite)
+  >     help.addtopichook(b'revisions', rewrite)
   > EOF
   $ echo '[extensions]' >> $HGRCPATH
   $ echo "helphook1 = `pwd`/helphook1.py" >> $HGRCPATH
@@ -1499,7 +1707,7 @@
   Commands:
   $ hg help -c commit > /dev/null
   $ hg help -e -c commit > /dev/null
-  $ hg help -e commit > /dev/null
+  $ hg help -e commit
   abort: no such help topic: commit
   (try 'hg help --keyword commit')
   [255]
@@ -1572,7 +1780,7 @@
   > '''
   > from __future__ import absolute_import
   > from mercurial import commands, help
-  > testtopic = """This paragraph is never omitted (for topic).
+  > testtopic = b"""This paragraph is never omitted (for topic).
   > 
   > .. container:: verbose
   > 
@@ -1582,8 +1790,8 @@
   > This paragraph is never omitted, too (for topic)
   > """
   > def extsetup(ui):
-  >     help.helptable.append((["topic-containing-verbose"],
-  >                            "This is the topic to test omit indicating.",
+  >     help.helptable.append(([b"topic-containing-verbose"],
+  >                            b"This is the topic to test omit indicating.",
   >                            lambda ui: testtopic))
   > EOF
   $ echo '[extensions]' >> $HGRCPATH
@@ -1727,15 +1935,15 @@
 This tests that section lookup by translated string isn't broken by
 such str.lower().
 
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > def escape(s):
-  >     return ''.join('\u%x' % ord(uc) for uc in s.decode('cp932'))
+  >     return b''.join(b'\\u%x' % ord(uc) for uc in s.decode('cp932'))
   > # translation of "record" in ja_JP.cp932
-  > upper = "\x8bL\x98^"
+  > upper = b"\x8bL\x98^"
   > # str.lower()-ed section name should be treated as different one
-  > lower = "\x8bl\x98^"
-  > with open('ambiguous.py', 'w') as fp:
-  >     fp.write("""# ambiguous section names in ja_JP.cp932
+  > lower = b"\x8bl\x98^"
+  > with open('ambiguous.py', 'wb') as fp:
+  >     fp.write(b"""# ambiguous section names in ja_JP.cp932
   > u'''summary of extension
   > 
   > %s
@@ -1761,9 +1969,10 @@
   > ambiguous = ./ambiguous.py
   > EOF
 
-  $ $PYTHON <<EOF | sh
-  > upper = "\x8bL\x98^"
-  > print("hg --encoding cp932 help -e ambiguous.%s" % upper)
+  $ "$PYTHON" <<EOF | sh
+  > from mercurial import pycompat
+  > upper = b"\x8bL\x98^"
+  > pycompat.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % upper)
   > EOF
   \x8bL\x98^ (esc)
   ----
@@ -1771,9 +1980,10 @@
   Upper name should show only this message
   
 
-  $ $PYTHON <<EOF | sh
-  > lower = "\x8bl\x98^"
-  > print("hg --encoding cp932 help -e ambiguous.%s" % lower)
+  $ "$PYTHON" <<EOF | sh
+  > from mercurial import pycompat
+  > lower = b"\x8bl\x98^"
+  > pycompat.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % lower)
   > EOF
   \x8bl\x98^ (esc)
   ----
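
The two hunks above port these inline scripts to byte-oriented output:
print() of cp932 bytes is replaced with pycompat.stdout.write() so the
raw bytes reach the pipe unmangled under Python 3. The pattern in
isolation, assuming Mercurial is importable:

    # pycompat.stdout is sys.stdout on Python 2 and sys.stdout.buffer
    # on Python 3, so raw bytes can be written on either.
    from mercurial import pycompat

    pycompat.stdout.write(b'\x8bL\x98^\n')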
@@ -1848,18 +2058,26 @@
         This implies premerge. Therefore, files aren't dumped, if premerge runs
         successfully. Use :forcedump to forcibly write files out.
   
+        (actual capabilities: binary, symlink)
+  
       ":fail"
         Rather than attempting to merge files that were modified on both
         branches, it marks them as unresolved. The resolve command must be used
         to resolve these conflicts.
   
+        (actual capabilities: binary, symlink)
+  
       ":forcedump"
         Creates three versions of the files as same as :dump, but omits
         premerge.
   
+        (actual capabilities: binary, symlink)
+  
       ":local"
         Uses the local 'p1()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":merge"
         Uses the internal non-interactive simple merge algorithm for merging
         files. It will fail if there are any conflicts and leave markers in the
@@ -1883,10 +2101,14 @@
       ":other"
         Uses the other 'p2()' version of files as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":prompt"
         Asks the user which of the local 'p1()' or the other 'p2()' version to
         keep as the merged version.
   
+        (actual capabilities: binary, symlink)
+  
       ":tagmerge"
         Uses the internal tag merge algorithm (experimental).
   
@@ -1896,7 +2118,8 @@
         markers are inserted.
   
       Internal tools are always available and do not require a GUI but will by
-      default not handle symlinks or binary files.
+      default not handle symlinks or binary files. See the next section for
+      details about the "actual capabilities" described above.
   
       Choosing a merge tool
       =====================
@@ -1911,8 +2134,7 @@
          must be executable by the shell.
       3. If the filename of the file to be merged matches any of the patterns in
          the merge-patterns configuration section, the first usable merge tool
-         corresponding to a matching pattern is used. Here, binary capabilities
-         of the merge tool are not considered.
+         corresponding to a matching pattern is used.
       4. If ui.merge is set it will be considered next. If the value is not the
          name of a configured tool, the specified value is used and must be
          executable by the shell. Otherwise the named tool is used if it is
@@ -1925,6 +2147,27 @@
          internal ":merge" is used.
       8. Otherwise, ":prompt" is used.
   
+      For historical reasons, Mercurial treats merge tools as described below
+      while examining the rules above.
+  
+      step specified via  binary symlink
+      ----------------------------------
+      1.   --tool         o/o    o/o
+      2.   HGMERGE        o/o    o/o
+      3.   merge-patterns o/o(*) x/?(*)
+      4.   ui.merge       x/?(*) x/?(*)
+  
+      Each capability column indicates Mercurial's behavior for
+      internal/external merge tools when examining each rule.
+  
+      - "o": "assume that a tool has capability"
+      - "x": "assume that a tool does not have capability"
+      - "?": "check actual capability of a tool"
+  
+      If "merge.strict-capability-check" configuration is true, Mercurial checks
+      capabilities of merge tools strictly in (*) cases above (= each capability
+      column becomes "?/?"). It is false by default for backward compatibility.
+  
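
A hedged sketch of the capability table as data plus a lookup helper;
the names here are illustrative, and the real logic lives in
Mercurial's filemerge code:

    # 'o' = assume capable, 'x' = assume not capable, '?' = check tool.
    # Values are (internal tool, external tool); starred cells are the
    # ones merge.strict-capability-check turns into checks.
    MATRIX = {
        ('--tool', 'binary'): ('o', 'o'),
        ('--tool', 'symlink'): ('o', 'o'),
        ('HGMERGE', 'binary'): ('o', 'o'),
        ('HGMERGE', 'symlink'): ('o', 'o'),
        ('merge-patterns', 'binary'): ('o', 'o'),   # (*)
        ('merge-patterns', 'symlink'): ('x', '?'),  # (*)
        ('ui.merge', 'binary'): ('x', '?'),         # (*)
        ('ui.merge', 'symlink'): ('x', '?'),        # (*)
    }
    STARRED = set(MATRIX) - {('--tool', 'binary'), ('--tool', 'symlink'),
                             ('HGMERGE', 'binary'), ('HGMERGE', 'symlink')}

    def behavior(step, capability, internal, strict=False):
        """Return 'o', 'x' or '?' for one cell of the table above."""
        if strict and (step, capability) in STARRED:
            return '?'
        cell = MATRIX[(step, capability)]
        return cell[0] if internal else cell[1]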
       Note:
          After selecting a merge program, Mercurial will by default attempt to
          merge the files using a simple merge algorithm first. Only if it
@@ -1948,7 +2191,7 @@
 Test usage of section marks in help documents
 
   $ cd "$TESTDIR"/../doc
-  $ $PYTHON check-seclevel.py
+  $ "$PYTHON" check-seclevel.py
   $ cd $TESTTMP
 
 #if serve
@@ -3221,6 +3464,13 @@
   Bundles
   </td></tr>
   <tr><td>
+  <a href="/help/internals.cbor">
+  cbor
+  </a>
+  </td><td>
+  CBOR
+  </td></tr>
+  <tr><td>
   <a href="/help/internals.censor">
   censor
   </a>
@@ -3262,6 +3512,20 @@
   </td><td>
   Wire Protocol
   </td></tr>
+  <tr><td>
+  <a href="/help/internals.wireprotocolrpc">
+  wireprotocolrpc
+  </a>
+  </td><td>
+  Wire Protocol RPC
+  </td></tr>
+  <tr><td>
+  <a href="/help/internals.wireprotocolv2">
+  wireprotocolv2
+  </a>
+  </td><td>
+  Wire Protocol Version 2
+  </td></tr>
   
   
   
@@ -3332,8 +3596,8 @@
   <p>
   There are 3 versions of changegroups: &quot;1&quot;, &quot;2&quot;, and &quot;3&quot;. From a
   high-level, versions &quot;1&quot; and &quot;2&quot; are almost exactly the same, with the
-  only difference being an additional item in the *delta header*.  Version
-  &quot;3&quot; adds support for revlog flags in the *delta header* and optionally
+  only difference being an additional item in the *delta header*. Version
+  &quot;3&quot; adds support for storage flags in the *delta header* and optionally
   exchanging treemanifests (enabled by setting an option on the
   &quot;changegroup&quot; part in the bundle2).
   </p>
@@ -3479,6 +3743,24 @@
   changegroup. This allows the delta to be expressed against any parent,
   which can result in smaller deltas and more efficient encoding of data.
   </p>
+  <p>
+  The *flags* field holds bitwise flags affecting the processing of revision
+  data. The following flags are defined:
+  </p>
+  <dl>
+   <dt>32768
+   <dd>Censored revision. The revision's fulltext has been replaced by censor metadata. May only occur on file revisions.
+   <dt>16384
+   <dd>Ellipsis revision. Revision hash does not match data (likely due to rewritten parents).
+   <dt>8192
+   <dd>Externally stored. The revision fulltext contains &quot;key:value&quot; &quot;\n&quot; delimited metadata defining an object stored elsewhere. Used by the LFS extension.
+  </dl>
+  <p>
+  For historical reasons, the integer values are identical to revlog version 1
+  per-revision storage flags and correspond to bits being set in this 2-byte
+  field. Bits were allocated starting from the most-significant bit, hence the
+  reverse ordering and allocation of these flags.
+  </p>
   <h2>Changeset Segment</h2>
   <p>
   The *changeset segment* consists of a single *delta group* holding
--- a/tests/test-hghave.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hghave.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,8 +22,10 @@
   > EOF
   $ ( \
   > testrepohgenv; \
-  > $PYTHON $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE test-hghaveaddon.t \
+  > "$PYTHON" $TESTDIR/run-tests.py -j 1 \
+  >    $HGTEST_RUN_TESTS_PURE test-hghaveaddon.t \
   > )
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -35,9 +37,9 @@
 (terminate with exit code 2 at failure of importing hghaveaddon.py)
 
   $ rm hghaveaddon.*
-  $ cat > hghaveaddon.py <<EOF
+  $ cat > hghaveaddon.py <<NO_CHECK_EOF
   > importing this file should cause syntax error
-  > EOF
+  > NO_CHECK_EOF
 
   $ hghave custom
   failed to import hghaveaddon.py from '.': invalid syntax (hghaveaddon.py, line 1)
--- a/tests/test-hgignore.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgignore.t	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,7 @@
   > f.close()
   > EOF
 
-  $ $PYTHON makeignore.py
+  $ "$PYTHON" makeignore.py
 
 Should display baz only:
 
--- a/tests/test-hgrc.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgrc.t	Mon Oct 22 14:46:06 2018 -0400
@@ -58,7 +58,7 @@
   unexpected leading whitespace
   [255]
 
-  $ $PYTHON -c "from __future__ import print_function; print('[foo]\nbar = a\n b\n c \n  de\n fg \nbaz = bif cb \n')" \
+  $ "$PYTHON" -c "from __future__ import print_function; print('[foo]\nbar = a\n b\n c \n  de\n fg \nbaz = bif cb \n')" \
   > > $HGRC
   $ hg showconfig foo
   foo.bar=a\nb\nc\nde\nfg
--- a/tests/test-hgweb-commands.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-commands.t	Mon Oct 22 14:46:06 2018 -0400
@@ -26,7 +26,7 @@
   $ hg ci -Ambranch
   $ hg branch unstable
   marked working directory as branch unstable
-  >>> open('msg', 'wb').write('branch commit with null character: \0\n')
+  >>> open('msg', 'wb').write(b'branch commit with null character: \0\n') and None
   $ hg ci -l msg
   $ rm msg
 
@@ -36,7 +36,7 @@
   > stable.width = 3
   > stable.color = FF0000
   > [websub]
-  > append = s|(.*)|\1(websub)|
+  > append = s|(.+)|\1(websub)|
   > EOF
 
   $ hg serve --config server.uncompressed=False -n test -p $HGPORT -d --pid-file=hg.pid -E errors.log
@@ -2183,7 +2183,7 @@
   
   batch
   branchmap
-  $USUAL_BUNDLE2_CAPS_SERVER$
+  $USUAL_BUNDLE2_CAPS$
   changegroupsubset
   compression=$BUNDLE2_COMPRESSIONS$
   getbundle
@@ -2276,13 +2276,13 @@
   > from mercurial import demandimport; demandimport.enable()
   > from mercurial.hgweb import hgweb
   > from mercurial.hgweb import wsgicgi
-  > app = hgweb('.', 'test')
+  > app = hgweb(b'.', b'test')
   > wsgicgi.launch(app)
   > HGWEB
   $ . "$TESTDIR/cgienv"
   $ PATH_INFO=/bookmarks; export PATH_INFO
   $ QUERY_STRING='style=raw'
-  $ $PYTHON hgweb.cgi | grep -v ETag:
+  $ "$PYTHON" hgweb.cgi | grep -v ETag:
   Status: 200 Script output follows\r (esc)
   Content-Type: text/plain; charset=ascii\r (esc)
   \r (esc)
@@ -2291,7 +2291,7 @@
 
   $ PATH_INFO=/; export PATH_INFO
   $ QUERY_STRING='cmd=listkeys&namespace=bookmarks'
-  $ $PYTHON hgweb.cgi
+  $ "$PYTHON" hgweb.cgi
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
   Content-Length: 0\r (esc)
@@ -2301,7 +2301,7 @@
 
   $ PATH_INFO=/log; export PATH_INFO
   $ QUERY_STRING='rev=babar'
-  $ $PYTHON hgweb.cgi > search
+  $ "$PYTHON" hgweb.cgi > search
   $ grep Status search
   Status: 200 Script output follows\r (esc)
 
@@ -2309,7 +2309,7 @@
 
   $ PATH_INFO=/summary; export PATH_INFO
   $ QUERY_STRING='style=monoblue'; export QUERY_STRING
-  $ $PYTHON hgweb.cgi > summary.out
+  $ "$PYTHON" hgweb.cgi > summary.out
   $ grep "^Status" summary.out
   Status: 200 Script output follows\r (esc)
 
@@ -2320,7 +2320,7 @@
 
   $ PATH_INFO=/rev/5; export PATH_INFO
   $ QUERY_STRING='style=raw'
-  $ $PYTHON hgweb.cgi #> search
+  $ "$PYTHON" hgweb.cgi #> search
   Status: 404 Not Found\r (esc)
   ETag: W/"*"\r (glob) (esc)
   Content-Type: text/plain; charset=ascii\r (esc)
@@ -2334,7 +2334,7 @@
 
   $ PATH_INFO=/rev/4; export PATH_INFO
   $ QUERY_STRING='style=raw'
-  $ $PYTHON hgweb.cgi #> search
+  $ "$PYTHON" hgweb.cgi #> search
   Status: 404 Not Found\r (esc)
   ETag: W/"*"\r (glob) (esc)
   Content-Type: text/plain; charset=ascii\r (esc)
@@ -2362,11 +2362,11 @@
   $ hg phase --force --secret 0
   $ PATH_INFO=/graph/; export PATH_INFO
   $ QUERY_STRING=''
-  $ $PYTHON hgweb.cgi | grep Status
+  $ "$PYTHON" hgweb.cgi | grep Status
   Status: 200 Script output follows\r (esc)
 (check rendered revision)
   $ QUERY_STRING='style=raw'
-  $ $PYTHON hgweb.cgi | grep -v ETag
+  $ "$PYTHON" hgweb.cgi | grep -v ETag
   Status: 200 Script output follows\r (esc)
   Content-Type: text/plain; charset=ascii\r (esc)
   \r (esc)
--- a/tests/test-hgweb-json.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-json.t	Mon Oct 22 14:46:06 2018 -0400
@@ -588,6 +588,187 @@
     "node": "cc725e08502a79dd1eda913760fbe06ed7a9abc7"
   }
 
+shortlog is displayed by default (issue5978)
+
+  $ request '?style=json'
+  200 Script output follows
+  
+  {
+    "changeset_count": 10,
+    "changesets": [
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "merge test-branch into default",
+        "node": "cc725e08502a79dd1eda913760fbe06ed7a9abc7",
+        "parents": [
+          "ceed296fe500c3fac9541e31dad860cb49c89e45",
+          "ed66c30e87eb65337c05a4229efaa5f1d5285a90"
+        ],
+        "phase": "draft",
+        "tags": [
+          "tip"
+        ],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "test-branch",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "another commit in test-branch",
+        "node": "ed66c30e87eb65337c05a4229efaa5f1d5285a90",
+        "parents": [
+          "6ab967a8ab3489227a83f80e920faa039a71819f"
+        ],
+        "phase": "draft",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "test-branch",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "create test branch",
+        "node": "6ab967a8ab3489227a83f80e920faa039a71819f",
+        "parents": [
+          "06e557f3edf66faa1ccaba5dd8c203c21cc79f1e"
+        ],
+        "phase": "draft",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [
+          "bookmark2"
+        ],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "create tag2",
+        "node": "ceed296fe500c3fac9541e31dad860cb49c89e45",
+        "parents": [
+          "f2890a05fea49bfaf9fb27ed5490894eba32da78"
+        ],
+        "phase": "draft",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "another commit to da/foo",
+        "node": "f2890a05fea49bfaf9fb27ed5490894eba32da78",
+        "parents": [
+          "93a8ce14f89156426b7fa981af8042da53f03aa0"
+        ],
+        "phase": "draft",
+        "tags": [
+          "tag2"
+        ],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "create tag",
+        "node": "93a8ce14f89156426b7fa981af8042da53f03aa0",
+        "parents": [
+          "78896eb0e102174ce9278438a95e12543e4367a7"
+        ],
+        "phase": "public",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "move foo",
+        "node": "78896eb0e102174ce9278438a95e12543e4367a7",
+        "parents": [
+          "8d7c456572acf3557e8ed8a07286b10c408bcec5"
+        ],
+        "phase": "public",
+        "tags": [
+          "tag1"
+        ],
+        "user": "test"
+      },
+      {
+        "bookmarks": [
+          "bookmark1"
+        ],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "modify da/foo",
+        "node": "8d7c456572acf3557e8ed8a07286b10c408bcec5",
+        "parents": [
+          "f8bbb9024b10f93cdbb8d940337398291d40dea8"
+        ],
+        "phase": "public",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "modify foo",
+        "node": "f8bbb9024b10f93cdbb8d940337398291d40dea8",
+        "parents": [
+          "06e557f3edf66faa1ccaba5dd8c203c21cc79f1e"
+        ],
+        "phase": "public",
+        "tags": [],
+        "user": "test"
+      },
+      {
+        "bookmarks": [],
+        "branch": "default",
+        "date": [
+          0.0,
+          0
+        ],
+        "desc": "initial",
+        "node": "06e557f3edf66faa1ccaba5dd8c203c21cc79f1e",
+        "parents": [],
+        "phase": "public",
+        "tags": [],
+        "user": "test"
+      }
+    ],
+    "node": "cc725e08502a79dd1eda913760fbe06ed7a9abc7"
+  }
+
 changeset/ renders the tip changeset
 
   $ request json-rev
@@ -2002,15 +2183,25 @@
     "topic": "phases"
   }
 
+Error page shouldn't crash
+
+  $ request json-changeset/deadbeef
+  404 Not Found
+  
+  {
+    "error": "unknown revision 'deadbeef'"
+  }
+  [1]
+
 Commit message with Japanese Kanji 'Noh', which ends with '\x5c'
 
   $ echo foo >> da/foo
-  $ HGENCODING=cp932 hg ci -m `$PYTHON -c 'print("\x94\x5c")'`
+  $ HGENCODING=cp932 hg ci -m `"$PYTHON" -c 'print("\x94\x5c")'`
 
 Commit message with null character
 
   $ echo foo >> da/foo
-  >>> open('msg', 'wb').write('commit with null character: \0\n')
+  >>> open('msg', 'wb').write(b'commit with null character: \0\n') and None
   $ hg ci -l msg
   $ rm msg
 
--- a/tests/test-hgweb-no-path-info.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-no-path-info.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,11 +18,8 @@
   > from __future__ import absolute_import
   > import os
   > import sys
-  > from mercurial.hgweb import (
+  > from mercurial import (
   >     hgweb,
-  >     hgwebdir,
-  > )
-  > from mercurial import (
   >     util,
   > )
   > stringio = util.stringio
@@ -36,6 +33,7 @@
   >     print('---- HEADERS')
   >     print([i for i in headers if i[0] != 'ETag'])
   >     print('---- DATA')
+  >     sys.stdout.flush()
   >     return output.write
   > 
   > env = {
@@ -55,22 +53,29 @@
   > }
   > 
   > def process(app):
+  >     try:
+  >         stdout = sys.stdout.buffer
+  >     except AttributeError:
+  >         stdout = sys.stdout
   >     content = app(env, startrsp)
-  >     sys.stdout.write(output.getvalue())
-  >     sys.stdout.write(''.join(content))
+  >     stdout.write(output.getvalue())
+  >     stdout.write(b''.join(content))
+  >     stdout.flush()
   >     getattr(content, 'close', lambda : None)()
-  >     print('---- ERRORS')
-  >     print(errors.getvalue())
+  >     if errors.getvalue():
+  >         print('---- ERRORS')
+  >         print(errors.getvalue())
+  >     sys.stdout.flush()
   > 
   > output = stringio()
   > env['QUERY_STRING'] = 'style=atom'
-  > process(hgweb('.', name='repo'))
+  > process(hgweb.hgweb(b'.', name=b'repo'))
   > 
   > output = stringio()
   > env['QUERY_STRING'] = 'style=raw'
-  > process(hgwebdir({'repo': '.'}))
+  > process(hgweb.hgwebdir({b'repo': b'.'}))
   > EOF
-  $ $PYTHON request.py
+  $ "$PYTHON" request.py
   ---- STATUS
   200 Script output follows
   ---- HEADERS
@@ -130,8 +135,6 @@
    </entry>
   
   </feed>
-  ---- ERRORS
-  
   ---- STATUS
   200 Script output follows
   ---- HEADERS
@@ -140,7 +143,5 @@
   
   /repo/
   
-  ---- ERRORS
-  
 
   $ cd ..
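
The process() rewrite above routes the WSGI body through sys.stdout.buffer
so the bytes reach the console unmodified on Python 3, falling back to plain
sys.stdout on Python 2, where stdout already accepts bytes. The pattern in
isolation, with the flush calls that keep text and byte output ordered:

    import sys

    def binstdout():
        # Python 3 wraps stdout in a text layer; raw bytes must go
        # through its .buffer attribute, which Python 2 lacks.
        try:
            return sys.stdout.buffer
        except AttributeError:
            return sys.stdout

    sys.stdout.flush()          # drain pending text output first
    binstdout().write(b'---- DATA\n')
    binstdout().flush()
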
--- a/tests/test-hgweb-no-request-uri.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-no-request-uri.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,11 +18,9 @@
   > from __future__ import absolute_import
   > import os
   > import sys
-  > from mercurial.hgweb import (
+  > from mercurial import (
+  >     encoding,
   >     hgweb,
-  >     hgwebdir,
-  > )
-  > from mercurial import (
   >     util,
   > )
   > stringio = util.stringio
@@ -55,33 +53,33 @@
   > 
   > def process(app):
   >     content = app(env, startrsp)
-  >     sys.stdout.write(output.getvalue())
-  >     sys.stdout.write(''.join(content))
+  >     sys.stdout.write(encoding.strfromlocal(output.getvalue()))
+  >     sys.stdout.write(encoding.strfromlocal(b''.join(content)))
   >     getattr(content, 'close', lambda : None)()
   >     print('---- ERRORS')
-  >     print(errors.getvalue())
+  >     print(encoding.strfromlocal(errors.getvalue())) # avoid b'' output diff
   > 
   > output = stringio()
   > env['PATH_INFO'] = '/'
   > env['QUERY_STRING'] = 'style=atom'
-  > process(hgweb('.', name = 'repo'))
+  > process(hgweb.hgweb(b'.', name = b'repo'))
   > 
   > output = stringio()
   > env['PATH_INFO'] = '/file/tip/'
   > env['QUERY_STRING'] = 'style=raw'
-  > process(hgweb('.', name = 'repo'))
+  > process(hgweb.hgweb(b'.', name = b'repo'))
   > 
   > output = stringio()
   > env['PATH_INFO'] = '/'
   > env['QUERY_STRING'] = 'style=raw'
-  > process(hgwebdir({'repo': '.'}))
+  > process(hgweb.hgwebdir({b'repo': b'.'}))
   > 
   > output = stringio()
   > env['PATH_INFO'] = '/repo/file/tip/'
   > env['QUERY_STRING'] = 'style=raw'
-  > process(hgwebdir({'repo': '.'}))
+  > process(hgweb.hgwebdir({b'repo': b'.'}))
   > EOF
-  $ $PYTHON request.py
+  $ "$PYTHON" request.py
   ---- STATUS
   200 Script output follows
   ---- HEADERS
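
Here the test keeps printing through the text-mode sys.stdout and instead
converts the bytes with encoding.strfromlocal(), avoiding the b'...' repr
that print() would otherwise produce on Python 3. A rough stand-in using
only the standard library (Mercurial's helper honours the locale-derived
encoding rather than the hard-coded one assumed here):

    import sys

    def strfromlocal(b, enc='utf-8'):  # enc is illustrative
        # Decode for printing on Python 3; on Python 2 bytes is str.
        return b.decode(enc) if sys.version_info[0] >= 3 else b

    print(strfromlocal(b'---- ERRORS'))
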
--- a/tests/test-hgweb-non-interactive.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-non-interactive.t	Mon Oct 22 14:46:06 2018 -0400
@@ -12,14 +12,14 @@
   > import sys
   > from mercurial import (
   >     dispatch,
+  >     encoding,
   >     hg,
+  >     pycompat,
   >     ui as uimod,
   >     util,
   > )
   > ui = uimod.ui
-  > from mercurial.hgweb.hgweb_mod import (
-  >     hgweb,
-  > )
+  > from mercurial.hgweb import hgweb_mod
   > stringio = util.stringio
   > 
   > class FileLike(object):
@@ -65,18 +65,20 @@
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
   > 
-  > i = hgweb('.')
+  > i = hgweb_mod.hgweb(b'.')
   > for c in i(env, startrsp):
   >     pass
-  > print('---- ERRORS')
-  > print(errors.getvalue())
+  > sys.stdout.flush()
+  > pycompat.stdout.write(b'---- ERRORS\n')
+  > pycompat.stdout.write(b'%s\n' % errors.getvalue())
   > print('---- OS.ENVIRON wsgi variables')
   > print(sorted([x for x in os.environ if x.startswith('wsgi')]))
   > print('---- request.ENVIRON wsgi variables')
   > with i._obtainrepo() as repo:
-  >     print(sorted([x for x in repo.ui.environ if x.startswith('wsgi')]))
+  >     print(sorted([encoding.strfromlocal(x) for x in repo.ui.environ
+  >                   if x.startswith(b'wsgi')]))
   > EOF
-  $ $PYTHON request.py
+  $ "$PYTHON" request.py
   ---- STATUS
   200 Script output follows
   ---- HEADERS
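
These hgweb tests drive the application as a bare WSGI callable, with no
HTTP server in between: build an environ dict, pass a start_response
callback that records status and headers, and join the returned body
chunks. The shape of such a harness, sketched with a trivial app standing
in for hgweb:

    def app(environ, start_response):
        # Stand-in for the hgweb callable.
        start_response('200 Script output follows',
                       [('Content-Type', 'text/plain')])
        return [b'---- DATA\n']

    def startrsp(status, headers):
        print('---- STATUS')
        print(status)
        print('---- HEADERS')
        print(headers)

    env = {  # minimal environ; the real tests set many more keys
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/',
        'QUERY_STRING': '',
        'SERVER_NAME': 'localhost',
        'SERVER_PORT': '80',
        'SERVER_PROTOCOL': 'HTTP/1.0',
        'wsgi.version': (1, 0),
    }

    body = b''.join(app(env, startrsp))
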
--- a/tests/test-hgweb-raw.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb-raw.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,4 +55,171 @@
   $ cat access.log error.log
   $LOCALIP - - [$LOGDATE$] "GET /raw-file/bf0ff59095c9/sub/some%20text%25.txt HTTP/1.1" 200 - (glob)
 
+  >>> with open('sub/binary.bin', 'wb') as fp:
+  ...     fp.write(b'Binary\0file') and None
+
+  $ hg ci -Aqm "add binary file" sub/
+  $ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid \
+  > --config web.guessmime=True
+  $ cat hg.pid >> $DAEMON_PIDS
+  $ (get-with-headers.py localhost:$HGPORT 'annotate/tip/sub/binary.bin' content-type content-length content-disposition) >getoutput.txt
+  $ cat getoutput.txt
+  200 Script output follows
+  content-type: text/html; charset=ascii
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>$TESTTMP/test: sub/binary.bin annotate</title> (glob)
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/tip">log</a></li>
+  <li><a href="/graph/tip">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  
+  <ul>
+  <li><a href="/rev/tip">changeset</a></li>
+  <li><a href="/file/tip/sub/">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/tip/sub/binary.bin">file</a></li>
+  <li><a href="/file/tip/sub/binary.bin">latest</a></li>
+  <li><a href="/diff/tip/sub/binary.bin">diff</a></li>
+  <li><a href="/comparison/tip/sub/binary.bin">comparison</a></li>
+  <li class="active">annotate</li>
+  <li><a href="/log/tip/sub/binary.bin">file log</a></li>
+  <li><a href="/raw-file/tip/sub/binary.bin">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   annotate sub/binary.bin @ 1:<a href="/rev/7dc31308464a">7dc31308464a</a>
+   <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> 
+  </h3>
+  
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" value="" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="description">add binary file</div>
+  
+  <table id="changesetEntry">
+  <tr>
+   <th class="author">author</th>
+   <td class="author">&#116;&#101;&#115;&#116;</td>
+  </tr>
+  <tr>
+   <th class="date">date</th>
+   <td class="date age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+  </tr>
+  <tr>
+   <th class="author">parents</th>
+   <td class="author"></td>
+  </tr>
+  <tr>
+   <th class="author">children</th>
+   <td class="author"></td>
+  </tr>
+  </table>
+  
+  
+  <form id="diffopts-form"
+  data-ignorews="0"
+  data-ignorewsamount="0"
+  data-ignorewseol="0"
+  data-ignoreblanklines="0">
+  <span>Ignore whitespace changes - </span>
+  <span>Everywhere:</span>
+  <input id="ignorews-checkbox" type="checkbox" />
+  <span>Within whitespace:</span>
+  <input id="ignorewsamount-checkbox" type="checkbox" />
+  <span>At end of lines:</span>
+  <input id="ignorewseol-checkbox" type="checkbox" />
+  </form>
+  
+  <script type="text/javascript">
+      renderDiffOptsForm();
+  </script>
+  
+  <div class="overflow">
+  <table class="bigtable">
+  <thead>
+  <tr>
+   <th class="annotate">rev</th>
+   <th class="line">&nbsp;&nbsp;line source</th>
+  </tr>
+  </thead>
+  <tbody class="stripes2 sourcelines"
+         data-logurl="/log/tip/sub/binary.bin"
+         data-selectabletag="TR"
+         data-ishead="1">
+    
+  <tr id="l1" class="thisrev">
+  <td class="annotate parity0">
+  <a href="/annotate/7dc31308464a/sub/binary.bin#l1">
+  1
+  </a>
+  <div class="annotate-info">
+  <div>
+  <a href="/annotate/7dc31308464a/sub/binary.bin#l1">
+  7dc31308464a</a>
+  add binary file
+  </div>
+  <div><em>&#116;&#101;&#115;&#116;</em></div>
+  <div>parents: </div>
+  <a href="/diff/7dc31308464a/sub/binary.bin">diff</a>
+  <a href="/rev/7dc31308464a">changeset</a>
+  </div>
+  </td>
+  <td class="source followlines-btn-parent"><a href="#l1">     1</a> (binary:application/octet-stream)</td>
+  </tr>
+  </tbody>
+  </table>
+  </div>
+  </div>
+  </div>
+  
+  <script type="text/javascript" src="/static/followlines.js"></script>
+  
+  
+  
+  </body>
+  </html>
+  
+  $ (get-with-headers.py localhost:$HGPORT 'comparison/tip/sub/binary.bin' content-type content-length content-disposition) >getoutput.txt
+  $ (get-with-headers.py localhost:$HGPORT 'file/tip/sub/binary.bin' content-type content-length content-disposition) >getoutput.txt
+  $ (get-with-headers.py localhost:$HGPORT 'static/hgicon.png' content-type content-length content-disposition) >getoutput.txt
+  $ killdaemons.py hg.pid
+  $ cat access.log error.log
+  $LOCALIP - - [$LOGDATE$] "GET /raw-file/bf0ff59095c9/sub/some%20text%25.txt HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /annotate/tip/sub/binary.bin HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /comparison/tip/sub/binary.bin HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /file/tip/sub/binary.bin HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [$LOGDATE$] "GET /static/hgicon.png HTTP/1.1" 200 - (glob)
+
   $ cd ..
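
The annotate page above reports sub/binary.bin as
(binary:application/octet-stream) instead of rendering its content.
Mercurial's binary test is essentially a NUL-byte check; a minimal
stand-in mirroring mercurial.utils.stringutil.binary():

    def binary(s):
        # Content containing a NUL byte is treated as binary.
        return bool(s and b'\0' in s)

    assert binary(b'Binary\0file')
    assert not binary(b'plain text\n')
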
--- a/tests/test-hgweb.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgweb.t	Mon Oct 22 14:46:06 2018 -0400
@@ -329,7 +329,7 @@
 
 Test the access/error files are opened in append mode
 
-  $ $PYTHON -c "print len(open('access.log', 'rb').readlines()), 'log lines written'"
+  $ "$PYTHON" -c "from __future__ import print_function; print(len(open('access.log', 'rb').readlines()), 'log lines written')"
   14 log lines written
 
 static file
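
The rewritten one-liner above adds "from __future__ import print_function"
so the same command parses on Python 2 and 3. Spelled out as a script:

    from __future__ import print_function  # print() syntax on both majors

    with open('access.log', 'rb') as fp:   # assumes the server log exists
        print(len(fp.readlines()), 'log lines written')
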
--- a/tests/test-hgwebdir.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hgwebdir.t	Mon Oct 22 14:46:06 2018 -0400
@@ -66,6 +66,20 @@
   > EOF
   $ cd ..
 
+add a file under a directory that could be shadowed by another repository
+
+  $ mkdir notrepo/f/f3
+  $ echo f3/file > notrepo/f/f3/file
+  $ hg -R notrepo/f ci -Am 'f3/file'
+  adding f3/file
+  $ hg -R notrepo/f update null
+  0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+  $ hg init notrepo/f/f3
+  $ cat <<'EOF' > notrepo/f/f3/.hg/hgrc
+  > [web]
+  > hidden = true
+  > EOF
+
 create repository without .hg/store
 
   $ hg init nostore
@@ -1217,6 +1231,39 @@
   
   f2
 
+Test accessing a file that could be shadowed by another repository if the URL
+path were audited as a working-directory path:
+
+  $ get-with-headers.py localhost:$HGPORT1 'rcoll/notrepo/f/file/tip/f3/file?style=raw'
+  200 Script output follows
+  
+  f3/file
+
+Test accessing a working-directory file that is shadowed by another repository
+
+  $ get-with-headers.py localhost:$HGPORT1 'rcoll/notrepo/f/file/ffffffffffff/f3/file?style=raw'
+  403 Forbidden
+  
+  
+  error: path 'f3/file' is inside nested repo 'f3'
+  [1]
+
+Test accessing invalid paths:
+
+  $ get-with-headers.py localhost:$HGPORT1 'rcoll/notrepo/f/file/tip/..?style=raw'
+  403 Forbidden
+  
+  
+  error: .. not under root '$TESTTMP/dir/webdir/notrepo/f'
+  [1]
+
+  $ get-with-headers.py localhost:$HGPORT1 'rcoll/notrepo/f/file/tip/.hg/hgrc?style=raw'
+  403 Forbidden
+  
+  
+  error: path contains illegal component: .hg/hgrc
+  [1]
+
 Test descend = False
 
   $ killdaemons.py
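
The 403 responses above come from Mercurial's path auditing: URL paths are
rejected if they contain '..' or '.hg' components, or (when checked against
the filesystem) if they cross into a nested repository. A sketch of the
component checks via the pathauditor class, assuming the mercurial package
is importable; the exact messages in the transcript come from more than one
layer of checking:

    from mercurial import error, pathutil

    # Hypothetical served root; realfs=False skips the filesystem-based
    # nested-repo detection so the sketch runs anywhere.
    auditor = pathutil.pathauditor(b'/srv/webdir/notrepo/f', realfs=False)
    for p in (b'f3/file', b'..', b'.hg/hgrc'):
        try:
            auditor(p)
            print('%r: allowed' % p)
        except error.Abort:
            print('%r: rejected' % p)
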
--- a/tests/test-highlight.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-highlight.t	Mon Oct 22 14:46:06 2018 -0400
@@ -26,7 +26,7 @@
   >     where sieve (p:ns) = p : sieve [n | n <- ns, mod n p /= 0]
   > """
   > 
-  > from itertools import dropwhile, ifilter, islice, count, chain
+  > import itertools
   > 
   > def primes():
   >     """Generate all primes."""
@@ -35,12 +35,13 @@
   >         # It is important to yield *here* in order to stop the
   >         # infinite recursion.
   >         yield p
-  >         ns = ifilter(lambda n: n % p != 0, ns)
+  >         ns = itertools.ifilter(lambda n: n % p != 0, ns)
   >         for n in sieve(ns):
   >             yield n
   > 
-  >     odds = ifilter(lambda i: i % 2 == 1, count())
-  >     return chain([2], sieve(dropwhile(lambda n: n < 3, odds)))
+  >     odds = itertools.ifilter(lambda i: i % 2 == 1, itertools.count())
+  >     dropwhile = itertools.dropwhile
+  >     return itertools.chain([2], sieve(dropwhile(lambda n: n < 3, odds)))
   > 
   > if __name__ == "__main__":
   >     import sys
@@ -49,7 +50,7 @@
   >     except (ValueError, IndexError):
   >         n = 10
   >     p = primes()
-  >     print("The first %d primes: %s" % (n, list(islice(p, n))))
+  >     print("The first %d primes: %s" % (n, list(itertools.islice(p, n))))
   > EOF
   $ echo >> primes.py  # to test html markup with an empty line just before EOF
   $ hg ci -Ama
@@ -74,7 +75,7 @@
   <script type="text/javascript" src="/static/mercurial.js"></script>
   
   <link rel="stylesheet" href="/highlightcss" type="text/css" />
-  <title>test: f4fca47b67e6 primes.py</title>
+  <title>test: 687f2d169546 primes.py</title>
   </head>
   <body>
   
@@ -112,7 +113,7 @@
   <div class="main">
   <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
   <h3>
-   view primes.py @ 0:<a href="/rev/f4fca47b67e6">f4fca47b67e6</a>
+   view primes.py @ 0:<a href="/rev/687f2d169546">687f2d169546</a>
    <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> 
   </h3>
   
@@ -159,7 +160,7 @@
   <span id="l4"><span class="sd">    where sieve (p:ns) = p : sieve [n | n &lt;- ns, mod n p /= 0]</span></span><a href="#l4"></a>
   <span id="l5"><span class="sd">&quot;&quot;&quot;</span></span><a href="#l5"></a>
   <span id="l6"></span><a href="#l6"></a>
-  <span id="l7"><span class="kn">from</span> <span class="nn">itertools</span> <span class="kn">import</span> <span class="n">dropwhile</span><span class="p">,</span> <span class="n">ifilter</span><span class="p">,</span> <span class="n">islice</span><span class="p">,</span> <span class="n">count</span><span class="p">,</span> <span class="n">chain</span></span><a href="#l7"></a>
+  <span id="l7"><span class="kn">import</span> <span class="nn">itertools</span></span><a href="#l7"></a>
   <span id="l8"></span><a href="#l8"></a>
   <span id="l9"><span class="kn">def</span> <span class="nf">primes</span><span class="p">():</span></span><a href="#l9"></a>
   <span id="l10">    <span class="sd">&quot;&quot;&quot;Generate all primes.&quot;&quot;&quot;</span></span><a href="#l10"></a>
@@ -168,22 +169,23 @@
   <span id="l13">        <span class="c"># It is important to yield *here* in order to stop the</span></span><a href="#l13"></a>
   <span id="l14">        <span class="c"># infinite recursion.</span></span><a href="#l14"></a>
   <span id="l15">        <span class="kn">yield</span> <span class="n">p</span></span><a href="#l15"></a>
-  <span id="l16">        <span class="n">ns</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></span><a href="#l16"></a>
+  <span id="l16">        <span class="n">ns</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></span><a href="#l16"></a>
   <span id="l17">        <span class="kn">for</span> <span class="n">n</span> <span class="ow">in</span> <span class="n">sieve</span><span class="p">(</span><span class="n">ns</span><span class="p">):</span></span><a href="#l17"></a>
   <span id="l18">            <span class="kn">yield</span> <span class="n">n</span></span><a href="#l18"></a>
   <span id="l19"></span><a href="#l19"></a>
-  <span id="l20">    <span class="n">odds</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">count</span><span class="p">())</span></span><a href="#l20"></a>
-  <span id="l21">    <span class="kn">return</span> <span class="n">chain</span><span class="p">([</span><span class="mi">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">&lt;</span> <span class="mi">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></span><a href="#l21"></a>
-  <span id="l22"></span><a href="#l22"></a>
-  <span id="l23"><span class="kn">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span></span><a href="#l23"></a>
-  <span id="l24">    <span class="kn">import</span> <span class="nn">sys</span></span><a href="#l24"></a>
-  <span id="l25">    <span class="kn">try</span><span class="p">:</span></span><a href="#l25"></a>
-  <span id="l26">        <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></span><a href="#l26"></a>
-  <span id="l27">    <span class="kn">except</span> <span class="p">(</span><span class="ne">ValueError</span><span class="p">,</span> <span class="ne">IndexError</span><span class="p">):</span></span><a href="#l27"></a>
-  <span id="l28">        <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></span><a href="#l28"></a>
-  <span id="l29">    <span class="n">p</span> <span class="o">=</span> <span class="n">primes</span><span class="p">()</span></span><a href="#l29"></a>
-  <span id="l30">    <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></span><a href="#l30"></a>
-  <span id="l31"></span><a href="#l31"></a>
+  <span id="l20">    <span class="n">odds</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">itertools</span><span class="o">.</span><span class="n">count</span><span class="p">())</span></span><a href="#l20"></a>
+  <span id="l21">    <span class="n">dropwhile</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">dropwhile</span></span><a href="#l21"></a>
+  <span id="l22">    <span class="kn">return</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">([</span><span class="mi">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">&lt;</span> <span class="mi">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></span><a href="#l22"></a>
+  <span id="l23"></span><a href="#l23"></a>
+  <span id="l24"><span class="kn">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span></span><a href="#l24"></a>
+  <span id="l25">    <span class="kn">import</span> <span class="nn">sys</span></span><a href="#l25"></a>
+  <span id="l26">    <span class="kn">try</span><span class="p">:</span></span><a href="#l26"></a>
+  <span id="l27">        <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></span><a href="#l27"></a>
+  <span id="l28">    <span class="kn">except</span> <span class="p">(</span><span class="ne">ValueError</span><span class="p">,</span> <span class="ne">IndexError</span><span class="p">):</span></span><a href="#l28"></a>
+  <span id="l29">        <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></span><a href="#l29"></a>
+  <span id="l30">    <span class="n">p</span> <span class="o">=</span> <span class="n">primes</span><span class="p">()</span></span><a href="#l30"></a>
+  <span id="l31">    <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></span><a href="#l31"></a>
+  <span id="l32"></span><a href="#l32"></a>
   </pre>
   </div>
   
@@ -251,7 +253,7 @@
   <div class="main">
   <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
   <h3>
-   annotate primes.py @ 0:<a href="/rev/f4fca47b67e6">f4fca47b67e6</a>
+   annotate primes.py @ 0:<a href="/rev/687f2d169546">687f2d169546</a>
    <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> 
   </h3>
   
@@ -318,19 +320,19 @@
     
   <tr id="l1" class="thisrev">
   <td class="annotate parity0">
-  <a href="/annotate/f4fca47b67e6/primes.py#l1">
+  <a href="/annotate/687f2d169546/primes.py#l1">
   0
   </a>
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l1">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l1">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l1">     1</a> <span class="sd">&quot;&quot;&quot;Fun with generators. Corresponding Haskell implementation:</span></td>
@@ -340,14 +342,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l2">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l2">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l2">     2</a> </td>
@@ -357,14 +359,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l3">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l3">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l3">     3</a> <span class="sd">primes = 2 : sieve [3, 5..]</span></td>
@@ -374,14 +376,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l4">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l4">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l4">     4</a> <span class="sd">    where sieve (p:ns) = p : sieve [n | n &lt;- ns, mod n p /= 0]</span></td>
@@ -391,14 +393,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l5">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l5">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l5">     5</a> <span class="sd">&quot;&quot;&quot;</span></td>
@@ -408,14 +410,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l6">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l6">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l6">     6</a> </td>
@@ -425,31 +427,31 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l7">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l7">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l7">     7</a> <span class="kn">from</span> <span class="nn">itertools</span> <span class="kn">import</span> <span class="n">dropwhile</span><span class="p">,</span> <span class="n">ifilter</span><span class="p">,</span> <span class="n">islice</span><span class="p">,</span> <span class="n">count</span><span class="p">,</span> <span class="n">chain</span></td>
+  <td class="source followlines-btn-parent"><a href="#l7">     7</a> <span class="kn">import</span> <span class="nn">itertools</span></td>
   </tr>
   <tr id="l8" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l8">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l8">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l8">     8</a> </td>
@@ -459,14 +461,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l9">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l9">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l9">     9</a> <span class="kn">def</span> <span class="nf">primes</span><span class="p">():</span></td>
@@ -476,14 +478,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l10">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l10">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l10">    10</a>     <span class="sd">&quot;&quot;&quot;Generate all primes.&quot;&quot;&quot;</span></td>
@@ -493,14 +495,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l11">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l11">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l11">    11</a>     <span class="kn">def</span> <span class="nf">sieve</span><span class="p">(</span><span class="n">ns</span><span class="p">):</span></td>
@@ -510,14 +512,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l12">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l12">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l12">    12</a>         <span class="n">p</span> <span class="o">=</span> <span class="n">ns</span><span class="o">.</span><span class="n">next</span><span class="p">()</span></td>
@@ -527,14 +529,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l13">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l13">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l13">    13</a>         <span class="c"># It is important to yield *here* in order to stop the</span></td>
@@ -544,14 +546,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l14">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l14">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l14">    14</a>         <span class="c"># infinite recursion.</span></td>
@@ -561,14 +563,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l15">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l15">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l15">    15</a>         <span class="kn">yield</span> <span class="n">p</span></td>
@@ -578,31 +580,31 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l16">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l16">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l16">    16</a>         <span class="n">ns</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></td>
+  <td class="source followlines-btn-parent"><a href="#l16">    16</a>         <span class="n">ns</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></td>
   </tr>
   <tr id="l17" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l17">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l17">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l17">    17</a>         <span class="kn">for</span> <span class="n">n</span> <span class="ow">in</span> <span class="n">sieve</span><span class="p">(</span><span class="n">ns</span><span class="p">):</span></td>
@@ -612,14 +614,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l18">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l18">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l18">    18</a>             <span class="kn">yield</span> <span class="n">n</span></td>
@@ -629,14 +631,14 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l19">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l19">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
   <td class="source followlines-btn-parent"><a href="#l19">    19</a> </td>
@@ -646,204 +648,221 @@
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l20">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l20">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l20">    20</a>     <span class="n">odds</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">count</span><span class="p">())</span></td>
+  <td class="source followlines-btn-parent"><a href="#l20">    20</a>     <span class="n">odds</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">itertools</span><span class="o">.</span><span class="n">count</span><span class="p">())</span></td>
   </tr>
   <tr id="l21" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l21">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l21">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l21">    21</a>     <span class="kn">return</span> <span class="n">chain</span><span class="p">([</span><span class="mi">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">&lt;</span> <span class="mi">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></td>
+  <td class="source followlines-btn-parent"><a href="#l21">    21</a>     <span class="n">dropwhile</span> <span class="o">=</span> <span class="n">itertools</span><span class="o">.</span><span class="n">dropwhile</span></td>
   </tr>
   <tr id="l22" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l22">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l22">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l22">    22</a> </td>
+  <td class="source followlines-btn-parent"><a href="#l22">    22</a>     <span class="kn">return</span> <span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">([</span><span class="mi">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">&lt;</span> <span class="mi">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></td>
   </tr>
   <tr id="l23" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l23">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l23">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l23">    23</a> <span class="kn">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span></td>
+  <td class="source followlines-btn-parent"><a href="#l23">    23</a> </td>
   </tr>
   <tr id="l24" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l24">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l24">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l24">    24</a>     <span class="kn">import</span> <span class="nn">sys</span></td>
+  <td class="source followlines-btn-parent"><a href="#l24">    24</a> <span class="kn">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span></td>
   </tr>
   <tr id="l25" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l25">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l25">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l25">    25</a>     <span class="kn">try</span><span class="p">:</span></td>
+  <td class="source followlines-btn-parent"><a href="#l25">    25</a>     <span class="kn">import</span> <span class="nn">sys</span></td>
   </tr>
   <tr id="l26" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l26">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l26">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l26">    26</a>         <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></td>
+  <td class="source followlines-btn-parent"><a href="#l26">    26</a>     <span class="kn">try</span><span class="p">:</span></td>
   </tr>
   <tr id="l27" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l27">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l27">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l27">    27</a>     <span class="kn">except</span> <span class="p">(</span><span class="ne">ValueError</span><span class="p">,</span> <span class="ne">IndexError</span><span class="p">):</span></td>
+  <td class="source followlines-btn-parent"><a href="#l27">    27</a>         <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></td>
   </tr>
   <tr id="l28" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l28">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l28">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l28">    28</a>         <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></td>
+  <td class="source followlines-btn-parent"><a href="#l28">    28</a>     <span class="kn">except</span> <span class="p">(</span><span class="ne">ValueError</span><span class="p">,</span> <span class="ne">IndexError</span><span class="p">):</span></td>
   </tr>
   <tr id="l29" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l29">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l29">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l29">    29</a>     <span class="n">p</span> <span class="o">=</span> <span class="n">primes</span><span class="p">()</span></td>
+  <td class="source followlines-btn-parent"><a href="#l29">    29</a>         <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></td>
   </tr>
   <tr id="l30" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l30">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l30">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l30">    30</a>     <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></td>
+  <td class="source followlines-btn-parent"><a href="#l30">    30</a>     <span class="n">p</span> <span class="o">=</span> <span class="n">primes</span><span class="p">()</span></td>
   </tr>
   <tr id="l31" class="thisrev">
   <td class="annotate parity0">
   
   <div class="annotate-info">
   <div>
-  <a href="/annotate/f4fca47b67e6/primes.py#l31">
-  f4fca47b67e6</a>
+  <a href="/annotate/687f2d169546/primes.py#l31">
+  687f2d169546</a>
   a
   </div>
   <div><em>&#116;&#101;&#115;&#116;</em></div>
   <div>parents: </div>
-  <a href="/diff/f4fca47b67e6/primes.py">diff</a>
-  <a href="/rev/f4fca47b67e6">changeset</a>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
   </div>
   </td>
-  <td class="source followlines-btn-parent"><a href="#l31">    31</a> </td>
+  <td class="source followlines-btn-parent"><a href="#l31">    31</a>     <span class="kn">print</span><span class="p">(</span><span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">))))</span></td>
+  </tr>
+  <tr id="l32" class="thisrev">
+  <td class="annotate parity0">
+  
+  <div class="annotate-info">
+  <div>
+  <a href="/annotate/687f2d169546/primes.py#l32">
+  687f2d169546</a>
+  a
+  </div>
+  <div><em>&#116;&#101;&#115;&#116;</em></div>
+  <div>parents: </div>
+  <a href="/diff/687f2d169546/primes.py">diff</a>
+  <a href="/rev/687f2d169546">changeset</a>
+  </div>
+  </td>
+  <td class="source followlines-btn-parent"><a href="#l32">    32</a> </td>
   </tr>
   </tbody>
   </table>
@@ -947,7 +966,7 @@
   $ cd ..
   $ hg init eucjp
   $ cd eucjp
-  $ $PYTHON -c 'print("\265\376")' >> eucjp.txt  # Japanese kanji "Kyo"
+  $ "$PYTHON" -c 'print("\265\376")' >> eucjp.txt  # Japanese kanji "Kyo"
   $ hg ci -Ama
   adding eucjp.txt
   $ hgserveget () {
--- a/tests/test-histedit-arguments.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-histedit-arguments.t	Mon Oct 22 14:46:06 2018 -0400
@@ -279,7 +279,7 @@
 Test that trimming description using multi-byte characters
 --------------------------------------------------------------------
 
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > fp = open('logfile', 'wb')
   > fp.write(b'12345678901234567890123456789012345678901234567890' +
   >          b'12345') # there are 5 more columns for 80 columns
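
Many hunks in these tests only add quotes around $PYTHON. Unquoted, the
shell would word-split an interpreter path containing spaces; quoting keeps
it a single argument. The hazard, demonstrated with shlex (the path is
hypothetical):

    import shlex

    path = '/opt/my python/bin/python3'
    print(shlex.split(path + ' -c pass'))
    # ['/opt/my', 'python/bin/python3', '-c', 'pass']  -- split in two
    print(shlex.split('"%s" -c pass' % path))
    # ['/opt/my python/bin/python3', '-c', 'pass']     -- kept intact
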
--- a/tests/test-histedit-base.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-histedit-base.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-histedit-commute.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-histedit-commute.t	Mon Oct 22 14:46:06 2018 -0400
@@ -364,7 +364,7 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 1 files
-  new changesets 141947992243:bd22688093b3
+  new changesets 141947992243:bd22688093b3 (3 drafts)
   (run 'hg update' to get a working copy)
   $ hg co tip
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-histedit-obsolete.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-histedit-obsolete.t	Mon Oct 22 14:46:06 2018 -0400
@@ -71,7 +71,6 @@
   $ hg commit --amend -X . -m XXXXXX
   $ hg commit --amend -X . -m b2
   $ hg --hidden --config extensions.strip= strip 'desc(XXXXXX)' --no-backup
-  warning: ignoring unknown working parent aba7da937030!
   $ hg histedit --continue
   $ hg log -G
   @  8:273c1f3b8626 c
--- a/tests/test-hook.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-hook.t	Mon Oct 22 14:46:06 2018 -0400
@@ -456,9 +456,9 @@
   >     unreachable = 1
   > EOF
 
-  $ cat > syntaxerror.py << EOF
+  $ cat > syntaxerror.py << NO_CHECK_EOF
   > (foo
-  > EOF
+  > NO_CHECK_EOF
 
 test python hooks
 
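Switching the heredoc delimiter to NO_CHECK_EOF exempts the snippet from
the test suite's embedded-Python style and syntax checks, which would
otherwise flag this intentionally invalid module. The module is unloadable
by design; a standalone check of the same source:

    src = '(foo'  # the deliberately broken hook module written above
    try:
        compile(src, 'syntaxerror.py', 'exec')
    except SyntaxError as exc:
        print('hook source does not compile: %s' % exc.msg)
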
--- a/tests/test-http-api-httpv2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-api-httpv2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,7 +18,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     GET /api/exp-http-v2-0001 HTTP/1.1\r\n
+  s>     GET /api/exp-http-v2-0003 HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -30,7 +30,7 @@
   s>     Content-Type: text/plain\r\n
   s>     Content-Length: 33\r\n
   s>     \r\n
-  s>     API exp-http-v2-0001 not enabled\n
+  s>     API exp-http-v2-0003 not enabled\n
 
 Restart server with support for HTTP v2 API
 
@@ -46,7 +46,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/badcommand HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/badcommand HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -67,7 +67,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     GET /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     GET /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -88,7 +88,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -100,7 +100,7 @@
   s>     Content-Type: text/plain\r\n
   s>     Content-Length: 85\r\n
   s>     \r\n
-  s>     client MUST specify Accept header with value: application/mercurial-exp-framing-0005\n
+  s>     client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n
 
 Bad Accept header results in 406
 
@@ -110,7 +110,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     accept: invalid\r\n
   s>     user-agent: test\r\n
@@ -123,7 +123,7 @@
   s>     Content-Type: text/plain\r\n
   s>     Content-Length: 85\r\n
   s>     \r\n
-  s>     client MUST specify Accept header with value: application/mercurial-exp-framing-0005\n
+  s>     client MUST specify Accept header with value: application/mercurial-exp-framing-0006\n
 
 Bad Content-Type header results in 415
 
@@ -134,9 +134,9 @@
   >     content-type: badmedia
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
   s>     content-type: badmedia\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -148,7 +148,7 @@
   s>     Content-Type: text/plain\r\n
   s>     Content-Length: 88\r\n
   s>     \r\n
-  s>     client MUST send Content-Type header with value: application/mercurial-exp-framing-0005\n
+  s>     client MUST send Content-Type header with value: application/mercurial-exp-framing-0006\n
 
 Request to read-only command works out of the box
 
@@ -160,10 +160,10 @@
   >     frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     *\r\n (glob)
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 29\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -173,44 +173,70 @@
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x01\x00\x02\x012\xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92Hidentity
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     27\r\n
+  s>     \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
+  s>     \r\n
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
   s>     \r\n
   s>     0\r\n
   s>     \r\n
 
-  $ sendhttpv2peer << EOF
+  $ sendhttpv2peerverbose << EOF
   > command customreadonly
   > EOF
   creating http peer for wire protocol version 2
   sending customreadonly command
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 29\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 65\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
   s>     \r\n
-  s>     \x15\x00\x00\x01\x00\x01\x01\x11\xa1DnameNcustomreadonly
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x15\x00\x00\x01\x00\x01\x00\x11\xa1DnameNcustomreadonly
   s> makefile('rb', None)
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
   s>     \r\n
-  received frame(size=42; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     27\r\n
+  s>     \x1f\x00\x00\x01\x00\x02\x041
+  s>     X\x1dcustomreadonly bytes response
+  s>     \r\n
+  received frame(size=31; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
   s>     0\r\n
   s>     \r\n
-  response: [{b'status': b'ok'}, b'customreadonly bytes response']
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    b'customreadonly bytes response'
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
 
 Request to read-write command fails because server is read-only by default
 
@@ -221,7 +247,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     GET /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
+  s>     GET /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -242,7 +268,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     GET /api/exp-http-v2-0001/rw/badcommand HTTP/1.1\r\n
+  s>     GET /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -263,7 +289,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -301,10 +327,10 @@
   >     frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/rw/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 29\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -314,11 +340,20 @@
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x01\x00\x02\x012\xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92Hidentity
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     27\r\n
+  s>     \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
+  s>     \r\n
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
   s>     \r\n
   s>     0\r\n
   s>     \r\n
@@ -331,9 +366,9 @@
   >     accept: $MEDIATYPE
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/rw/badcommand HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/rw/badcommand HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     \r\n
@@ -353,7 +388,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/debugreflect HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -393,24 +428,24 @@
   >     frame 1 1 stream-begin command-request new cbor:{b'name': b'command1', b'args': {b'foo': b'val1', b'bar1': b'val'}}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/debugreflect HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/debugreflect HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 47\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     \r\n
-  s>     \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2CfooDval1Dbar1CvalDnameHcommand1
+  s>     \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1
   s> makefile('rb', None)
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: text/plain\r\n
-  s>     Content-Length: 205\r\n
+  s>     Content-Length: 223\r\n
   s>     \r\n
-  s>     received: 1 1 1 \xa2Dargs\xa2CfooDval1Dbar1CvalDnameHcommand1\n
-  s>     ["runcommand", {"args": {"bar1": "val", "foo": "val1"}, "command": "command1", "data": null, "requestid": 1}]\n
+  s>     received: 1 1 1 \xa2Dargs\xa2Dbar1CvalCfooDval1DnameHcommand1\n
+  s>     ["runcommand", {"args": {"bar1": "val", "foo": "val1"}, "command": "command1", "data": null, "redirect": null, "requestid": 1}]\n
   s>     received: <no frame>\n
   s>     {"action": "noop"}
 
@@ -424,10 +459,10 @@
   >     frame 1 1 stream-begin command-request new cbor:{b'name': b'customreadonly'}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/customreadonly HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 29\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -437,11 +472,20 @@
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x01\x00\x02\x012\xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92Hidentity
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     27\r\n
+  s>     \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
+  s>     \r\n
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
   s>     \r\n
   s>     0\r\n
   s>     \r\n
@@ -457,7 +501,7 @@
   >     frame 3 1 0 command-request new cbor:{b'name': b'customreadonly'}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/multirequest HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     *\r\n (glob)
   s>     *\r\n (glob)
@@ -470,14 +514,29 @@
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x01\x00\x02\x012\xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92Hidentity
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     27\r\n
+  s>     \x1f\x00\x00\x01\x00\x02\x041X\x1dcustomreadonly bytes response
   s>     \r\n
-  s>     32\r\n
-  s>     *\x00\x00\x03\x00\x02\x002\xa1FstatusBokX\x1dcustomreadonly bytes response
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     27\r\n
+  s>     \x1f\x00\x00\x03\x00\x02\x041X\x1dcustomreadonly bytes response
+  s>     \r\n
+  s>     8\r\n
+  s>     \x00\x00\x00\x03\x00\x02\x002
   s>     \r\n
   s>     0\r\n
   s>     \r\n
@@ -495,10 +554,10 @@
   >     frame 1 1 0 command-request continuation IbookmarksDnameHlistkeys
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/multirequest HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 115\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -508,14 +567,29 @@
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     33\r\n
-  s>     +\x00\x00\x03\x00\x02\x012\xa1FstatusBok\xa3Fphases@Ibookmarks@Jnamespaces@
+  s>     11\r\n
+  s>     \t\x00\x00\x03\x00\x02\x01\x92Hidentity
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x03\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     28\r\n
+  s>      \x00\x00\x03\x00\x02\x041\xa3Ibookmarks@Jnamespaces@Fphases@
   s>     \r\n
-  s>     14\r\n
-  s>     \x0c\x00\x00\x01\x00\x02\x002\xa1FstatusBok\xa0
+  s>     8\r\n
+  s>     \x00\x00\x00\x03\x00\x02\x002
+  s>     \r\n
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041\xa1FstatusBok
+  s>     \r\n
+  s>     9\r\n
+  s>     \x01\x00\x00\x01\x00\x02\x041\xa0
+  s>     \r\n
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
   s>     \r\n
   s>     0\r\n
   s>     \r\n
@@ -545,10 +619,10 @@
   >     frame 1 1 stream-begin command-request new cbor:{b'name': b'pushkey'}
   > EOF
   using raw connection to peer
-  s>     POST /api/exp-http-v2-0001/ro/multirequest HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/multirequest HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
   s>     user-agent: test\r\n
   s>     content-length: 22\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -563,4 +637,96 @@
   s>     \r\n
   s>     insufficient permissions to execute command: pushkey
 
+Defining an invalid content encoding results in a warning
+
+  $ hg --config experimental.httppeer.v2-encoder-order=identity,badencoder --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF
+  > command heads
+  > EOF
+  creating http peer for wire protocol version 2
+  sending heads command
+  wire protocol version 2 encoder referenced in config (badencoder) is not known; ignoring
+  s>     POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 56\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
+  s>     \r\n
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     1e\r\n
+  s>     \x16\x00\x00\x01\x00\x02\x041
+  s>     \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
+  s>     \r\n
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     0\r\n
+  s>     \r\n
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#if zstd
+
+  $ hg --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/ << EOF
+  > command heads
+  > EOF
+  creating http peer for wire protocol version 2
+  sending heads command
+  s>     POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 70\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     *\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x83Hzstd-8mbDzlibHidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hzstd-8mb
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     25\r\n
+  s>     \x1d\x00\x00\x01\x00\x02\x042
+  s>     (\xb5/\xfd\x00P\xa4\x00\x00p\xa1FstatusBok\x81T\x00\x01\x00\tP\x02
+  s>     \r\n
+  received frame(size=29; request=1; stream=2; streamflags=encoded; type=command-response; flags=eos)
+  s>     0\r\n
+  s>     \r\n
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#endif
+
   $ cat error.log
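
The hex dumps above become easier to audit with the 8-byte frame header of
the new exp-framing-0006 media type in mind. The sketch below is inferred
purely from the traces in this file (for example, the header
\t\x00\x00\x01\x00\x02\x01\x92 decodes to the "received frame(size=9;
request=1; stream=2; streamflags=stream-begin; type=stream-settings;
flags=eos)" line that follows it); the name tables are deliberately partial,
and this is an illustration, not the wireprotoframing implementation:

  import struct

  # Values read off the hex dumps in this test; both tables are partial.
  FRAME_TYPES = {0x03: 'command-response', 0x09: 'stream-settings'}
  STREAM_FLAGS = {0x01: 'stream-begin', 0x04: 'encoded'}

  def parseframeheader(header):
      """Decode the fixed 8-byte header preceding every frame payload."""
      assert len(header) == 8
      # 24-bit little-endian payload length, then request id (uint16 LE),
      # stream id, stream flags, and a final byte packing the frame type
      # (high nibble) together with the frame flags (low nibble).
      length = header[0] | header[1] << 8 | header[2] << 16
      requestid, streamid, streamflags, typeflags = struct.unpack_from(
          '<HBBB', header, 3)
      return (length, requestid, streamid, streamflags,
              typeflags >> 4, typeflags & 0x0f)

  # The stream-settings frame from the trace above:
  assert parseframeheader(b'\t\x00\x00\x01\x00\x02\x01\x92') == (
      9, 1, 2, 0x01, 0x09, 0x02)  # stream-begin, stream-settings, eos
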
--- a/tests/test-http-api.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-api.t	Mon Oct 22 14:46:06 2018 -0400
@@ -218,11 +218,11 @@
 Accessing a known but not enabled API yields a different error
 
   $ send << EOF
-  > httprequest GET api/exp-http-v2-0001
+  > httprequest GET api/exp-http-v2-0003
   >     user-agent: test
   > EOF
   using raw connection to peer
-  s>     GET /api/exp-http-v2-0001 HTTP/1.1\r\n
+  s>     GET /api/exp-http-v2-0003 HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
@@ -234,7 +234,7 @@
   s>     Content-Type: text/plain\r\n
   s>     Content-Length: 33\r\n
   s>     \r\n
-  s>     API exp-http-v2-0001 not enabled\n
+  s>     API exp-http-v2-0003 not enabled\n
 
 Restart server with support for HTTP v2 API
 
@@ -269,7 +269,7 @@
   s>     \r\n
   s>     APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
   s>     \n
-  s>     exp-http-v2-0001
+  s>     exp-http-v2-0003
 
   $ send << EOF
   > httprequest GET api/
@@ -290,4 +290,4 @@
   s>     \r\n
   s>     APIs can be accessed at /api/<name>, where <name> can be one of the following:\n
   s>     \n
-  s>     exp-http-v2-0001
+  s>     exp-http-v2-0003
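
The request bodies in these traces, such as \xa1DnameEheads and
\xa1DnameNcustomreadonly, are CBOR maps whose printable "prefixes" are just
CBOR length bytes: 0xa1 opens a one-entry map and 0x40+len introduces a
short byte string, so 'D' (0x44) announces 4 bytes and 'N' (0x4e) announces
14. A hand-rolled encoder covering only those short forms (the real peers
use a full CBOR encoder; this is a sketch for reading the dumps):

  def cborbytes(value):
      assert len(value) < 24      # short form only; enough for these tests
      return bytes([0x40 | len(value)]) + value

  def cbormap(pairs):
      assert len(pairs) < 24
      encoded = bytes([0xa0 | len(pairs)])
      for key, value in pairs:
          encoded += cborbytes(key) + cborbytes(value)
      return encoded

  assert cbormap([(b'name', b'heads')]) == b'\xa1DnameEheads'
  assert cbormap([(b'name', b'customreadonly')]) == b'\xa1DnameNcustomreadonly'
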
--- a/tests/test-http-bad-server.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-bad-server.t	Mon Oct 22 14:46:06 2018 -0400
@@ -118,9 +118,9 @@
   write(23) -> Server: badhttpserver\r\n
   write(37) -> Date: $HTTP_DATE$\r\n
   write(41) -> Content-Type: application/mercurial-0.1\r\n
-  write(21) -> Content-Length: 436\r\n
+  write(21) -> Content-Length: 450\r\n
   write(2) -> \r\n
-  write(436) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1? from -1) -> (1?) Accept-Encoding* (glob)
   read limit reached; closing socket
@@ -159,9 +159,9 @@
   write(23) -> Server: badhttpserver\r\n
   write(37) -> Date: $HTTP_DATE$\r\n
   write(41) -> Content-Type: application/mercurial-0.1\r\n
-  write(21) -> Content-Length: 436\r\n
+  write(21) -> Content-Length: 450\r\n
   write(2) -> \r\n
-  write(436) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(450) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1?? from -1) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(8? from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -216,9 +216,9 @@
   write(23) -> Server: badhttpserver\r\n
   write(37) -> Date: $HTTP_DATE$\r\n
   write(41) -> Content-Type: application/mercurial-0.1\r\n
-  write(21) -> Content-Length: 449\r\n
+  write(21) -> Content-Length: 463\r\n
   write(2) -> \r\n
-  write(449) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(463) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(1?? from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
   readline(1?? from -1) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(1?? from -1) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -275,7 +275,7 @@
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
-  abort: HTTP request error (incomplete response; expected 416 bytes got 20)
+  abort: HTTP request error (incomplete response; expected 450 bytes got 20)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -292,16 +292,16 @@
   write(23 from 23) -> (121) Server: badhttpserver\r\n
   write(37 from 37) -> (84) Date: $HTTP_DATE$\r\n
   write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (22) Content-Length: 436\r\n
+  write(21 from 21) -> (22) Content-Length: 450\r\n
   write(2 from 2) -> (20) \r\n
-  write(20 from 436) -> (0) batch branchmap bund
+  write(20 from 450) -> (0) batch branchmap bund
   write limit reached; closing socket
 
   $ rm -f error.log
 
 Server sends incomplete headers for batch request
 
-  $ hg serve --config badserver.closeaftersendbytes=714 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=728 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO this output is horrible
@@ -323,13 +323,13 @@
   readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
   readline(-1) -> (2) \r\n
-  write(36 from 36) -> (678) HTTP/1.1 200 Script output follows\r\n
-  write(23 from 23) -> (655) Server: badhttpserver\r\n
-  write(37 from 37) -> (618) Date: $HTTP_DATE$\r\n
-  write(41 from 41) -> (577) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (556) Content-Length: 436\r\n
-  write(2 from 2) -> (554) \r\n
-  write(436 from 436) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(36 from 36) -> (692) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (669) Server: badhttpserver\r\n
+  write(37 from 37) -> (632) Date: $HTTP_DATE$\r\n
+  write(41 from 41) -> (591) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (570) Content-Length: 450\r\n
+  write(2 from 2) -> (568) \r\n
+  write(450 from 450) -> (118) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(-1) -> (27) Accept-Encoding: identity\r\n
   readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -350,7 +350,7 @@
 
 Server sends an incomplete HTTP response body to batch request
 
-  $ hg serve --config badserver.closeaftersendbytes=779 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=793 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO client spews a stack due to uncaught ValueError in batch.results()
@@ -371,13 +371,13 @@
   readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
   readline(-1) -> (2) \r\n
-  write(36 from 36) -> (743) HTTP/1.1 200 Script output follows\r\n
-  write(23 from 23) -> (720) Server: badhttpserver\r\n
-  write(37 from 37) -> (683) Date: $HTTP_DATE$\r\n
-  write(41 from 41) -> (642) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (621) Content-Length: 436\r\n
-  write(2 from 2) -> (619) \r\n
-  write(436 from 436) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(36 from 36) -> (757) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (734) Server: badhttpserver\r\n
+  write(37 from 37) -> (697) Date: $HTTP_DATE$\r\n
+  write(41 from 41) -> (656) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (635) Content-Length: 450\r\n
+  write(2 from 2) -> (633) \r\n
+  write(450 from 450) -> (183) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(-1) -> (27) Accept-Encoding: identity\r\n
   readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -400,7 +400,7 @@
 
 Server sends incomplete headers for getbundle response
 
-  $ hg serve --config badserver.closeaftersendbytes=926 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=940 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
 TODO this output is terrible
@@ -423,13 +423,13 @@
   readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
   readline(-1) -> (2) \r\n
-  write(36 from 36) -> (890) HTTP/1.1 200 Script output follows\r\n
-  write(23 from 23) -> (867) Server: badhttpserver\r\n
-  write(37 from 37) -> (830) Date: $HTTP_DATE$\r\n
-  write(41 from 41) -> (789) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (768) Content-Length: 436\r\n
-  write(2 from 2) -> (766) \r\n
-  write(436 from 436) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(36 from 36) -> (904) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (881) Server: badhttpserver\r\n
+  write(37 from 37) -> (844) Date: $HTTP_DATE$\r\n
+  write(41 from 41) -> (803) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (782) Content-Length: 450\r\n
+  write(2 from 2) -> (780) \r\n
+  write(450 from 450) -> (330) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(-1) -> (27) Accept-Encoding: identity\r\n
   readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -464,9 +464,29 @@
 
   $ rm -f error.log
 
+Server stops before it sends transfer encoding
+
+  $ hg serve --config badserver.closeaftersendbytes=973 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: stream ended unexpectedly (got 0 bytes, expected 1)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -4 error.log
+  write(41 from 41) -> (25) Content-Type: application/mercurial-0.2\r\n
+  write(25 from 28) -> (0) Transfer-Encoding: chunke
+  write limit reached; closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
 Server sends empty HTTP body for getbundle
 
-  $ hg serve --config badserver.closeaftersendbytes=964 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=978 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -484,13 +504,13 @@
   readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
   readline(-1) -> (2) \r\n
-  write(36 from 36) -> (928) HTTP/1.1 200 Script output follows\r\n
-  write(23 from 23) -> (905) Server: badhttpserver\r\n
-  write(37 from 37) -> (868) Date: $HTTP_DATE$\r\n
-  write(41 from 41) -> (827) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (806) Content-Length: 436\r\n
-  write(2 from 2) -> (804) \r\n
-  write(436 from 436) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(36 from 36) -> (942) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (919) Server: badhttpserver\r\n
+  write(37 from 37) -> (882) Date: $HTTP_DATE$\r\n
+  write(41 from 41) -> (841) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (820) Content-Length: 450\r\n
+  write(2 from 2) -> (818) \r\n
+  write(450 from 450) -> (368) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(-1) -> (27) Accept-Encoding: identity\r\n
   readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -529,7 +549,7 @@
 
 Server sends partial compression string
 
-  $ hg serve --config badserver.closeaftersendbytes=988 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1002 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -547,13 +567,13 @@
   readline(-1) -> (2?) host: localhost:$HGPORT\r\n (glob)
   readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
   readline(-1) -> (2) \r\n
-  write(36 from 36) -> (952) HTTP/1.1 200 Script output follows\r\n
-  write(23 from 23) -> (929) Server: badhttpserver\r\n
-  write(37 from 37) -> (892) Date: $HTTP_DATE$\r\n
-  write(41 from 41) -> (851) Content-Type: application/mercurial-0.1\r\n
-  write(21 from 21) -> (830) Content-Length: 436\r\n
-  write(2 from 2) -> (828) \r\n
-  write(436 from 436) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  write(36 from 36) -> (966) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (943) Server: badhttpserver\r\n
+  write(37 from 37) -> (906) Date: $HTTP_DATE$\r\n
+  write(41 from 41) -> (865) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (844) Content-Length: 450\r\n
+  write(2 from 2) -> (842) \r\n
+  write(450 from 450) -> (392) batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(-1) -> (27) Accept-Encoding: identity\r\n
   readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
@@ -595,12 +615,12 @@
 
 Server sends partial bundle2 header magic
 
-  $ hg serve --config badserver.closeaftersendbytes=985 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=999 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
   requesting all changes
-  abort: HTTP request error (incomplete response; expected 1 bytes got 3)
+  abort: HTTP request error (incomplete response; expected 4 bytes got 3)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -619,12 +639,12 @@
 
 Server sends incomplete bundle2 stream params length
 
-  $ hg serve --config badserver.closeaftersendbytes=994 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1008 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
   requesting all changes
-  abort: HTTP request error (incomplete response; expected 1 bytes got 3)
+  abort: HTTP request error (incomplete response; expected 4 bytes got 3)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -644,7 +664,7 @@
 
 Server stops after bundle2 stream params header
 
-  $ hg serve --config badserver.closeaftersendbytes=997 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1011 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -669,7 +689,7 @@
 
 Server stops sending after bundle2 part header length
 
-  $ hg serve --config badserver.closeaftersendbytes=1006 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1020 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -695,7 +715,7 @@
 
 Server stops sending after bundle2 part header
 
-  $ hg serve --config badserver.closeaftersendbytes=1053 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1067 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -725,7 +745,7 @@
 
 Server stops after bundle2 part payload chunk size
 
-  $ hg serve --config badserver.closeaftersendbytes=1074 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1088 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -733,7 +753,7 @@
   adding changesets
   transaction abort!
   rollback completed
-  abort: HTTP request error (incomplete response; expected 459 bytes got 7)
+  abort: HTTP request error (incomplete response; expected 466 bytes got 7)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -756,7 +776,7 @@
 
 Server stops sending in middle of bundle2 payload chunk
 
-  $ hg serve --config badserver.closeaftersendbytes=1535 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1549 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -788,7 +808,7 @@
 
 Server stops sending after 0 length payload chunk size
 
-  $ hg serve --config badserver.closeaftersendbytes=1566 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1580 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -799,7 +819,7 @@
   added 1 changesets with 1 changes to 1 files
   transaction abort!
   rollback completed
-  abort: HTTP request error (incomplete response; expected 23 bytes got 9)
+  abort: HTTP request error (incomplete response; expected 32 bytes got 9)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
 
@@ -825,8 +845,8 @@
 Server stops sending after the zero-length bundle2 part header (indicating end of bundle2 payload)
 This is before the 0 size chunked transfer part that signals end of HTTP response.
 
-#  $ hg serve --config badserver.closeaftersendbytes=1741 -p $HGPORT -d --pid-file=hg.pid -E error.log
-  $ hg serve --config badserver.closeaftersendbytes=1848 -p $HGPORT -d --pid-file=hg.pid -E error.log
+#  $ hg serve --config badserver.closeaftersendbytes=1755 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1862 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
@@ -870,7 +890,7 @@
 
 Server sends a size 0 chunked-transfer size without terminating \r\n
 
-  $ hg serve --config badserver.closeaftersendbytes=1851 -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ hg serve --config badserver.closeaftersendbytes=1865 -p $HGPORT -d --pid-file=hg.pid -E error.log
   $ cat hg.pid > $DAEMON_PIDS
 
   $ hg clone http://localhost:$HGPORT/ clone
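
Every pre-existing badserver.closeaftersendbytes value in this file shifts
up by exactly 14 bytes because the advertised capability string grew from
436 to 450 bytes: the budget counts raw bytes written to the socket, so any
change to the handshake moves every later cut-off point. A hypothetical
stand-in for the extension's socket wrapper (CappedSocket and its fields
are invented for illustration; the real logic lives in the badserver test
extension):

  class CappedSocket(object):
      """Send at most `budget` bytes, then close the connection,
      mimicking the 'write limit reached; closing socket' log lines."""

      def __init__(self, sock, budget):
          self._sock = sock
          self._remaining = budget

      def sendall(self, data):
          data = data[:self._remaining]       # truncate to the byte budget
          self._sock.sendall(data)
          self._remaining -= len(data)
          if not self._remaining:
              self._sock.close()
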
--- a/tests/test-http-bundle1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-bundle1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -49,7 +49,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 1 changesets, 4 total revisions
+  checked 1 changesets with 4 changes to 4 files
 #endif
 
 try to clone via stream, should use pull instead
@@ -101,7 +101,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 1 changesets, 4 total revisions
+  checked 1 changesets with 4 changes to 4 files
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
--- a/tests/test-http-clone-r.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-clone-r.t	Mon Oct 22 14:46:06 2018 -0400
@@ -9,7 +9,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -40,7 +40,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -52,7 +52,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -64,7 +64,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -76,7 +76,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -88,7 +88,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -100,7 +100,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -112,7 +112,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 5 total revisions
+  checked 4 changesets with 5 changes to 2 files
   adding changesets
   adding manifests
   adding file changes
@@ -124,7 +124,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 6 total revisions
+  checked 5 changesets with 6 changes to 3 files
   adding changesets
   adding manifests
   adding file changes
@@ -136,7 +136,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   $ cd test-8
   $ hg pull ../test-7
   pulling from ../test-7
@@ -152,7 +152,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ cd ..
   $ cd test-1
   $ hg pull -r 4 http://localhost:$HGPORT/
@@ -169,7 +169,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 1 files
   $ hg pull http://localhost:$HGPORT/
   pulling from http://localhost:$HGPORT/
   searching for changes
@@ -195,7 +195,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 5 changesets, 3 total revisions
+  checked 5 changesets with 3 changes to 1 files
   $ hg pull http://localhost:$HGPORT/
   pulling from http://localhost:$HGPORT/
   searching for changes
@@ -210,7 +210,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ cd ..
 
 no default destination if url has no path:
--- a/tests/test-http-permissions.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-permissions.t	Mon Oct 22 14:46:06 2018 -0400
@@ -13,16 +13,16 @@
   >         return super(testenvhgweb, self).__call__(env, respond)
   > hgweb_mod.hgweb = testenvhgweb
   > 
-  > @wireprotov1server.wireprotocommand('customreadnoperm')
+  > @wireprotov1server.wireprotocommand(b'customreadnoperm')
   > def customread(repo, proto):
   >     return b'read-only command no defined permissions\n'
-  > @wireprotov1server.wireprotocommand('customwritenoperm')
+  > @wireprotov1server.wireprotocommand(b'customwritenoperm')
   > def customwritenoperm(repo, proto):
   >     return b'write command no defined permissions\n'
-  > @wireprotov1server.wireprotocommand('customreadwithperm', permission='pull')
+  > @wireprotov1server.wireprotocommand(b'customreadwithperm', permission=b'pull')
   > def customreadwithperm(repo, proto):
   >     return b'read-only command w/ defined permissions\n'
-  > @wireprotov1server.wireprotocommand('customwritewithperm', permission='push')
+  > @wireprotov1server.wireprotocommand(b'customwritewithperm', permission=b'push')
   > def customwritewithperm(repo, proto):
   >     return b'write command w/ defined permissions\n'
   > EOF
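
The b'' prefixes added above matter because the command registry ends up
keyed by bytes: command names arrive off the wire as bytes, and under
Python 3 a str key would never match them. A toy registration sketch
(illustrative only, not the wireprotov1server API):

  commands = {}

  def wireprotocommand(name, permission=None):
      # toy decorator: registrations and wire lookups must agree on bytes
      def register(func):
          commands[name] = (func, permission)
          return func
      return register

  @wireprotocommand(b'customreadwithperm', permission=b'pull')
  def customreadwithperm(repo, proto):
      return b'read-only command w/ defined permissions\n'

  assert b'customreadwithperm' in commands
  assert 'customreadwithperm' not in commands   # str key misses on Python 3
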
--- a/tests/test-http-protocol.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-protocol.t	Mon Oct 22 14:46:06 2018 -0400
@@ -192,7 +192,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending listkeys command
   s>     GET /?cmd=listkeys HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
@@ -213,7 +213,12 @@
   s>     bookmarks\t\n
   s>     namespaces\t\n
   s>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
 
 Same thing, but with "httprequest" command
 
@@ -249,7 +254,7 @@
   s>     Accept-Encoding: identity\r\n
   s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
   s>     x-hgproto-1: cbor\r\n
-  s>     x-hgupgrade-1: exp-http-v2-0001\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
   s>     accept: application/mercurial-0.1\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
@@ -261,7 +266,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
   s>     GET /?cmd=heads HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
@@ -279,7 +284,10 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     0000000000000000000000000000000000000000\n
-  response: [b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ killdaemons.py
   $ enablehttpv2 empty
@@ -288,14 +296,14 @@
 
 Client with HTTPv2 enabled automatically upgrades if the server supports it
 
-  $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
+  $ hg --config experimental.httppeer.advertise-v2=true --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
   > command heads
   > EOF
   s>     GET /?cmd=capabilities HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
   s>     x-hgproto-1: cbor\r\n
-  s>     x-hgupgrade-1: exp-http-v2-0001\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
   s>     accept: application/mercurial-0.1\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
@@ -307,32 +315,49 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Eheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyCnewCnewColdColdInamespaceBnsKpermissions\x81DpushHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullKcompression\x81\xa1DnameDzlibNrawrepoformats\x82LgeneraldeltaHrevlogv1Qframingmediatypes\x81X&application/mercurial-exp-framing-0005GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
-  s>     POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 20\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 56\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
   s>     \r\n
-  s>     \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEheads
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x0c\x00\x00\x01\x00\x01\x00\x11\xa1DnameEheads
   s> makefile('rb', None)
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     29\r\n
-  s>     !\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
   s>     \r\n
-  received frame(size=33; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     1e\r\n
+  s>     \x16\x00\x00\x01\x00\x02\x041
+  s>     \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
+  s>     \r\n
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
   s>     0\r\n
   s>     \r\n
-  response: [b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00']
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: [
+    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ killdaemons.py
 
@@ -407,9 +432,9 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 453\r\n
+  s>     Content-Length: 467\r\n
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 Test with the HTTP peer
 
@@ -442,10 +467,10 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 453\r\n
+  s>     Content-Length: 467\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
   s>     GET /redirected?cmd=heads HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
@@ -463,7 +488,10 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
-  response: [b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL']
+  response: [
+    b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
+  ]
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ killdaemons.py
 
@@ -704,10 +732,10 @@
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
   s>     Content-Type: application/mercurial-0.1\r\n
-  s>     Content-Length: 453\r\n
+  s>     Content-Length: 467\r\n
   s>     \r\n
   real URL is http://$LOCALIP:$HGPORT/redirected (glob)
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending heads command
   s>     GET /redirected?cmd=heads HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
@@ -725,4 +753,7 @@
   s>     Content-Length: 41\r\n
   s>     \r\n
   s>     96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
-  response: [b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL']
+  response: [
+    b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
+  ]
+  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
--- a/tests/test-http-proxy.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http-proxy.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,11 +16,9 @@
 
   $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --stream http://localhost:$HGPORT/ b
   streaming all changes
-  3 files to transfer, 303 bytes of data (reporevlogstore !)
+  6 files to transfer, 412 bytes of data (reporevlogstore !)
   4 files to transfer, 330 bytes of data (reposimplestore !)
   transferred * bytes in * seconds (*/sec) (glob)
-  searching for changes
-  no changes found
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd b
@@ -29,7 +27,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   $ cd ..
 
 url for proxy, pull
@@ -49,7 +47,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   $ cd ..
 
 host:port for proxy
@@ -108,10 +106,8 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat proxy.log
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
-  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D83180e7845de420a1bb46896fd5fe05294f8d629 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
-  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=83180e7845de420a1bb46896fd5fe05294f8d629&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
+  $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
   $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
--- a/tests/test-http.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-http.t	Mon Oct 22 14:46:06 2018 -0400
@@ -29,10 +29,8 @@
 #if no-reposimplestore
   $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
   streaming all changes
-  6 files to transfer, 606 bytes of data
+  9 files to transfer, 715 bytes of data
   transferred * bytes in * seconds (*/sec) (glob)
-  searching for changes
-  no changes found
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg verify -R copy
@@ -40,7 +38,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 1 changesets, 4 total revisions
+  checked 1 changesets with 4 changes to 4 files
 #endif
 
 try to clone via stream, should use pull instead
@@ -92,7 +90,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 1 changesets, 4 total revisions
+  checked 1 changesets with 4 changes to 4 files
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
@@ -219,10 +217,8 @@
 #if no-reposimplestore
   $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
   streaming all changes
-  7 files to transfer, 916 bytes of data
-  transferred * bytes in * seconds (*/sec) (glob)
-  searching for changes
-  no changes found
+  10 files to transfer, 1.01 KB of data
+  transferred * KB in * seconds (*/sec) (glob)
   updating to branch default
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
@@ -373,10 +369,8 @@
   "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
   "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
   "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
-  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
-  "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
-  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
-  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
+  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
+  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
   "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
   "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
   "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
@@ -470,13 +464,11 @@
   streaming all changes
   * files to transfer, * of data (glob)
   transferred * in * seconds (*/sec) (glob)
-  searching for changes
-  no changes found
   $ cat error.log
 #endif
 
 ... and also keep partial clones and pulls working
-  $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
+  $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
   adding changesets
   adding manifests
   adding file changes
@@ -484,7 +476,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg pull -R test-partial-clone
+  $ hg pull -R test/partial/clone
   pulling from http://localhost:$HGPORT1/
   searching for changes
   adding changesets
@@ -494,6 +486,13 @@
   new changesets 5fed3813f7f5:56f9bc90cce6
   (run 'hg update' to get a working copy)
 
+  $ hg clone -U -r 0 test/partial/clone test/another/clone
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 4 changes to 4 files
+  new changesets 8b6053c928fe
+
 corrupt cookies file should yield a warning
 
   $ cat > $TESTTMP/cookies.txt << EOF
--- a/tests/test-https.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-https.t	Mon Oct 22 14:46:06 2018 -0400
@@ -195,7 +195,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 1 changesets, 4 total revisions
+  checked 1 changesets with 4 changes to 4 files
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
--- a/tests/test-i18n.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-i18n.t	Mon Oct 22 14:46:06 2018 -0400
@@ -45,8 +45,8 @@
 tool itself by doctest
 
   $ cd "$TESTDIR"/../i18n
-  $ $PYTHON check-translation.py *.po
-  $ $PYTHON check-translation.py --doctest
+  $ "$PYTHON" check-translation.py *.po
+  $ "$PYTHON" check-translation.py --doctest
   $ cd $TESTTMP
 
 #if gettext
--- a/tests/test-identify.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-identify.t	Mon Oct 22 14:46:06 2018 -0400
@@ -49,9 +49,9 @@
     "bookmarks": [],
     "branch": "default",
     "dirty": "",
-    "id": "cb9a9f314b8b",
+    "id": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b",
     "node": "ffffffffffffffffffffffffffffffffffffffff",
-    "parents": [{"node": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b", "rev": 0}],
+    "parents": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
     "tags": ["tip"]
    }
   ]
@@ -63,7 +63,7 @@
   $ hg id -T '{parents % "{rev} {node|shortest} {desc}\n"}'
   0 cb9a a
   $ hg id -T '{parents}\n'
-  0:cb9a9f314b8b
+  cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
 
 test nested template: '{tags}'/'{node}' constants shouldn't override the
 default keywords, but '{id}' persists because there's no default keyword
@@ -71,7 +71,7 @@
 
   $ hg id -T '{tags}\n'
   tip
-  $ hg id -T '{revset("null:.") % "{rev}:{node|short} {tags} {id}\n"}'
+  $ hg id -T '{revset("null:.") % "{rev}:{node|short} {tags} {id|short}\n"}'
   -1:000000000000  cb9a9f314b8b
   0:cb9a9f314b8b tip cb9a9f314b8b
 
@@ -86,9 +86,9 @@
     "bookmarks": [],
     "branch": "default",
     "dirty": "+",
-    "id": "cb9a9f314b8b+",
+    "id": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b+",
     "node": "ffffffffffffffffffffffffffffffffffffffff",
-    "parents": [{"node": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b", "rev": 0}],
+    "parents": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
     "tags": ["tip"]
    }
   ]
--- a/tests/test-impexp-branch.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-impexp-branch.t	Mon Oct 22 14:46:06 2018 -0400
@@ -32,12 +32,12 @@
   $ hg export 1 > ../r1.patch
   $ cd ..
 
-  $ if $PYTHON findbranch.py < r0.patch; then
+  $ if "$PYTHON" findbranch.py < r0.patch; then
   >     echo "Export of default branch revision has Branch header" 1>&2
   >     exit 1
   > fi
 
-  $ if $PYTHON findbranch.py < r1.patch; then
+  $ if "$PYTHON" findbranch.py < r1.patch; then
   >     :  # Do nothing
   > else
   >     echo "Export of branch revision is missing Branch header" 1>&2
--- a/tests/test-import-bypass.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import-bypass.t	Mon Oct 22 14:46:06 2018 -0400
@@ -227,7 +227,7 @@
 (this also tests that editor is not invoked for '--bypass', if the
 commit message is explicitly specified, regardless of '--edit')
 
-  $ $PYTHON -c 'open("a", "wb").write(b"a\r\n")'
+  $ "$PYTHON" -c 'open("a", "wb").write(b"a\r\n")'
   $ hg ci -m makeacrlf
   $ HGEDITOR=cat hg import -m 'should fail because of eol' --edit --bypass ../test.diff
   applying ../test.diff
--- a/tests/test-import-context.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import-context.t	Mon Oct 22 14:46:06 2018 -0400
@@ -19,17 +19,20 @@
   > EOF
   $ cat > cat.py <<EOF
   > import sys
-  > sys.stdout.write(repr(open(sys.argv[1], 'rb').read()) + '\n')
+  > from mercurial import pycompat
+  > from mercurial.utils import stringutil
+  > pycompat.stdout.write(b'%s\n'
+  >                       % stringutil.pprint(open(sys.argv[1], 'rb').read()))
   > EOF
 
 Initialize the test repository
 
   $ hg init repo
   $ cd repo
-  $ $PYTHON ../writepatterns.py a 0 5A 1B 5C 1D
-  $ $PYTHON ../writepatterns.py b 1 1A 1B
-  $ $PYTHON ../writepatterns.py c 1 5A
-  $ $PYTHON ../writepatterns.py d 1 5A 1B
+  $ "$PYTHON" ../writepatterns.py a 0 5A 1B 5C 1D
+  $ "$PYTHON" ../writepatterns.py b 1 1A 1B
+  $ "$PYTHON" ../writepatterns.py c 1 5A
+  $ "$PYTHON" ../writepatterns.py d 1 5A 1B
   $ hg add
   adding a
   adding b
@@ -114,13 +117,13 @@
 
 What's in a
 
-  $ $PYTHON ../cat.py a
+  $ "$PYTHON" ../cat.py a
   'A\nA\nA\nA\nA\nE\nC\nC\nC\nC\nC\nF\nF\n'
-  $ $PYTHON ../cat.py newnoeol
+  $ "$PYTHON" ../cat.py newnoeol
   'a\nb'
-  $ $PYTHON ../cat.py c
+  $ "$PYTHON" ../cat.py c
   'A\nA\nA\nA\nA\nB\nB\n'
-  $ $PYTHON ../cat.py d
+  $ "$PYTHON" ../cat.py d
   'A\nA\nA\nA\n'
 
   $ cd ..
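The cat.py rewrite above is typical of the Python 3 porting in this
release: on Python 3, sys.stdout accepts only str, and repr() of a bytes
object gains a "b" prefix that Python 2 lacks, so the helper moves to
pycompat.stdout (the byte-oriented stream) and stringutil.pprint
(version-stable rendering of bytes). A standalone sketch of the
byte-stream half, using only the standard library (writebytes is an
illustrative helper, not a Mercurial API):

  import sys

  def writebytes(data):
      # Python 3 exposes the raw byte stream as sys.stdout.buffer;
      # Python 2's sys.stdout already accepts bytes.
      stream = getattr(sys.stdout, 'buffer', sys.stdout)
      stream.write(data)

  writebytes(b'A\nA\nA\n')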
--- a/tests/test-import-eol.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import-eol.t	Mon Oct 22 14:46:06 2018 -0400
@@ -29,14 +29,14 @@
 
 Test different --eol values
 
-  $ $PYTHON -c 'open("a", "wb").write(b"a\nbbb\ncc\n\nd\ne")'
+  $ "$PYTHON" -c 'open("a", "wb").write(b"a\nbbb\ncc\n\nd\ne")'
   $ hg ci -Am adda
   adding .hgignore
   adding a
-  $ $PYTHON ../makepatch.py empty:lf eol.diff
-  $ $PYTHON ../makepatch.py empty:crlf eol-empty-crlf.diff
-  $ $PYTHON ../makepatch.py empty:stripped-lf eol-empty-stripped-lf.diff
-  $ $PYTHON ../makepatch.py empty:stripped-crlf eol-empty-stripped-crlf.diff
+  $ "$PYTHON" ../makepatch.py empty:lf eol.diff
+  $ "$PYTHON" ../makepatch.py empty:crlf eol-empty-crlf.diff
+  $ "$PYTHON" ../makepatch.py empty:stripped-lf eol-empty-stripped-lf.diff
+  $ "$PYTHON" ../makepatch.py empty:stripped-crlf eol-empty-stripped-crlf.diff
 
 invalid eol
 
@@ -116,7 +116,7 @@
 
 auto EOL on CRLF file
 
-  $ $PYTHON -c 'open("a", "wb").write(b"a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
+  $ "$PYTHON" -c 'open("a", "wb").write(b"a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
   $ hg commit -m 'switch EOLs in a'
   $ hg --traceback --config patch.eol='auto' import eol.diff
   applying eol.diff
@@ -132,11 +132,11 @@
 
 auto EOL on new file or source without any EOL
 
-  $ $PYTHON -c 'open("noeol", "wb").write(b"noeol")'
+  $ "$PYTHON" -c 'open("noeol", "wb").write(b"noeol")'
   $ hg add noeol
   $ hg commit -m 'add noeol'
-  $ $PYTHON -c 'open("noeol", "wb").write(b"noeol\r\nnoeol\n")'
-  $ $PYTHON -c 'open("neweol", "wb").write(b"neweol\nneweol\r\n")'
+  $ "$PYTHON" -c 'open("noeol", "wb").write(b"noeol\r\nnoeol\n")'
+  $ "$PYTHON" -c 'open("neweol", "wb").write(b"neweol\nneweol\r\n")'
   $ hg add neweol
   $ hg diff --git > noeol.diff
   $ hg revert --no-backup noeol neweol
@@ -154,10 +154,10 @@
 
 Test --eol and binary patches
 
-  $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nb\r\nd")'
+  $ "$PYTHON" -c 'open("b", "wb").write(b"a\x00\nb\r\nd")'
   $ hg ci -Am addb
   adding b
-  $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nc\r\nd")'
+  $ "$PYTHON" -c 'open("b", "wb").write(b"a\x00\nc\r\nd")'
   $ hg diff --git > bin.diff
   $ hg revert --no-backup b
 
--- a/tests/test-import-git.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import-git.t	Mon Oct 22 14:46:06 2018 -0400
@@ -615,8 +615,8 @@
 Prefix with strip, renames, creates etc
 
   $ hg revert -aC
+  forgetting b
   undeleting a
-  forgetting b
   $ rm b
   $ mkdir -p dir/dir2
   $ echo b > dir/dir2/b
@@ -715,10 +715,10 @@
 
   $ hg revert -aC
   forgetting dir/a
+  forgetting dir/dir2/b2
+  reverting dir/dir2/c
   undeleting dir/d
   undeleting dir/dir2/b
-  forgetting dir/dir2/b2
-  reverting dir/dir2/c
   $ rm dir/a dir/dir2/b2
   $ hg import --similarity 90 --no-commit - <<EOF
   > diff --git a/a b/b
--- a/tests/test-import-merge.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import-merge.t	Mon Oct 22 14:46:06 2018 -0400
@@ -164,4 +164,4 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
--- a/tests/test-import.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-import.t	Mon Oct 22 14:46:06 2018 -0400
@@ -285,7 +285,8 @@
   $ rm -r b
 
   $ cat > mkmsg.py <<EOF
-  > import email.message, sys
+  > import email.message
+  > import sys
   > msg = email.message.Message()
   > patch = open(sys.argv[1], 'rb').read()
   > msg.set_payload(b'email commit message\n' + patch)
@@ -305,7 +306,7 @@
   new changesets 80971e65b431
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ $PYTHON mkmsg.py diffed-tip.patch msg.patch
+  $ "$PYTHON" mkmsg.py diffed-tip.patch msg.patch
   $ hg --cwd b import ../msg.patch
   applying ../msg.patch
   $ hg --cwd b tip | grep email
@@ -371,7 +372,7 @@
   new changesets 80971e65b431
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ $PYTHON mkmsg.py exported-tip.patch msg.patch
+  $ "$PYTHON" mkmsg.py exported-tip.patch msg.patch
   $ cat msg.patch | hg --cwd b import -
   applying patch from stdin
   $ hg --cwd b tip | grep second
@@ -383,7 +384,8 @@
 The '---' tests the gitsendmail handling without proper mail headers
 
   $ cat > mkmsg2.py <<EOF
-  > import email.message, sys
+  > import email.message
+  > import sys
   > msg = email.message.Message()
   > patch = open(sys.argv[1], 'rb').read()
   > msg.set_payload(b'email patch\n\nnext line\n---\n' + patch)
@@ -403,7 +405,7 @@
   new changesets 80971e65b431
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ $PYTHON mkmsg2.py diffed-tip.patch msg.patch
+  $ "$PYTHON" mkmsg2.py diffed-tip.patch msg.patch
   $ cat msg.patch | hg --cwd b import -
   applying patch from stdin
   $ hg --cwd b tip --template '{desc}\n'
@@ -865,7 +867,7 @@
   $ hg init binaryremoval
   $ cd binaryremoval
   $ echo a > a
-  $ $PYTHON -c "open('b', 'wb').write(b'a\x00b')"
+  $ "$PYTHON" -c "open('b', 'wb').write(b'a\x00b')"
   $ hg ci -Am addall
   adding a
   adding b
@@ -1014,8 +1016,8 @@
     a
   R a
   $ hg revert -a
+  forgetting b
   undeleting a
-  forgetting b
   $ cat b
   mod b
   $ rm b
@@ -1871,8 +1873,8 @@
 ===========================
 
   $ cat > $TESTTMP/parseextra.py <<EOF
+  > import mercurial.cmdutil
   > import mercurial.patch
-  > import mercurial.cmdutil
   > 
   > def processfoo(repo, data, extra, opts):
   >     if b'foo' in data:
--- a/tests/test-imports-checker.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-imports-checker.t	Mon Oct 22 14:46:06 2018 -0400
@@ -8,7 +8,7 @@
 it's working correctly.
   $ TERM=dumb
   $ export TERM
-  $ $PYTHON -m doctest $import_checker
+  $ "$PYTHON" -m doctest $import_checker
 
 Run additional tests for the import checker
 
@@ -136,7 +136,7 @@
   > from . import errors
   > EOF
 
-  $ $PYTHON "$import_checker" testpackage*/*.py testpackage/subpackage/*.py \
+  $ "$PYTHON" "$import_checker" testpackage*/*.py testpackage/subpackage/*.py \
   >   email/*.py
   testpackage/importalias.py:2: ui module must be "as" aliased to uimod
   testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod
--- a/tests/test-incoming-outgoing.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-incoming-outgoing.t	Mon Oct 22 14:46:06 2018 -0400
@@ -12,7 +12,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 9 changesets, 9 total revisions
+  checked 9 changesets with 9 changes to 1 files
   $ hg serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
   $ cd ..
@@ -329,14 +329,14 @@
   adding manifests
   adding file changes
   added 9 changesets with 9 changes to 1 files
-  new changesets 00a43fa82f62:e4feb4ac9035
+  new changesets 00a43fa82f62:e4feb4ac9035 (9 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R temp2 unbundle test2.hg
   adding changesets
   adding manifests
   adding file changes
   added 9 changesets with 9 changes to 1 files
-  new changesets 00a43fa82f62:e4feb4ac9035
+  new changesets 00a43fa82f62:e4feb4ac9035 (9 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R temp tip
   changeset:   8:e4feb4ac9035
@@ -370,7 +370,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 14 changesets, 14 total revisions
+  checked 14 changesets with 14 changes to 1 files
   $ cd ..
   $ hg -R test-dev outgoing test
   comparing with test
--- a/tests/test-infinitepush-bundlestore.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-infinitepush-bundlestore.t	Mon Oct 22 14:46:06 2018 -0400
@@ -77,7 +77,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 20759b6926ce
+  new changesets 20759b6926ce (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg log -G
   o  changeset:   1:20759b6926ce
@@ -170,7 +170,7 @@
   adding file changes
   added 1 changesets with 1 changes to 2 files
   adding remote bookmark newbook
-  new changesets 1de1d7d92f89
+  new changesets 1de1d7d92f89 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg log -G -T '{desc} {phase} {bookmarks}'
   o  new scratch commit draft scratch/mybranch
--- a/tests/test-infinitepush-ci.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-infinitepush-ci.t	Mon Oct 22 14:46:06 2018 -0400
@@ -321,7 +321,7 @@
   adding manifests
   adding file changes
   added 5 changesets with 5 changes to 5 files
-  new changesets eaba929e866c:9b42578d4447
+  new changesets eaba929e866c:9b42578d4447 (1 drafts)
   (run 'hg update' to get a working copy)
 
   $ hg glog
@@ -423,7 +423,7 @@
   added 1 changesets with 0 changes to 1 files (+1 heads)
   1 new obsolescence markers
   obsoleted 1 changesets
-  new changesets 99949238d9ac
+  new changesets 99949238d9ac (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg glog
--- a/tests/test-infinitepush.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-infinitepush.t	Mon Oct 22 14:46:06 2018 -0400
@@ -112,7 +112,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 33910bfe6ffe
+  new changesets 33910bfe6ffe (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg log -G -T '{desc} {phase} {bookmarks}'
   o  testpullbycommithash1 draft
@@ -163,7 +163,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets a79b6597f322:c70aee6da07d
+  new changesets a79b6597f322:c70aee6da07d (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg log -r scratch/scratchontopofpublic -T '{phase}'
   draft (no-eol)
--- a/tests/test-inherit-mode.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-inherit-mode.t	Mon Oct 22 14:46:06 2018 -0400
@@ -48,7 +48,7 @@
 store can be written by the group, other files cannot
 store is setgid
 
-  $ $PYTHON ../printmodes.py .
+  $ "$PYTHON" ../printmodes.py .
   00700 ./.hg/
   00600 ./.hg/00changelog.i
   00600 ./.hg/requires
@@ -64,11 +64,12 @@
 (in particular, store/**, dirstate, branch cache file, undo files)
 new directories are setgid
 
-  $ $PYTHON ../printmodes.py .
+  $ "$PYTHON" ../printmodes.py .
   00700 ./.hg/
   00600 ./.hg/00changelog.i
   00770 ./.hg/cache/
   00660 ./.hg/cache/branch2-served
+  00660 ./.hg/cache/manifestfulltextcache (reporevlogstore !)
   00660 ./.hg/cache/rbc-names-v1
   00660 ./.hg/cache/rbc-revs-v1
   00660 ./.hg/dirstate
@@ -108,7 +109,7 @@
 before push
 group can write everything
 
-  $ $PYTHON ../printmodes.py ../push
+  $ "$PYTHON" ../printmodes.py ../push
   00770 ../push/.hg/
   00660 ../push/.hg/00changelog.i
   00660 ../push/.hg/requires
@@ -120,7 +121,7 @@
 after push
 group can still write everything
 
-  $ $PYTHON ../printmodes.py ../push
+  $ "$PYTHON" ../printmodes.py ../push
   00770 ../push/.hg/
   00660 ../push/.hg/00changelog.i
   00770 ../push/.hg/cache/
@@ -162,8 +163,8 @@
   $ mkdir dir
   $ touch dir/file
   $ hg ci -qAm 'add dir/file'
-  $ storemode=`$PYTHON ../mode.py .hg/store`
-  $ dirmode=`$PYTHON ../mode.py .hg/store/data/dir`
+  $ storemode=`"$PYTHON" ../mode.py .hg/store`
+  $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
   $ if [ "$storemode" != "$dirmode" ]; then
   >  echo "$storemode != $dirmode"
   > fi
--- a/tests/test-install.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-install.t	Mon Oct 22 14:46:06 2018 -0400
@@ -153,7 +153,9 @@
   $ . "$TESTDIR/helpers-testrepo.sh"
 
   $ cat >> wixxml.py << EOF
-  > import os, subprocess, sys
+  > import os
+  > import subprocess
+  > import sys
   > import xml.etree.ElementTree as ET
   > 
   > # MSYS mangles the path if it expands $TESTDIR
@@ -205,7 +207,7 @@
   >     print('  %s' % f)
   > EOF
 
-  $ ( testrepohgenv; $PYTHON wixxml.py help )
+  $ ( testrepohgenv; "$PYTHON" wixxml.py help )
   Not installed:
     help/common.txt
     help/hg-ssh.8.txt
@@ -214,7 +216,7 @@
     help/hgrc.5.txt
   Not tracked:
 
-  $ ( testrepohgenv; $PYTHON wixxml.py templates )
+  $ ( testrepohgenv; "$PYTHON" wixxml.py templates )
   Not installed:
   Not tracked:
 
@@ -231,7 +233,7 @@
 ancient virtualenv from their linux distro or similar and it's not yet
 the default for them.
   $ unset PYTHONPATH
-  $ $PYTHON -m virtualenv --no-site-packages --never-download installenv >> pip.log
+  $ "$PYTHON" -m virtualenv --no-site-packages --never-download installenv >> pip.log
 Note: we use this weird path to run pip and hg to avoid platform differences,
 since it's bin on most platforms but Scripts on Windows.
   $ ./installenv/*/pip install --no-index $TESTDIR/.. >> pip.log
--- a/tests/test-issue1175.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-issue1175.t	Mon Oct 22 14:46:06 2018 -0400
@@ -41,7 +41,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 6 changesets, 4 total revisions
+  checked 6 changesets with 4 changes to 4 files
 
   $ hg export --git tip
   # HG changeset patch
--- a/tests/test-issue4074.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-issue4074.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,12 +16,12 @@
 
 Check in a big file:
 
-  $ $PYTHON ../s.py > a
+  $ "$PYTHON" ../s.py > a
   $ hg ci -qAm0
 
 Modify it:
 
-  $ $PYTHON ../s.py > a
+  $ "$PYTHON" ../s.py > a
 
 Time a check-in, should never take more than 10 seconds user time:
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-issue5979.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,34 @@
+  $ hg init r1
+  $ cd r1
+  $ hg ci --config ui.allowemptycommit=true -m c0
+  $ hg ci --config ui.allowemptycommit=true -m c1
+  $ hg ci --config ui.allowemptycommit=true -m c2
+  $ hg co -q 0
+  $ hg ci --config ui.allowemptycommit=true -m c3
+  created new head
+  $ hg co -q 3
+  $ hg merge --quiet
+  $ hg ci --config ui.allowemptycommit=true -m c4
+
+  $ hg log -G -T'{desc}'
+  @    c4
+  |\
+  | o  c3
+  | |
+  o |  c2
+  | |
+  o |  c1
+  |/
+  o  c0
+  
+
+  >>> from mercurial import hg
+  >>> from mercurial import ui as uimod
+  >>> repo = hg.repository(uimod.ui())
+  >>> for anc in repo.changelog.ancestors([4], inclusive=True):
+  ...   print(anc)
+  4
+  3
+  2
+  1
+  0
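The new test exercises changelog.ancestors() across a merge: rev 4 has two
parents, and the walk is expected to reach every revision exactly once. A
hedged pure-Python sketch of an inclusive ancestor walk over the same DAG,
with -1 standing in for the null revision (an illustration of the
semantics, not Mercurial's actual implementation):

  def ancestors(parents, revs, inclusive=False):
      # Walk all ancestors of `revs` in a DAG given as rev -> parent revs.
      seen = set(revs)
      if inclusive:
          for r in sorted(revs, reverse=True):
              yield r
      stack = list(revs)
      while stack:
          for p in parents[stack.pop()]:
              if p >= 0 and p not in seen:
                  seen.add(p)
                  stack.append(p)
                  yield p

  # The DAG from the log -G output: c4 (rev 4) merges rev 3 and rev 2.
  dag = {0: [-1], 1: [0], 2: [1], 3: [0], 4: [3, 2]}
  print(sorted(ancestors(dag, [4], inclusive=True), reverse=True))
  # -> [4, 3, 2, 1, 0], matching the expected test output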
--- a/tests/test-issue660.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-issue660.t	Mon Oct 22 14:46:06 2018 -0400
@@ -66,9 +66,9 @@
 Revert all - should succeed:
 
   $ hg revert --all
-  undeleting a
   forgetting a/a
   forgetting b
+  undeleting a
   undeleting b/b
 
   $ hg st
--- a/tests/test-journal-exists.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-journal-exists.t	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
 Check that zero-size journals are correctly aborted:
 
--- a/tests/test-journal.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-journal.t	Mon Oct 22 14:46:06 2018 -0400
@@ -3,7 +3,7 @@
   $ cat >> testmocks.py << EOF
   > # mock out procutil.getuser() and util.makedate() to supply testable values
   > import os
-  > from mercurial import util, pycompat
+  > from mercurial import pycompat, util
   > from mercurial.utils import dateutil, procutil
   > def mockgetuser():
   >     return b'foobar'
@@ -149,44 +149,44 @@
     "command": "up",
     "date": [5, 0],
     "name": ".",
-    "newhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
-    "oldhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "newnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "oldnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
     "user": "foobar"
    },
    {
     "command": "up 0",
     "date": [2, 0],
     "name": ".",
-    "newhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
-    "oldhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "newnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "oldnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
     "user": "foobar"
    },
    {
     "command": "commit -Aqm b",
     "date": [1, 0],
     "name": ".",
-    "newhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
-    "oldhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "newnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "oldnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
     "user": "foobar"
    },
    {
     "command": "commit -Aqm a",
     "date": [0, 0],
     "name": ".",
-    "newhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
-    "oldhashes": ["0000000000000000000000000000000000000000"],
+    "newnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "oldnodes": ["0000000000000000000000000000000000000000"],
     "user": "foobar"
    }
   ]
 
   $ cat <<EOF >> $HGRCPATH
   > [templates]
-  > j = "{oldhashes % '{node|upper}'} -> {newhashes % '{node|upper}'}
+  > j = "{oldnodes % '{node|upper}'} -> {newnodes % '{node|upper}'}
   >      - user: {user}
   >      - command: {command}
   >      - date: {date|rfc3339date}
-  >      - newhashes: {newhashes}
-  >      - oldhashes: {oldhashes}
+  >      - newnodes: {newnodes}
+  >      - oldnodes: {oldnodes}
   >      "
   > EOF
   $ hg journal -Tj -l1
@@ -195,8 +195,8 @@
   - user: foobar
   - command: up
   - date: 1970-01-01T00:00:05+00:00
-  - newhashes: 1e6c11564562b4ed919baca798bc4338bd299d6a
-  - oldhashes: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  - newnodes: 1e6c11564562b4ed919baca798bc4338bd299d6a
+  - oldnodes: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
 
   $ hg journal --commit
   previous locations of '.':
@@ -231,6 +231,62 @@
   summary:     a
   
 
+  $ hg journal --commit -Tjson
+  [
+   {
+    "changesets": [{"bookmarks": ["bar", "baz"], "branch": "default", "date": [0, 0], "desc": "b", "node": "1e6c11564562b4ed919baca798bc4338bd299d6a", "parents": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"], "phase": "draft", "rev": 1, "tags": ["tip"], "user": "test"}],
+    "command": "up",
+    "date": [5, 0],
+    "name": ".",
+    "newnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "oldnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "user": "foobar"
+   },
+   {
+    "changesets": [{"bookmarks": [], "branch": "default", "date": [0, 0], "desc": "a", "node": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b", "parents": ["0000000000000000000000000000000000000000"], "phase": "draft", "rev": 0, "tags": [], "user": "test"}],
+    "command": "up 0",
+    "date": [2, 0],
+    "name": ".",
+    "newnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "oldnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "user": "foobar"
+   },
+   {
+    "changesets": [{"bookmarks": ["bar", "baz"], "branch": "default", "date": [0, 0], "desc": "b", "node": "1e6c11564562b4ed919baca798bc4338bd299d6a", "parents": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"], "phase": "draft", "rev": 1, "tags": ["tip"], "user": "test"}],
+    "command": "commit -Aqm b",
+    "date": [1, 0],
+    "name": ".",
+    "newnodes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+    "oldnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "user": "foobar"
+   },
+   {
+    "changesets": [{"bookmarks": [], "branch": "default", "date": [0, 0], "desc": "a", "node": "cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b", "parents": ["0000000000000000000000000000000000000000"], "phase": "draft", "rev": 0, "tags": [], "user": "test"}],
+    "command": "commit -Aqm a",
+    "date": [0, 0],
+    "name": ".",
+    "newnodes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+    "oldnodes": ["0000000000000000000000000000000000000000"],
+    "user": "foobar"
+   }
+  ]
+
+  $ hg journal --commit \
+  > -T'command: {command}\n{changesets % " rev: {rev}\n children: {children}\n"}'
+  previous locations of '.':
+  command: up
+   rev: 1
+   children: 
+  command: up 0
+   rev: 0
+   children: 1:1e6c11564562
+  command: commit -Aqm b
+   rev: 1
+   children: 
+  command: commit -Aqm a
+   rev: 0
+   children: 1:1e6c11564562
+
 Test for behaviour on unexpected storage version information
 
   $ printf '42\0' > .hg/namejournal
--- a/tests/test-keyword.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-keyword.t	Mon Oct 22 14:46:06 2018 -0400
@@ -125,7 +125,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets a2392c293916
+  new changesets a2392c293916 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up a2392c293916
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -214,10 +214,10 @@
 
 hg status of kw-ignored binary file starting with '\1\n'
 
-  >>> open("i", "wb").write("\1\nfoo")
+  >>> open("i", "wb").write(b"\1\nfoo") and None
   $ hg -q commit -Am metasep i
   $ hg status
-  >>> open("i", "wb").write("\1\nbar")
+  >>> open("i", "wb").write(b"\1\nbar") and None
   $ hg status
   M i
   $ hg -q commit -m "modify metasep" i
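The '\1\n' cases above exist because that byte sequence is significant in
filelog storage: revision text beginning with b'\x01\n' carries a metadata
header (copy information and similar) closed by a second b'\x01\n', so
working-copy files that genuinely start with those bytes need an escaping
empty header when stored. A hedged sketch of the envelope format
(splitmeta is illustrative, not filelog.py itself):

  def splitmeta(text):
      # Separate the optional b'\x01\n'-delimited header from the data.
      if text.startswith(b'\x01\n'):
          end = text.index(b'\x01\n', 2)
          metablock, data = text[2:end], text[end + 2:]
          meta = dict(line.split(b': ', 1)
                      for line in metablock.splitlines() if line)
          return meta, data
      return {}, text

  print(splitmeta(b'\x01\ncopy: a\ncopyrev: 1234\n\x01\nreal contents'))
  # -> ({b'copy': b'a', b'copyrev': b'1234'}, b'real contents')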
@@ -263,7 +263,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 3 changes to 3 files
-  new changesets a2392c293916:ef63ca68695b
+  new changesets a2392c293916:ef63ca68695b (2 drafts)
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   MIME-Version: 1.0
   Content-Type: text/plain; charset="us-ascii"
@@ -378,8 +378,8 @@
 record chunk
 
   >>> lines = open('a', 'rb').readlines()
-  >>> lines.insert(1, 'foo\n')
-  >>> lines.append('bar\n')
+  >>> lines.insert(1, b'foo\n')
+  >>> lines.append(b'bar\n')
   >>> open('a', 'wb').writelines(lines)
   $ hg record -d '10 1' -m rectest a<<EOF
   > y
@@ -842,7 +842,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 3 changesets, 4 total revisions
+  checked 3 changesets with 4 changes to 3 files
   $ cat a b
   expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
   do not process $Id:
@@ -942,8 +942,8 @@
 Imported patch should not be rejected
 
   >>> import re
-  >>> text = re.sub(r'(Id.*)', r'\1 rejecttest', open('a').read())
-  >>> open('a', 'wb').write(text)
+  >>> text = re.sub(br'(Id.*)', br'\1 rejecttest', open('a', 'rb').read())
+  >>> open('a', 'wb').write(text) and None
   $ hg --debug commit -m'rejects?' -d '3 0' -u 'User Name <user@example.com>'
   committing files:
   a
@@ -1423,9 +1423,9 @@
   ...     # hello block
   ...     readchannel(server)
   ... 
-  ...     runcommand(server, ['cat', 'm'])
-  ...     runcommand(server, ['diff', '-c', '.', 'm'])
-  ...     runcommand(server, ['cat', 'm'])
+  ...     runcommand(server, [b'cat', b'm'])
+  ...     runcommand(server, [b'diff', b'-c', b'.', b'm'])
+  ...     runcommand(server, [b'cat', b'm'])
   *** runcommand cat m
   $Id: m 800511b3a22d Thu, 01 Jan 1970 00:00:00 +0000 test $
   bar
--- a/tests/test-largefiles-misc.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-largefiles-misc.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,53 @@
 This file contains testcases that tend to be related to special cases or less
 common commands affecting largefile.
 
+  $ hg init requirements
+  $ cd requirements
+
+# largefiles not loaded by default.
+
+  $ hg config extensions
+  [1]
+
+# Adding largefiles to requires file will auto-load largefiles extension.
+
+  $ echo largefiles >> .hg/requires
+  $ hg config extensions
+  extensions.largefiles=
+
+# But only if there is no config entry for the extension already.
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > largefiles=!
+  > EOF
+
+  $ hg config extensions
+  abort: repository requires features unknown to this Mercurial: largefiles!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > largefiles=
+  > EOF
+
+  $ hg config extensions
+  extensions.largefiles=
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > largefiles = missing.py
+  > EOF
+
+  $ hg config extensions
+  *** failed to import extension largefiles from missing.py: [Errno 2] $ENOENT$: 'missing.py'
+  abort: repository requires features unknown to this Mercurial: largefiles!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+  $ cd ..
+
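The new testcase above documents requirements-driven extension loading:
each line of .hg/requires names a feature the repository depends on, a
matching extension is autoloaded when nothing in the config says
otherwise, and disabling it with "largefiles=!" leaves the requirement
unsatisfiable, producing the abort shown. A minimal sketch of the gating
check (illustrative only; the real logic lives in repository and
extension setup):

  def missingrequirements(requirelines, supported):
      # Which required feature names can this Mercurial not supply?
      required = {l.strip() for l in requirelines if l.strip()}
      return sorted(required - supported)

  supported = {'dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store'}
  print(missingrequirements(['revlogv1\n', 'store\n', 'largefiles\n'],
                            supported))
  # -> ['largefiles']: loadable as an extension when available, a hard
  # abort when it has been disabled in [extensions]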
 Each section should be independent of the others.
 
   $ USERCACHE="$TESTTMP/cache"; export USERCACHE
@@ -111,7 +158,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 09a186cfa6da
+  new changesets 09a186cfa6da (1 drafts)
   updating to branch default
   getting changed largefiles
   1 largefiles updated, 0 removed
@@ -220,10 +267,8 @@
   getting changed largefiles
   1 largefiles updated, 0 removed
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat ../shared_lfrepo/.hg/hgrc
-  
-  [extensions]
-  largefiles=
+  $ grep largefiles ../shared_lfrepo/.hg/requires
+  largefiles
 
 verify that large files in subrepos handled properly
   $ hg init subrepo
@@ -1095,7 +1140,7 @@
 Move (and then undo) a directory move with only largefiles.
 
   $ cd subrepo-root
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir/
   .hglf/dir/subdir/
@@ -1110,7 +1155,7 @@
   $ hg mv dir/subdir dir/subdir2
   moving .hglf/dir/subdir/large.bin to .hglf/dir/subdir2/large.bin
 
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir/
   .hglf/dir/subdir2/
@@ -1135,8 +1180,8 @@
   ? large.orig
 
   $ hg revert --all
+  forgetting .hglf/dir/subdir2/large.bin
   undeleting .hglf/dir/subdir/large.bin
-  forgetting .hglf/dir/subdir2/large.bin
   reverting subrepo no-largefiles
 
   $ hg status -C
@@ -1150,7 +1195,7 @@
 
 The standin for subdir2 should be deleted, not just dropped
 
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir/
   .hglf/dir/subdir/
@@ -1177,7 +1222,7 @@
   R dir/subdir/large.bin
   ? large.orig
 
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir/
   .hglf/dir/subdir2/
@@ -1202,7 +1247,7 @@
     dir/subdir/large.bin
   R dir/subdir/large.bin
 
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir2/
   .hglf/dir2/subdir/
@@ -1214,14 +1259,14 @@
   large
 
   $ hg revert --all
+  forgetting .hglf/dir2/subdir/large.bin
   undeleting .hglf/dir/subdir/large.bin
-  forgetting .hglf/dir2/subdir/large.bin
   reverting subrepo no-largefiles
 
   $ hg status -C
   ? dir2/subdir/large.bin
 
-  $ $PYTHON $TESTDIR/list-tree.py .hglf dir* large*
+  $ "$PYTHON" $TESTDIR/list-tree.py .hglf dir* large*
   .hglf/
   .hglf/dir/
   .hglf/dir/subdir/
@@ -1263,7 +1308,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets bf5e395ced2c
+  new changesets bf5e395ced2c (1 drafts)
   nothing to rebase - updating instead
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
--- a/tests/test-largefiles-update.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-largefiles-update.t	Mon Oct 22 14:46:06 2018 -0400
@@ -412,7 +412,7 @@
   $ hg update -q 5
   remote turned local largefile large2 into a normal file
   keep (l)argefile or use (n)ormal file? l
-  $ hg debugdirstate --nodates | grep large2
+  $ hg debugdirstate --no-dates | grep large2
   a   0         -1 unset               .hglf/large2
   r   0          0 set                 large2
   $ hg status -A large2
@@ -428,7 +428,7 @@
   $ hg update -q 5
   remote turned local largefile large3 into a normal file
   keep (l)argefile or use (n)ormal file? l
-  $ hg debugdirstate --nodates | grep large3
+  $ hg debugdirstate --no-dates | grep large3
   a   0         -1 unset               .hglf/large3
   r   0          0 set                 large3
   $ hg status -A large3
@@ -456,7 +456,7 @@
   adding manifests
   adding file changes
   added 3 changesets with 5 changes to 5 files
-  new changesets 9530e27857f7:d65e59e952a9
+  new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
   remote turned local largefile large2 into a normal file
   keep (l)argefile or use (n)ormal file? l
   largefile large1 has a merge conflict
@@ -492,7 +492,7 @@
   adding manifests
   adding file changes
   added 3 changesets with 5 changes to 5 files
-  new changesets 9530e27857f7:d65e59e952a9
+  new changesets 9530e27857f7:d65e59e952a9 (3 drafts)
   remote turned local largefile large2 into a normal file
   keep (l)argefile or use (n)ormal file? l
   largefile large1 has a merge conflict
@@ -611,7 +611,8 @@
   > EOF
   rebasing 1:72518492caa6 "#1"
   rebasing 4:07d6153b5c04 "#4"
-  local [dest] changed .hglf/large1 which other [source] deleted
+  file '.hglf/large1' was deleted in other [source] but was modified in local [dest].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
 
   $ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
--- a/tests/test-largefiles-wireproto.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-largefiles-wireproto.t	Mon Oct 22 14:46:06 2018 -0400
@@ -51,7 +51,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets b6eb3a2e2efe
+  new changesets b6eb3a2e2efe (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
@@ -64,7 +64,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets b6eb3a2e2efe
+  new changesets b6eb3a2e2efe (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
@@ -166,7 +166,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
   searching 1 changesets for largefiles
   verified existence of 1 revisions of 1 largefiles
   $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid
@@ -236,14 +236,14 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets cf03e5bb9936
+  new changesets cf03e5bb9936 (1 drafts)
 
 Archive contains largefiles
   >>> import os
   >>> import urllib2
   >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
   >>> with open('archive.zip', 'w') as f:
-  ...     f.write(urllib2.urlopen(u).read())
+  ...     f.write(urllib2.urlopen(u).read()) and None
   $ unzip -t archive.zip
   Archive:  archive.zip
       testing: empty-default/.hg_archival.txt*OK (glob)
@@ -259,7 +259,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   searching 1 changesets for largefiles
   changeset 0:cf03e5bb9936: f1 missing
   verified existence of 1 revisions of 1 largefiles
@@ -295,7 +295,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   searching 1 changesets for largefiles
   verified contents of 1 revisions of 1 largefiles
   $ hg -R http-clone up -Cqr null
@@ -352,13 +352,13 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets 567253b0f523:04d19c27a332
+  new changesets 567253b0f523:04d19c27a332 (2 drafts)
   $ hg -R batchverifyclone verify --large --lfa
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
   searching 2 changesets for largefiles
   verified existence of 2 revisions of 2 largefiles
   $ tail -1 access.log
@@ -389,14 +389,14 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 6bba8cb6935d
+  new changesets 6bba8cb6935d (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R batchverifyclone verify --lfa
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 3 files
   searching 3 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
   $ tail -1 access.log
@@ -424,12 +424,12 @@
   > import base64
   > from mercurial.hgweb import common
   > def perform_authentication(hgweb, req, op):
-  >     auth = req.headers.get('Authorization')
+  >     auth = req.headers.get(b'Authorization')
   >     if not auth:
-  >         raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
-  >                 [('WWW-Authenticate', 'Basic Realm="mercurial"')])
-  >     if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
-  >         raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
+  >         raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, b'who',
+  >                 [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
+  >     if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user', b'pass']:
+  >         raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
   > def extsetup():
   >     common.permhooks.insert(0, perform_authentication)
   > EOT
@@ -452,7 +452,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 567253b0f523
+  new changesets 567253b0f523 (1 drafts)
   updating to branch default
   getting changed largefiles
   1 largefiles updated, 0 removed
--- a/tests/test-largefiles.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-largefiles.t	Mon Oct 22 14:46:06 2018 -0400
@@ -43,12 +43,12 @@
   $ touch large1 sub/large2
   $ sleep 1
   $ hg st
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   n 644         41 set                 .hglf/large1
   n 644         41 set                 .hglf/sub/large2
   n 644          8 set                 normal1
   n 644          8 set                 sub/normal2
-  $ hg debugstate --large --nodates
+  $ hg debugstate --large --no-dates
   n 644          7 set                 large1
   n 644          7 set                 sub/large2
   $ echo normal11 > normal1
@@ -225,7 +225,7 @@
   $ . "$TESTDIR/cgienv"
 
   $ SCRIPT_NAME='' \
-  > $PYTHON "$TESTTMP/hgweb.cgi" > /dev/null
+  > "$PYTHON" "$TESTTMP/hgweb.cgi" > /dev/null
 
 Test archiving the various revisions.  These hit corner cases known with
 archiving.
@@ -961,7 +961,7 @@
   adding manifests
   adding file changes
   added 4 changesets with 10 changes to 4 files
-  new changesets 30d30fe6a5be:9e8fbc4bce62
+  new changesets 30d30fe6a5be:9e8fbc4bce62 (4 drafts)
   updating to branch default
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -1031,7 +1031,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  10 files, 8 changesets, 24 total revisions
+  checked 8 changesets with 24 changes to 10 files
   searching 8 changesets for largefiles
   verified contents of 13 revisions of 6 largefiles
   $ hg -R a-clone1 sum
@@ -1090,7 +1090,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 8 changes to 4 files
-  new changesets 30d30fe6a5be:ce8896473775
+  new changesets 30d30fe6a5be:ce8896473775 (2 drafts)
   updating to branch default
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -1104,7 +1104,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 16 changes to 8 files
-  new changesets 51a0ae4d5864:daea875e9014
+  new changesets 51a0ae4d5864:daea875e9014 (6 drafts)
   (run 'hg update' to get a working copy)
   6 largefiles cached
 
@@ -1132,7 +1132,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 16 changes to 8 files
-  new changesets 51a0ae4d5864:daea875e9014
+  new changesets 51a0ae4d5864:daea875e9014 (6 drafts)
   calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
   (run 'hg update' to get a working copy)
   pulling largefiles for revision 7
@@ -1205,13 +1205,13 @@
   adding manifests
   adding file changes
   added 1 changesets with 2 changes to 2 files (+1 heads)
-  new changesets a381d2c8c80e
+  new changesets a381d2c8c80e (1 drafts)
+  0 largefiles cached
   rebasing 8:f574fb32bb45 "modify normal file largefile in repo d"
   Invoking status precommit hook
   M sub/normal4
   M sub2/large6
   saved backup bundle to $TESTTMP/d/.hg/strip-backup/f574fb32bb45-dd1d9f80-rebase.hg
-  0 largefiles cached
   $ [ -f .hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928 ]
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n'
   9:598410d3eb9a  modify normal file largefile in repo d
@@ -1263,7 +1263,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 2 changes to 2 files (+1 heads)
-  new changesets a381d2c8c80e
+  new changesets a381d2c8c80e (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg rebase
   rebasing 8:f574fb32bb45 "modify normal file largefile in repo d"
@@ -1513,9 +1513,9 @@
   $ cat sub/large4
   large4-modified
   $ hg revert -a --no-backup
-  undeleting .hglf/sub2/large6
   forgetting .hglf/sub2/large8
   reverting normal3
+  undeleting .hglf/sub2/large6
   $ hg status
   ? sub/large4.orig
   ? sub/normal4.orig
@@ -1549,7 +1549,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  10 files, 10 changesets, 28 total revisions
+  checked 10 changesets with 28 changes to 10 files
   searching 1 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
 
@@ -1563,7 +1563,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  10 files, 10 changesets, 28 total revisions
+  checked 10 changesets with 28 changes to 10 files
   searching 1 changesets for largefiles
   changeset 9:598410d3eb9a: sub/large4 references missing $TESTTMP/d/.hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928
   verified existence of 3 revisions of 3 largefiles
@@ -1669,7 +1669,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 24 changes to 10 files
-  new changesets 30d30fe6a5be:daea875e9014
+  new changesets 30d30fe6a5be:daea875e9014 (8 drafts)
   updating to branch default
   getting changed largefiles
   3 largefiles updated, 0 removed
@@ -1695,7 +1695,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 4 changes to 4 files (+1 heads)
-  new changesets a381d2c8c80e:598410d3eb9a
+  new changesets a381d2c8c80e:598410d3eb9a (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   2 largefiles cached
   $ hg merge
@@ -1771,7 +1771,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 26 changes to 10 files
-  new changesets 30d30fe6a5be:a381d2c8c80e
+  new changesets 30d30fe6a5be:a381d2c8c80e (9 drafts)
   updating to branch default
   getting changed largefiles
   3 largefiles updated, 0 removed
@@ -1784,7 +1784,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 2 changes to 2 files
-  new changesets 598410d3eb9a
+  new changesets 598410d3eb9a (1 drafts)
   $ hg log --template '{rev}:{node|short}  {desc|firstline}\n'
   9:598410d3eb9a  modify normal file largefile in repo d
   8:a381d2c8c80e  modify normal file and largefile in repo b
--- a/tests/test-lfconvert.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfconvert.t	Mon Oct 22 14:46:06 2018 -0400
@@ -101,6 +101,7 @@
   largefiles
   revlogv1
   store
+  testonly-simplestore (reposimplestore !)
 
 "lfconvert" includes a newline at the end of the standin files.
   $ cat .hglf/large .hglf/sub/maybelarge.dat
@@ -336,7 +337,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  9 files, 8 changesets, 13 total revisions
+  checked 8 changesets with 13 changes to 9 files
   searching 7 changesets for largefiles
   changeset 0:d4892ec57ce2: large references missing $TESTTMP/largefiles-repo-hg/.hg/largefiles/2e000fa7e85759c7f4c254d4d9c33ef481e459a7
   changeset 1:334e5237836d: sub/maybelarge.dat references missing $TESTTMP/largefiles-repo-hg/.hg/largefiles/34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
--- a/tests/test-lfs-largefiles.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs-largefiles.t	Mon Oct 22 14:46:06 2018 -0400
@@ -201,7 +201,7 @@
 commit.  By the time the commit occurs, the tracked file is smaller than the
 threshold (assuming it is > 41, so the standins don't become lfs objects).
 
-  $ $PYTHON -c 'import sys ; sys.stdout.write("y\n" * 1048576)' > large_by_size.bin
+  $ "$PYTHON" -c 'import sys ; sys.stdout.write("y\n" * 1048576)' > large_by_size.bin
   $ hg --config largefiles.minsize=1 ci -Am 'large by size'
   adding large_by_size.bin as a largefile
   $ hg manifest
@@ -286,8 +286,7 @@
   0 remove large_by_size.bin
   $ cd nolargefiles
 
-The requirement is added to the destination repo, and the extension is enabled
-locally.
+The requirement is added to the destination repo.
 
   $ cat .hg/requires
   dotencode
@@ -296,8 +295,6 @@
   lfs
   revlogv1
   store
-  $ hg config --debug extensions | grep lfs
-  $TESTTMP/nolargefiles/.hg/hgrc:*: extensions.lfs= (glob)
 
   $ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n'
   o  8 large_by_size.bin (remove large_by_size.bin)
--- a/tests/test-lfs-pointer.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs-pointer.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,11 +1,5 @@
 from __future__ import absolute_import, print_function
 
-import os
-import sys
-
-# make it runnable using python directly without run-tests.py
-sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-
 # Import something from Mercurial, so the module loader gets initialized.
 from mercurial import pycompat
 del pycompat  # unused for now
--- a/tests/test-lfs-serve-access.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs-serve-access.t	Mon Oct 22 14:46:06 2018 -0400
@@ -42,6 +42,7 @@
 Downloads fail...
 
   $ hg clone http://localhost:$HGPORT httpclone
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   requesting all changes
   adding changesets
   adding manifests
@@ -52,7 +53,7 @@
   abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
   [255]
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
   $ cat $TESTTMP/access.log $TESTTMP/errors.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
@@ -76,6 +77,7 @@
   $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
   using http://localhost:$HGPORT/subdir/mount/point
   sending capabilities command
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   query 1; heads
   sending batch command
   requesting all changes
@@ -88,7 +90,6 @@
   adding file changes
   adding lfs.bin revisions
   added 1 changesets with 1 changes to 1 files
-  calling hook pretxnchangegroup.lfs: hgext.lfs.checkrequireslfs
   bundle2-input-part: total payload size 648
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
@@ -140,8 +141,9 @@
   getting lfs.bin
   lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
   $ cat $TESTTMP/access.log $TESTTMP/errors.log
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
@@ -150,6 +152,33 @@
   $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
   $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
 
+Blobs that already exist in the usercache are linked into the repo store, even
+though the client doesn't send the blob.
+
+  $ hg init server2
+  $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
+  >    -p $HGPORT --pid-file=hg.pid \
+  >    -A $TESTTMP/access.log -E $TESTTMP/errors.log
+  $ cat hg.pid >> $DAEMON_PIDS
+
+  $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
+  >    push http://localhost:$HGPORT | grep '^[{} ]'
+  {
+    "objects": [
+      {
+        "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
+        "size": 20
+      }
+    ]
+    "transfer": "basic"
+  }
+  $ find server2/.hg/store/lfs/objects | sort
+  server2/.hg/store/lfs/objects
+  server2/.hg/store/lfs/objects/f0
+  server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ cat $TESTTMP/errors.log
+
   $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
   > import errno
   > from hgext.lfs import blobstore
@@ -211,6 +240,7 @@
 Test an I/O error in localstore.verify() (Batch API) with GET
 
   $ hg clone http://localhost:$HGPORT1 httpclone2
+  (remote is using large file support (lfs); lfs will be enabled for this repository)
   requesting all changes
   adding changesets
   adding manifests
@@ -259,7 +289,7 @@
   abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
   [255]
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
   $ cat $TESTTMP/access.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
@@ -411,7 +441,7 @@
     "transfer": "basic"
   }
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
   $ cat $TESTTMP/access.log $TESTTMP/errors.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
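
Note: the new usercache test depends on the content-addressed layout visible
in the find output above: a blob's SHA-256 oid supplies a two-hex-digit
fan-out directory plus the remaining 62 digits. A sketch of deriving that
path for a blob, assuming only the layout shown (the function is
illustrative):

    import hashlib
    import os

    def lfsblobpath(storeroot, data):
        # The oid is the SHA-256 of the raw blob; its first two hex digits
        # become a directory, as in .hg/store/lfs/objects/f0/3217a3...
        oid = hashlib.sha256(data).hexdigest()
        return os.path.join(storeroot, 'lfs', 'objects', oid[:2], oid[2:])
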
--- a/tests/test-lfs-serve.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs-serve.t	Mon Oct 22 14:46:06 2018 -0400
@@ -35,6 +35,26 @@
   $ hg init server
   $ SERVER_REQUIRES="$TESTTMP/server/.hg/requires"
 
+  $ cat > $TESTTMP/debugprocessors.py <<EOF
+  > from mercurial import (
+  >     cmdutil,
+  >     commands,
+  >     pycompat,
+  >     registrar,
+  > )
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > @command(b'debugprocessors', [], b'FILE')
+  > def debugprocessors(ui, repo, file_=None, **opts):
+  >     opts = pycompat.byteskwargs(opts)
+  >     opts[b'changelog'] = False
+  >     opts[b'manifest'] = False
+  >     opts[b'dir'] = False
+  >     rl = cmdutil.openrevlog(repo, b'debugprocessors', file_, opts)
+  >     for flag, proc in rl._flagprocessors.iteritems():
+  >         ui.status(b"registered processor '%#x'\n" % (flag))
+  > EOF
+
 Skip the experimental.changegroup3=True config.  Failure to agree on this comes
 first, and causes a "ValueError: no common changegroup version" or "abort:
 HTTP Error 500: Internal Server Error", if the extension is only loaded on one
@@ -42,6 +62,8 @@
 for flag '0x2000'!" if the extension is only loaded on one side (possibly also
 masked by the Internal Server Error message).
   $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > debugprocessors = $TESTTMP/debugprocessors.py
   > [experimental]
   > lfs.disableusercache = True
   > [lfs]
@@ -51,6 +73,8 @@
   > push_ssl=False
   > EOF
 
+  $ cp $HGRCPATH $HGRCPATH.orig
+
 #if lfsremote-on
   $ hg --config extensions.lfs= -R server \
   >    serve -p $HGPORT -d --pid-file=hg.pid --errorlog=$TESTTMP/errors.log
@@ -114,12 +138,12 @@
   creating temporary repository to stage migrated data: * (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
-  migrating 132 bytes in store; 129 bytes tracked data
-  migrating 1 filelogs containing 1 revisions (9 bytes in store; 8 bytes tracked data)
+  migrating 324 bytes in store; 129 bytes tracked data
+  migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
   finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 1 revisions (53 bytes in store; 52 bytes tracked data)
+  migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
   finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 1 revisions (70 bytes in store; 69 bytes tracked data)
+  migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
   finished migrating 1 changelog revisions; change in size: 0 bytes
   finished migrating 3 total revisions; total change in store size: 0 bytes
   copying phaseroots
@@ -280,10 +304,10 @@
   $ grep 'lfs' .hg/requires $SERVER_REQUIRES
   $TESTTMP/server/.hg/requires:lfs
 
-TODO: fail more gracefully.
-
-  $ hg clone -q http://localhost:$HGPORT $TESTTMP/client4_clone
-  abort: HTTP Error 500: Internal Server Error
+  $ hg clone http://localhost:$HGPORT $TESTTMP/client4_clone
+  (remote is using large file support (lfs), but it is explicitly disabled in the local configuration)
+  abort: repository requires features unknown to this Mercurial: lfs!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
   [255]
   $ grep 'lfs' $TESTTMP/client4_clone/.hg/requires $SERVER_REQUIRES
   grep: $TESTTMP/client4_clone/.hg/requires: $ENOENT$
@@ -331,6 +355,103 @@
   $ hg identify http://localhost:$HGPORT
   c729025cc5e3
 
+  $ mv $HGRCPATH $HGRCPATH.tmp
+  $ cp $HGRCPATH.orig $HGRCPATH
+
+  >>> from __future__ import absolute_import
+  >>> from hgclient import check, readchannel, runcommand
+  >>> @check
+  ... def checkflags(server):
+  ...     readchannel(server)
+  ...     print('')
+  ...     print('# LFS required- both lfs and non-lfs revlogs have 0x2000 flag')
+  ...     runcommand(server, ['debugprocessors', 'lfs.bin', '-R',
+  ...                '../server'])
+  ...     runcommand(server, ['debugprocessors', 'nonlfs2.txt', '-R',
+  ...                '../server'])
+  ...     runcommand(server, ['config', 'extensions', '--cwd',
+  ...                '../server'])
+  ... 
+  ...     print("\n# LFS not enabled- revlogs don't have 0x2000 flag")
+  ...     runcommand(server, ['debugprocessors', 'nonlfs3.txt'])
+  ...     runcommand(server, ['config', 'extensions'])
+  
+  # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
+  *** runcommand debugprocessors lfs.bin -R ../server
+  registered processor '0x8000'
+  registered processor '0x2000'
+  *** runcommand debugprocessors nonlfs2.txt -R ../server
+  registered processor '0x8000'
+  registered processor '0x2000'
+  *** runcommand config extensions --cwd ../server
+  extensions.debugprocessors=$TESTTMP/debugprocessors.py
+  extensions.lfs=
+  
+  # LFS not enabled- revlogs don't have 0x2000 flag
+  *** runcommand debugprocessors nonlfs3.txt
+  registered processor '0x8000'
+  *** runcommand config extensions
+  extensions.debugprocessors=$TESTTMP/debugprocessors.py
+
+  $ rm $HGRCPATH
+  $ mv $HGRCPATH.tmp $HGRCPATH
+
+  $ hg clone $TESTTMP/client $TESTTMP/nonlfs -qr 0 --config extensions.lfs=
+  $ cat >> $TESTTMP/nonlfs/.hg/hgrc <<EOF
+  > [extensions]
+  > lfs = !
+  > EOF
+
+  >>> from __future__ import absolute_import, print_function
+  >>> from hgclient import check, readchannel, runcommand
+  >>> @check
+  ... def checkflags2(server):
+  ...     readchannel(server)
+  ...     print('')
+  ...     print('# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag')
+  ...     runcommand(server, ['debugprocessors', 'lfs.bin', '-R',
+  ...                '../server'])
+  ...     runcommand(server, ['debugprocessors', 'nonlfs2.txt', '-R',
+  ...                '../server'])
+  ...     runcommand(server, ['config', 'extensions', '--cwd',
+  ...                '../server'])
+  ... 
+  ...     print('\n# LFS enabled without requirement- revlogs have 0x2000 flag')
+  ...     runcommand(server, ['debugprocessors', 'nonlfs3.txt'])
+  ...     runcommand(server, ['config', 'extensions'])
+  ... 
+  ...     print("\n# LFS disabled locally- revlogs don't have 0x2000 flag")
+  ...     runcommand(server, ['debugprocessors', 'nonlfs.txt', '-R',
+  ...                '../nonlfs'])
+  ...     runcommand(server, ['config', 'extensions', '--cwd',
+  ...                '../nonlfs'])
+  
+  # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
+  *** runcommand debugprocessors lfs.bin -R ../server
+  registered processor '0x8000'
+  registered processor '0x2000'
+  *** runcommand debugprocessors nonlfs2.txt -R ../server
+  registered processor '0x8000'
+  registered processor '0x2000'
+  *** runcommand config extensions --cwd ../server
+  extensions.debugprocessors=$TESTTMP/debugprocessors.py
+  extensions.lfs=
+  
+  # LFS enabled without requirement- revlogs have 0x2000 flag
+  *** runcommand debugprocessors nonlfs3.txt
+  registered processor '0x8000'
+  registered processor '0x2000'
+  *** runcommand config extensions
+  extensions.debugprocessors=$TESTTMP/debugprocessors.py
+  extensions.lfs=
+  
+  # LFS disabled locally- revlogs don't have 0x2000 flag
+  *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
+  registered processor '0x8000'
+  *** runcommand config extensions --cwd ../nonlfs
+  extensions.debugprocessors=$TESTTMP/debugprocessors.py
+  extensions.lfs=!
+
 --------------------------------------------------------------------------------
 Case #6: client with lfs content and the extension enabled; server with
 lfs content, and the extension enabled.
@@ -386,6 +507,7 @@
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   updated to "d3b84d50eacb: lfs file with lfs client"
   1 other heads for branch "default"
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
   $ grep 'lfs' $TESTTMP/client6_pull/.hg/requires $SERVER_REQUIRES
   $TESTTMP/client6_pull/.hg/requires:lfs
   $TESTTMP/server/.hg/requires:lfs
@@ -533,14 +655,12 @@
 
 #endif
 
-  $ $PYTHON $TESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $TESTDIR/killdaemons.py $DAEMON_PIDS
 
 #if lfsremote-on
   $ cat $TESTTMP/errors.log | grep '^[A-Z]'
   Traceback (most recent call last):
   ValueError: no common changegroup version
-  Traceback (most recent call last):
-  ValueError: no common changegroup version
 #else
   $ cat $TESTTMP/errors.log
 #endif
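
Note: the debugprocessors helper above reports which revlog flag bits have
processors registered. 0x2000 is the flag lfs attaches to revisions whose
data is stored externally, and 0x8000 marks censored revisions; the constants
below mirror the values printed by the test, so treat this as a sketch:

    REVIDX_ISCENSORED = 0x8000  # printed as '0x8000' above
    REVIDX_EXTSTORED = 0x2000   # lfs: revision text lives outside the revlog

    def describeflags(flags):
        # A revision's flag word may combine several bits.
        names = []
        if flags & REVIDX_EXTSTORED:
            names.append('extstored (lfs)')
        if flags & REVIDX_ISCENSORED:
            names.append('censored')
        return names or ['none']
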
--- a/tests/test-lfs-test-server.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs-test-server.t	Mon Oct 22 14:46:06 2018 -0400
@@ -36,7 +36,7 @@
   >             sys.exit(0)
   > sys.exit(1)
   > EOF
-  $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
+  $ "$PYTHON" $TESTTMP/spawn.py >> $DAEMON_PIDS
 #endif
 
   $ cat >> $HGRCPATH <<EOF
@@ -694,10 +694,6 @@
   $ rm *
   $ hg revert --all -r 1 --debug
   http auth: user foo, password ***
-  adding a
-  reverting b
-  reverting c
-  reverting d
   http auth: user foo, password ***
   Status: 200
   Content-Length: 905 (git-server !)
@@ -778,9 +774,13 @@
   lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
   lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
   lfs: downloaded 3 files (51 bytes)
+  reverting b
   lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+  reverting c
   lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+  reverting d
   lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+  adding a
   lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
 
 Check error message when the remote missed a blob:
@@ -850,7 +850,7 @@
 
 (Restart the server in a different location so it no longer has the content)
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
 #if hg-server
   $ cat $TESTTMP/access.log $TESTTMP/errors.log
@@ -888,7 +888,7 @@
 #endif
 
 #if windows git-server
-  $ $PYTHON $TESTTMP/spawn.py >> $DAEMON_PIDS
+  $ "$PYTHON" $TESTTMP/spawn.py >> $DAEMON_PIDS
 #endif
 
 #if hg-server
@@ -938,4 +938,4 @@
   abort: LFS server error for "a": The object does not exist!
   [255]
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
--- a/tests/test-lfs.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lfs.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,5 +1,52 @@
 #require no-reposimplestore no-chg
 
+  $ hg init requirements
+  $ cd requirements
+
+# LFS not loaded by default.
+
+  $ hg config extensions
+  [1]
+
+# Adding lfs to requires file will auto-load lfs extension.
+
+  $ echo lfs >> .hg/requires
+  $ hg config extensions
+  extensions.lfs=
+
+# But only if there is no config entry for the extension already.
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > lfs=!
+  > EOF
+
+  $ hg config extensions
+  abort: repository requires features unknown to this Mercurial: lfs!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > lfs=
+  > EOF
+
+  $ hg config extensions
+  extensions.lfs=
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > lfs = missing.py
+  > EOF
+
+  $ hg config extensions
+  *** failed to import extension lfs from missing.py: [Errno 2] $ENOENT$: 'missing.py'
+  abort: repository requires features unknown to this Mercurial: lfs!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+  $ cd ..
+
 # Initial setup
 
   $ cat >> $HGRCPATH << EOF
@@ -248,8 +295,6 @@
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd repo7
-  $ hg config extensions --debug | grep lfs
-  $TESTTMP/repo7/.hg/hgrc:*: extensions.lfs= (glob)
   $ cat large
   LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES
   $ cat small
@@ -260,8 +305,8 @@
   $ hg --config extensions.share= share repo7 sharedrepo
   updating working directory
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R sharedrepo config extensions --debug | grep lfs
-  $TESTTMP/sharedrepo/.hg/hgrc:*: extensions.lfs= (glob)
+  $ grep lfs sharedrepo/.hg/requires
+  lfs
 
 # Test rename and status
 
@@ -282,9 +327,9 @@
   $ hg commit -m b
   $ hg status
   >>> with open('a2', 'wb') as f:
-  ...     f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA')
+  ...     f.write(b'\1\nSTART-WITH-HG-FILELOG-METADATA') and None
   >>> with open('a1', 'wb') as f:
-  ...     f.write(b'\1\nMETA\n')
+  ...     f.write(b'\1\nMETA\n') and None
   $ hg commit -m meta
   $ hg status
   $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n'
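
Note: the "and None" appended to the write() calls above is a Python 3
compatibility trick: a >>> test block echoes any expression that evaluates to
something other than None, and py3's file write() returns the byte count
where py2's built-in file returns None. A standalone illustration:

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'wb') as f:
        n = f.write(b'\1\nMETA\n')   # py3: 8; py2 built-in file: None
        # 'expr and None' is always None, so the test block stays silent
        assert (n and None) is None
    os.unlink(path)
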
@@ -486,7 +531,7 @@
   > [lfs]
   > track=all()
   > EOF
-  $ $PYTHON <<'EOF'
+  $ "$PYTHON" <<'EOF'
   > def write(path, content):
   >     with open(path, 'wb') as f:
   >         f.write(content)
@@ -504,9 +549,13 @@
    4 files changed, 2 insertions(+), 0 deletions(-)
   $ hg commit -m binarytest
   $ cat > $TESTTMP/dumpbinary.py << EOF
+  > from mercurial.utils import (
+  >     stringutil,
+  > )
   > def reposetup(ui, repo):
-  >     for n in 'abcd':
-  >         ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary()))
+  >     for n in (b'a', b'b', b'c', b'd'):
+  >         ui.write((b'%s: binary=%s\n')
+  >                   % (n, stringutil.pprint(repo[b'.'][n].isbinary())))
   > EOF
   $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace
   a: binary=True
@@ -634,23 +683,29 @@
   > # print raw revision sizes, flags, and hashes for certain files
   > import hashlib
   > from mercurial.node import short
-  > from mercurial import revlog
+  > from mercurial import (
+  >     pycompat,
+  >     revlog,
+  > )
+  > from mercurial.utils import (
+  >     stringutil,
+  > )
   > def hash(rawtext):
   >     h = hashlib.sha512()
   >     h.update(rawtext)
-  >     return h.hexdigest()[:4]
+  >     return pycompat.sysbytes(h.hexdigest()[:4])
   > def reposetup(ui, repo):
   >     # these 2 files are interesting
-  >     for name in ['l', 's']:
+  >     for name in [b'l', b's']:
   >         fl = repo.file(name)
   >         if len(fl) == 0:
   >             continue
-  >         sizes = [fl.rawsize(i) for i in fl]
+  >         sizes = [fl._revlog.rawsize(i) for i in fl]
   >         texts = [fl.revision(i, raw=True) for i in fl]
-  >         flags = [int(fl.flags(i)) for i in fl]
+  >         flags = [int(fl._revlog.flags(i)) for i in fl]
   >         hashes = [hash(t) for t in texts]
-  >         print('  %s: rawsizes=%r flags=%r hashes=%r'
-  >               % (name, sizes, flags, hashes))
+  >         pycompat.stdout.write(b'  %s: rawsizes=%r flags=%r hashes=%s\n'
+  >                               % (name, sizes, flags, stringutil.pprint(hashes)))
   > EOF
 
   $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \
@@ -722,7 +777,7 @@
   checking files
    l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
    large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -759,7 +814,7 @@
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
 
 Verify will not copy/link a corrupted file from the usercache into the local
 store, and poison it.  (The verify with a good remote now works.)
@@ -776,7 +831,7 @@
    large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -791,7 +846,7 @@
   lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
 
 Damaging a file required by the update destination fails the update.
 
@@ -817,7 +872,7 @@
   checking files
    l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
    large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -831,8 +886,6 @@
   pushing to dest
   searching for changes
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-  lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
-  lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
   abort: detected corrupt lfs object: 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e
   (run hg verify)
   [255]
@@ -848,7 +901,7 @@
    large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
-  4 files, 5 changesets, 10 total revisions
+  checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -977,9 +1030,6 @@
   size 39
   x-is-binary 0
 
-  $ hg -R convert_lfs2 config --debug extensions | grep lfs
-  $TESTTMP/convert_lfs2/.hg/hgrc:*: extensions.lfs= (glob)
-
 Committing deleted files works:
 
   $ hg init $TESTTMP/repo-del
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-linelog.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,193 @@
+from __future__ import absolute_import, print_function
+
+import difflib
+import random
+import unittest
+
+from mercurial import linelog
+
+vecratio = 3 # number of replacelines / number of replacelines_vec
+maxlinenum = 0xffffff
+maxb1 = 0xffffff
+maxdeltaa = 10
+maxdeltab = 10
+
+def _genedits(seed, endrev):
+    lines = []
+    random.seed(seed)
+    rev = 0
+    for rev in range(0, endrev):
+        n = len(lines)
+        a1 = random.randint(0, n)
+        a2 = random.randint(a1, min(n, a1 + maxdeltaa))
+        b1 = random.randint(0, maxb1)
+        b2 = random.randint(b1, b1 + maxdeltab)
+        usevec = not bool(random.randint(0, vecratio))
+        if usevec:
+            blines = [(random.randint(0, rev), random.randint(0, maxlinenum))
+                      for _ in range(b1, b2)]
+        else:
+            blines = [(rev, bidx) for bidx in range(b1, b2)]
+        lines[a1:a2] = blines
+        yield lines, rev, a1, a2, b1, b2, blines, usevec
+
+class linelogtests(unittest.TestCase):
+    def testlinelogencodedecode(self):
+        program = [linelog._eof(0, 0),
+                   linelog._jge(41, 42),
+                   linelog._jump(0, 43),
+                   linelog._eof(0, 0),
+                   linelog._jl(44, 45),
+                   linelog._line(46, 47),
+                   ]
+        ll = linelog.linelog(program, maxrev=100)
+        enc = ll.encode()
+        # round-trips okay
+        self.assertEqual(linelog.linelog.fromdata(enc)._program, ll._program)
+        self.assertEqual(linelog.linelog.fromdata(enc), ll)
+        # This encoding matches the encoding used by hg-experimental's
+        # linelog file, or at least it is supposed to.
+        self.assertEqual(enc, (b'\x00\x00\x01\x90\x00\x00\x00\x06'
+                               b'\x00\x00\x00\xa4\x00\x00\x00*'
+                               b'\x00\x00\x00\x00\x00\x00\x00+'
+                               b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                               b'\x00\x00\x00\xb1\x00\x00\x00-'
+                               b'\x00\x00\x00\xba\x00\x00\x00/'))
+
+    def testsimpleedits(self):
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        # delete a line out of 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # annotation of 1 is unchanged
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        ll.annotate(3) # set internal state to revision 3
+        start = ll.getoffset(0)
+        end = ll.getoffset(1)
+        self.assertEqual(ll.getalllines(start, end), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+        ])
+        self.assertEqual(ll.getalllines(), [
+            (1, 0),
+            (2, 1),
+            (1, 1),
+            (1, 2),
+        ])
+
+    def testparseclinelogfile(self):
+        # This data is what the replacements in testsimpleedits
+        # produce when fed to the original linelog.c implementation.
+        data = (b'\x00\x00\x00\x0c\x00\x00\x00\x0f'
+                b'\x00\x00\x00\x00\x00\x00\x00\x02'
+                b'\x00\x00\x00\x05\x00\x00\x00\x06'
+                b'\x00\x00\x00\x06\x00\x00\x00\x00'
+                b'\x00\x00\x00\x00\x00\x00\x00\x07'
+                b'\x00\x00\x00\x06\x00\x00\x00\x02'
+                b'\x00\x00\x00\x00\x00\x00\x00\x00'
+                b'\x00\x00\x00\t\x00\x00\x00\t'
+                b'\x00\x00\x00\x00\x00\x00\x00\x0c'
+                b'\x00\x00\x00\x08\x00\x00\x00\x05'
+                b'\x00\x00\x00\x06\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\x05'
+                b'\x00\x00\x00\x0c\x00\x00\x00\x05'
+                b'\x00\x00\x00\n\x00\x00\x00\x01'
+                b'\x00\x00\x00\x00\x00\x00\x00\t')
+        llc = linelog.linelog.fromdata(data)
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(1)],
+                         [(1, 0),
+                          (1, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(2)],
+                         [(1, 0),
+                          (2, 1),
+                          (1, 2),
+                         ])
+        self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(3)],
+                         [(1, 0),
+                          (1, 2),
+                         ])
+        # Check we emit the same bytecode.
+        ll = linelog.linelog()
+        # Initial revision: add lines 0, 1, and 2
+        ll.replacelines(1, 0, 0, 0, 3)
+        # Replace line 1 with a new line
+        ll.replacelines(2, 1, 2, 1, 2)
+        # delete a line out of 2
+        ll.replacelines(3, 1, 2, 0, 0)
+        diff = '\n   ' + '\n   '.join(difflib.unified_diff(
+            ll.debugstr().splitlines(), llc.debugstr().splitlines(),
+            'python', 'c', lineterm=''))
+        self.assertEqual(ll._program, llc._program, 'Program mismatch: ' + diff)
+        # Done as a secondary step so we get a better result if the
+        # program is where the mismatch is.
+        self.assertEqual(ll, llc)
+        self.assertEqual(ll.encode(), data)
+
+    def testanothersimplecase(self):
+        ll = linelog.linelog()
+        ll.replacelines(3, 0, 0, 0, 2)
+        ll.replacelines(4, 0, 2, 0, 0)
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)],
+                         [])
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)],
+                         [(3, 0), (3, 1)])
+        # rev 2 is empty because contents were only ever introduced in rev 3
+        self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)],
+                         [])
+
+    def testrandomedits(self):
+        # Inspired by original linelog tests.
+        seed = random.random()
+        numrevs = 2000
+        ll = linelog.linelog()
+        # Populate linelog
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            if usevec:
+                ll.replacelines_vec(rev, a1, a2, blines)
+            else:
+                ll.replacelines(rev, a1, a2, b1, b2)
+            ar = ll.annotate(rev)
+            self.assertEqual(ll.annotateresult, lines)
+        # Verify we can get back these states by annotating each rev
+        for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits(
+                seed, numrevs):
+            ar = ll.annotate(rev)
+            self.assertEqual([(l.rev, l.linenum) for l in ar], lines)
+
+    def testinfinitebadprogram(self):
+        ll = linelog.linelog.fromdata(
+            b'\x00\x00\x00\x00\x00\x00\x00\x02'  # header
+            b'\x00\x00\x00\x00\x00\x00\x00\x01'  # JUMP to self
+        )
+        with self.assertRaises(linelog.LineLogError):
+            # should not be an infinite loop and raise
+            ll.annotate(1)
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
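
Note: the byte strings in testlinelogencodedecode and testparseclinelogfile
are sequences of 8-byte records, each a pair of big-endian 32-bit words (the
first pair serving as a header). A quick inspection helper, assuming only
that framing:

    import struct

    def dumpwords(data):
        # Print each linelog record as its two big-endian uint32 words.
        for off in range(0, len(data), 8):
            a, b = struct.unpack('>II', data[off:off + 8])
            print('%4d: %#010x %#010x' % (off // 8, a, b))
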
--- a/tests/test-locate.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-locate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -158,7 +158,7 @@
 
 Convert native path separator to slash (issue5572)
 
-  $ hg files -T '{path|slashpath}\n'
+  $ hg files -T '{path|relpath|slashpath}\n'
   ../b
   ../dir.h/foo
   ../t.h
--- a/tests/test-lock.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lock.py	Mon Oct 22 14:46:06 2018 -0400
@@ -2,19 +2,20 @@
 
 import copy
 import errno
-import os
-import silenttestrunner
 import tempfile
 import types
 import unittest
 
+import silenttestrunner
+
 from mercurial import (
+    encoding,
     error,
     lock,
     vfs as vfsmod,
 )
 
-testlockname = 'testlock'
+testlockname = b'testlock'
 
 # work around http://bugs.python.org/issue1515
 if types.MethodType not in copy._deepcopy_dispatch:
@@ -106,7 +107,7 @@
 
 class testlock(unittest.TestCase):
     def testlock(self):
-        state = teststate(self, tempfile.mkdtemp(dir=os.getcwd()))
+        state = teststate(self, tempfile.mkdtemp(dir=encoding.getcwd()))
         lock = state.makelock()
         state.assertacquirecalled(True)
         lock.release()
@@ -115,7 +116,7 @@
         state.assertlockexists(False)
 
     def testrecursivelock(self):
-        state = teststate(self, tempfile.mkdtemp(dir=os.getcwd()))
+        state = teststate(self, tempfile.mkdtemp(dir=encoding.getcwd()))
         lock = state.makelock()
         state.assertacquirecalled(True)
 
@@ -135,7 +136,7 @@
         state.assertlockexists(False)
 
     def testlockfork(self):
-        state = teststate(self, tempfile.mkdtemp(dir=os.getcwd()))
+        state = teststate(self, tempfile.mkdtemp(dir=encoding.getcwd()))
         lock = state.makelock()
         state.assertacquirecalled(True)
 
@@ -154,7 +155,7 @@
         state.assertlockexists(False)
 
     def testinheritlock(self):
-        d = tempfile.mkdtemp(dir=os.getcwd())
+        d = tempfile.mkdtemp(dir=encoding.getcwd())
         parentstate = teststate(self, d)
         parentlock = parentstate.makelock()
         parentstate.assertacquirecalled(True)
@@ -184,7 +185,7 @@
         parentstate.assertlockexists(False)
 
     def testmultilock(self):
-        d = tempfile.mkdtemp(dir=os.getcwd())
+        d = tempfile.mkdtemp(dir=encoding.getcwd())
         state0 = teststate(self, d)
         lock0 = state0.makelock()
         state0.assertacquirecalled(True)
@@ -225,7 +226,7 @@
         lock0.release()
 
     def testinheritlockfork(self):
-        d = tempfile.mkdtemp(dir=os.getcwd())
+        d = tempfile.mkdtemp(dir=encoding.getcwd())
         parentstate = teststate(self, d)
         parentlock = parentstate.makelock()
         parentstate.assertacquirecalled(True)
@@ -253,7 +254,7 @@
         parentlock.release()
 
     def testinheritcheck(self):
-        d = tempfile.mkdtemp(dir=os.getcwd())
+        d = tempfile.mkdtemp(dir=encoding.getcwd())
         state = teststate(self, d)
         def check():
             raise error.LockInheritanceContractViolation('check failed')
@@ -273,7 +274,7 @@
         retrying 5 times.
         """
 
-        d = tempfile.mkdtemp(dir=os.getcwd())
+        d = tempfile.mkdtemp(dir=encoding.getcwd())
         state = teststate(self, d)
 
         def emulatefrequentlock(*args):
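
Note: swapping os.getcwd() for encoding.getcwd() keeps the lock tests
byte-safe on Python 3, where Mercurial handles filesystem paths as bytes. A
rough stand-in for what that helper provides (an assumption about its
behavior, not Mercurial's implementation):

    import os
    import sys

    def getcwdbytes():
        # Mercurial-style: working directory as bytes on py3, native on py2.
        if sys.version_info[0] >= 3:
            return os.getcwdb()
        return os.getcwd()
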
--- a/tests/test-log-exthook.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-log-exthook.t	Mon Oct 22 14:46:06 2018 -0400
@@ -3,13 +3,14 @@
 
   $ cat > $TESTTMP/logexthook.py <<EOF
   > from __future__ import absolute_import
+  > import codecs
   > from mercurial import (
   >   commands,
   >   logcmdutil,
   >   repair,
   > )
   > def rot13description(self, ctx):
-  >     summary = "summary".encode('rot13')
+  >     summary = codecs.encode("summary", 'rot-13')
   >     description = ctx.description().strip().splitlines()[0].encode('rot13')
   >     self.ui.write("%s:     %s\n" % (summary, description))
   > def reposetup(ui, repo):
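
Note: str.encode('rot13') only works on Python 2, where rot13 is a str-to-str
codec reachable through .encode(); Python 3 keeps the codec but exposes it
only via the codecs module, which is why the hook now calls codecs.encode().
For example:

    import codecs

    print(codecs.encode('summary', 'rot-13'))   # fhzznel
    print(codecs.encode('fhzznel', 'rot-13'))   # summary: rot13 round-trips
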
--- a/tests/test-log.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-log.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2126,6 +2126,7 @@
   phase:       draft
   parent:      0:65624cd9070a035fa7191a54f2b8af39f16b0c08
   parent:      -1:0000000000000000000000000000000000000000
+  manifest:    2147483647:ffffffffffffffffffffffffffffffffffffffff
   user:        test
   date:        [A-Za-z0-9:+ ]+ (re)
   extra:       branch=default
@@ -2164,6 +2165,7 @@
   phase:       draft
   parent:      0:65624cd9070a035fa7191a54f2b8af39f16b0c08
   parent:      -1:0000000000000000000000000000000000000000
+  manifest:    2147483647:ffffffffffffffffffffffffffffffffffffffff
   user:        test
   date:        [A-Za-z0-9:+ ]+ (re)
   files:       d1/f1
@@ -2208,10 +2210,10 @@
     "branch": "default",
     "date": [*, 0], (glob)
     "desc": "",
-    "node": null,
+    "node": "ffffffffffffffffffffffffffffffffffffffff",
     "parents": ["65624cd9070a035fa7191a54f2b8af39f16b0c08"],
     "phase": "draft",
-    "rev": null,
+    "rev": 2147483647,
     "tags": [],
     "user": "test"
    }
@@ -2220,8 +2222,8 @@
   $ hg log -r 'wdir()' -Tjson -q
   [
    {
-    "node": null,
-    "rev": null
+    "node": "ffffffffffffffffffffffffffffffffffffffff",
+    "rev": 2147483647
    }
   ]
 
@@ -2234,13 +2236,13 @@
     "date": [*, 0], (glob)
     "desc": "",
     "extra": {"branch": "default"},
-    "manifest": null,
+    "manifest": "ffffffffffffffffffffffffffffffffffffffff",
     "modified": ["d1/f1"],
-    "node": null,
+    "node": "ffffffffffffffffffffffffffffffffffffffff",
     "parents": ["65624cd9070a035fa7191a54f2b8af39f16b0c08"],
     "phase": "draft",
     "removed": [".d6/f1"],
-    "rev": null,
+    "rev": 2147483647,
     "tags": [],
     "user": "test"
    }
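
Note: the template and JSON output now renders the working directory as the
reserved pseudo-revision 2147483647 (0x7fffffff) with a node of forty 'f'
characters instead of null. Those constants come from mercurial.node; a
minimal check, assuming the 4.8 module layout:

    from mercurial.node import hex, wdirid, wdirrev

    assert wdirrev == 2147483647     # 0x7fffffff, the largest 32-bit revision
    assert hex(wdirid) == b'f' * 40  # ffff...ffff, as shown by {node} above
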
--- a/tests/test-logexchange.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-logexchange.t	Mon Oct 22 14:46:06 2018 -0400
@@ -3,7 +3,7 @@
 
   $ cat >> $HGRCPATH << EOF
   > [ui]
-  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
   > [alias]
   > glog = log -G -T '{rev}:{node|short}  {desc}'
   > [extensions]
@@ -77,10 +77,8 @@
 
   $ hg show work
   o  3e14 (wat) (default/wat) added bar
-  |
   ~
   @  ec24 (default/default) Added h
-  |
   ~
 
   $ hg update "default/wat"
@@ -291,6 +289,7 @@
   ~
 
 Updating to revision using hoisted name
+---------------------------------------
 
 Deleting local bookmark to make sure we update to hoisted name only
 
@@ -395,3 +394,140 @@
      default/bar               6:87d6d6676308
      default/foo               8:3e1487808078
    * foo                       8:3e1487808078
+
+Testing the names argument to remotenames, remotebranches and remotebookmarks revsets
+--------------------------------------------------------------------------------------
+
+  $ cd ..
+  $ hg clone ssh://user@dummy/server client2
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 9 changesets with 9 changes to 9 files (+1 heads)
+  new changesets 18d04c59bb5d:3e1487808078
+  updating to branch default
+  8 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server2
+  $ hg up wat
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo foo > watwat
+  $ hg ci -Aqm "added watwat"
+  $ hg bookmark bar
+  abort: bookmark 'bar' already exists (use -f to force)
+  [255]
+  $ hg up ec24
+  3 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo i > i
+  $ hg ci -Aqm "added i"
+
+  $ cd ../client2
+  $ echo "[paths]" >> .hg/hgrc
+  $ echo "server2 = $TESTTMP/server2" >> .hg/hgrc
+  $ hg pull server2
+  pulling from $TESTTMP/server2
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  new changesets f34adec73c21:bf433e48adea
+  (run 'hg update' to get a working copy)
+
+  $ hg log -Gr 'remotenames()' -T '{rev}:{node|short} {desc}\n({remotebranches})  [{remotebookmarks}]\n\n'
+  o  10:bf433e48adea added i
+  |  (server2/default)  []
+  |
+  | o  9:f34adec73c21 added watwat
+  | |  (server2/wat)  []
+  | |
+  | o  8:3e1487808078 added bar
+  | :  (default/wat)  [default/foo]
+  | :
+  @ :  7:ec2426147f0e Added h
+  | :  (default/default)  []
+  | :
+  o :  6:87d6d6676308 Added g
+  :/   ()  [default/bar server2/bar]
+  :
+  o  3:62615734edd5 Added d
+  |  ()  [server2/foo]
+  ~
+
+Testing for a single remote name which exists
+
+  $ hg log -r 'remotebranches("default/wat")' -GT "{rev}:{node|short} {remotebranches}\n"
+  o  8:3e1487808078 default/wat
+  |
+  ~
+
+  $ hg log -r 'remotebookmarks("server2/foo")' -GT "{rev}:{node|short} {remotebookmarks}\n"
+  o  3:62615734edd5 server2/foo
+  |
+  ~
+
+  $ hg log -r 'remotenames("re:default")' -GT "{rev}:{node|short} {remotenames}\n"
+  o  10:bf433e48adea server2/default
+  |
+  | o  8:3e1487808078 default/foo default/wat
+  | |
+  | ~
+  @  7:ec2426147f0e default/default
+  |
+  o  6:87d6d6676308 default/bar server2/bar
+  |
+  ~
+
+Testing for a literal name which does not exist, which should fail.
+
+  $ hg log -r 'remotebranches(def)' -GT "{rev}:{node|short} {remotenames}\n"
+  abort: remote name 'def' does not exist!
+  [255]
+
+  $ hg log -r 'remotebookmarks("server3")' -GT "{rev}:{node|short} {remotenames}\n"
+  abort: remote name 'server3' does not exist!
+  [255]
+
+  $ hg log -r 'remotenames("server3")' -GT "{rev}:{node|short} {remotenames}\n"
+  abort: remote name 'server3' does not exist!
+  [255]
+
+Testing for a pattern which does not match anything, which shouldn't fail.
+
+  $ hg log -r 'remotenames("re:^server3$")'
+
+Testing for multiple names, which is not supported.
+
+  $ hg log -r 'remotenames("re:default", "re:server2")' -GT "{rev}:{node|short} {remotenames}\n"
+  hg: parse error: only one argument accepted
+  [255]
+
+  $ hg log -r 'remotebranches("default/wat", "server2/wat")' -GT "{rev}:{node|short} {remotebranches}\n"
+  hg: parse error: only one argument accepted
+  [255]
+
+  $ hg log -r 'remotebookmarks("default/foo", "server2/foo")' -GT "{rev}:{node|short} {remotebookmarks}\n"
+  hg: parse error: only one argument accepted
+  [255]
+
+Testing pattern matching
+
+  $ hg log -r 'remotenames("re:def")' -GT "{rev}:{node|short} {remotenames}\n"
+  o  10:bf433e48adea server2/default
+  |
+  | o  8:3e1487808078 default/foo default/wat
+  | |
+  | ~
+  @  7:ec2426147f0e default/default
+  |
+  o  6:87d6d6676308 default/bar server2/bar
+  |
+  ~
+
+  $ hg log -r 'remotebranches("re:ser.*2")' -GT "{rev}:{node|short} {remotebranches}\n"
+  o  10:bf433e48adea server2/default
+  |
+  ~
+  o  9:f34adec73c21 server2/wat
+  |
+  ~
--- a/tests/test-logtoprocess.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-logtoprocess.t	Mon Oct 22 14:46:06 2018 -0400
@@ -17,9 +17,9 @@
   > configitem('logtoprocess', 'foo',
   >     default=None,
   > )
-  > @command(b'foo', [])
+  > @command(b'foobar', [])
   > def foo(ui, repo):
-  >     ui.log('foo', 'a message: %(bar)s\n', bar='spam')
+  >     ui.log('foo', 'a message: %s\n', 'spam')
   > EOF
   $ cp $HGRCPATH $HGRCPATH.bak
   $ cat >> $HGRCPATH << EOF
@@ -27,47 +27,51 @@
   > logtoprocess=
   > foocommand=$TESTTMP/foocommand.py
   > [logtoprocess]
-  > command=echo 'logtoprocess command output:';
+  > command=(echo 'logtoprocess command output:';
   >     echo "\$EVENT";
   >     echo "\$MSG1";
-  >     echo "\$MSG2"
-  > commandfinish=echo 'logtoprocess commandfinish output:';
+  >     echo "\$MSG2") > $TESTTMP/command.log
+  > commandfinish=(echo 'logtoprocess commandfinish output:';
   >     echo "\$EVENT";
   >     echo "\$MSG1";
   >     echo "\$MSG2";
-  >     echo "\$MSG3"
-  > foo=echo 'logtoprocess foo output:';
+  >     echo "\$MSG3";
+  >     echo "canonical: \$OPT_CANONICAL_COMMAND") > $TESTTMP/commandfinish.log
+  > foo=(echo 'logtoprocess foo output:';
   >     echo "\$EVENT";
   >     echo "\$MSG1";
-  >     echo "\$OPT_BAR"
+  >     echo "\$MSG2") > $TESTTMP/foo.log
   > EOF
 
 Running a command triggers both a ui.log('command') and a
 ui.log('commandfinish') call. The foo command also uses ui.log.
 
 Use sort to avoid ordering issues between the various processes we spawn:
-  $ hg foo | cat | sort
-  
-  
+  $ hg fooba
+  $ sleep 1
+  $ cat $TESTTMP/command.log | sort
   
-   (chg !)
+  command
+  fooba
+  fooba
+  logtoprocess command output:
+
+#if no-chg
+  $ cat $TESTTMP/commandfinish.log | sort
+  
   0
+  canonical: foobar
+  commandfinish
+  fooba
+  fooba exited 0 after * seconds (glob)
+  logtoprocess commandfinish output:
+  $ cat $TESTTMP/foo.log | sort
+  
   a message: spam
-  command
-  command (chg !)
-  commandfinish
   foo
-  foo
-  foo
-  foo
-  foo exited 0 after * seconds (glob)
-  logtoprocess command output:
-  logtoprocess command output: (chg !)
-  logtoprocess commandfinish output:
   logtoprocess foo output:
-  serve --cmdserver chgunix * (glob) (chg !)
-  serve --cmdserver chgunix * (glob) (chg !)
   spam
+#endif
 
 Confirm that logging blocked time catches stdio properly:
   $ cp $HGRCPATH.bak $HGRCPATH
@@ -76,10 +80,47 @@
   > logtoprocess=
   > pager=
   > [logtoprocess]
-  > uiblocked=echo "\$EVENT stdio \$OPT_STDIO_BLOCKED ms command \$OPT_COMMAND_DURATION ms"
+  > uiblocked=echo "\$EVENT stdio \$OPT_STDIO_BLOCKED ms command \$OPT_COMMAND_DURATION ms" > $TESTTMP/uiblocked.log
   > [ui]
   > logblockedtimes=True
   > EOF
 
-  $ hg log | cat
+  $ hg log
+  $ sleep 1
+  $ cat $TESTTMP/uiblocked.log
   uiblocked stdio [0-9]+.[0-9]* ms command [0-9]+.[0-9]* ms (re)
+
+Try to confirm that the pager does not wait on logtoprocess:
+
+Add a script that waits up to 5 seconds for a file to appear; when it sees the
+file it touches a second file, otherwise it dies at the timeout. If hg waits
+on the script, the script dies before we can touch the first file and the
+second file will not exist. If hg does not wait, we touch the file in time and
+the second file appears. (A detached-spawn sketch follows this test.)
+
+  $ cat > $TESTTMP/wait-output.sh << EOF
+  > #!/bin/sh
+  > for i in \`$TESTDIR/seq.py 50\`; do
+  >   if [ -f "$TESTTMP/wait-for-touched" ];
+  >   then
+  >     touch "$TESTTMP/touched";
+  >     break;
+  >   else
+  >     sleep 0.1;
+  >   fi
+  > done
+  > EOF
+  $ chmod +x $TESTTMP/wait-output.sh
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > logtoprocess=
+  > pager=
+  > [logtoprocess]
+  > commandfinish=$TESTTMP/wait-output.sh
+  > EOF
+  $ hg version -q --pager=always
+  Mercurial Distributed SCM (version *) (glob)
+  $ touch $TESTTMP/wait-for-touched
+  $ sleep 0.2
+  $ test -f $TESTTMP/touched && echo "SUCCESS Pager is not waiting on ltp" || echo "FAIL Pager is waiting on ltp"
+  SUCCESS Pager is not waiting on ltp
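
Note: these tests now pass because logtoprocess spawns its scripts fully
detached, so hg and its pager can exit while the hook is still polling. A
rough Python equivalent of such a detached spawn, assuming POSIX semantics
(an illustration, not the extension's actual code):

    import subprocess

    def spawndetached(script, env):
        # Start the hook and deliberately never wait() on it; redirecting
        # the standard streams keeps the pager from blocking on the child.
        subprocess.Popen(script, shell=True, env=env,
                         stdin=subprocess.DEVNULL,
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL)
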
--- a/tests/test-lrucachedict.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-lrucachedict.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,77 +1,337 @@
 from __future__ import absolute_import, print_function
 
+import unittest
+
+import silenttestrunner
+
 from mercurial import (
     util,
 )
 
-def printifpresent(d, xs, name='d'):
-    for x in xs:
-        present = x in d
-        print("'%s' in %s: %s" % (x, name, present))
-        if present:
-            print("%s['%s']: %s" % (name, x, d[x]))
+class testlrucachedict(unittest.TestCase):
+    def testsimple(self):
+        d = util.lrucachedict(4)
+        self.assertEqual(d.capacity, 4)
+        d.insert('a', 'va', cost=2)
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+        d.insert('d', 'vd', cost=42)
+
+        self.assertEqual(d['a'], 'va')
+        self.assertEqual(d['b'], 'vb')
+        self.assertEqual(d['c'], 'vc')
+        self.assertEqual(d['d'], 'vd')
+
+        self.assertEqual(d.totalcost, 44)
+
+        # 'a' should be dropped because it was least recently used.
+        d['e'] = 've'
+        self.assertNotIn('a', d)
+        self.assertIsNone(d.get('a'))
+        self.assertEqual(d.totalcost, 42)
+
+        self.assertEqual(d['b'], 'vb')
+        self.assertEqual(d['c'], 'vc')
+        self.assertEqual(d['d'], 'vd')
+        self.assertEqual(d['e'], 've')
+
+        # Replacing item with different cost adjusts totalcost.
+        d.insert('e', 've', cost=4)
+        self.assertEqual(d.totalcost, 46)
+
+        # Touch entries in some order (both get and set).
+        d['e']
+        d['c'] = 'vc2'
+        d['d']
+        d['b'] = 'vb2'
 
-def test_lrucachedict():
-    d = util.lrucachedict(4)
-    d['a'] = 'va'
-    d['b'] = 'vb'
-    d['c'] = 'vc'
-    d['d'] = 'vd'
+        # 'e' should be dropped now
+        d['f'] = 'vf'
+        self.assertNotIn('e', d)
+        self.assertEqual(d['b'], 'vb2')
+        self.assertEqual(d['c'], 'vc2')
+        self.assertEqual(d['d'], 'vd')
+        self.assertEqual(d['f'], 'vf')
+
+        d.clear()
+        for key in ('a', 'b', 'c', 'd', 'e', 'f'):
+            self.assertNotIn(key, d)
+
+    def testunfull(self):
+        d = util.lrucachedict(4)
+        d['a'] = 1
+        d['b'] = 2
+        d['a']
+        d['b']
 
-    # all of these should be present
-    printifpresent(d, ['a', 'b', 'c', 'd'])
+        for key in ('a', 'b'):
+            self.assertIn(key, d)
+
+    def testget(self):
+        d = util.lrucachedict(4)
+        d['a'] = 'va'
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+
+        self.assertIsNone(d.get('missing'))
+        self.assertEqual(list(d), ['c', 'b', 'a'])
+
+        self.assertEqual(d.get('a'), 'va')
+        self.assertEqual(list(d), ['a', 'c', 'b'])
+
+    def testcopypartial(self):
+        d = util.lrucachedict(4)
+        d.insert('a', 'va', cost=4)
+        d.insert('b', 'vb', cost=2)
+
+        dc = d.copy()
 
-    # 'a' should be dropped because it was least recently used
-    d['e'] = 've'
-    printifpresent(d, ['a', 'b', 'c', 'd', 'e'])
+        self.assertEqual(len(dc), 2)
+        self.assertEqual(dc.totalcost, 6)
+        for key in ('a', 'b'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        self.assertEqual(len(d), 2)
+        for key in ('a', 'b'):
+            self.assertIn(key, d)
+            self.assertEqual(d[key], 'v%s' % key)
 
-    assert d.get('a') is None
-    assert d.get('e') == 've'
+        d['c'] = 'vc'
+        del d['b']
+        self.assertEqual(d.totalcost, 4)
+        dc = d.copy()
+        self.assertEqual(len(dc), 2)
+        self.assertEqual(dc.totalcost, 4)
+        for key in ('a', 'c'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+    def testcopyempty(self):
+        d = util.lrucachedict(4)
+        dc = d.copy()
+        self.assertEqual(len(dc), 0)
+
+    def testcopyfull(self):
+        d = util.lrucachedict(4)
+        d.insert('a', 'va', cost=42)
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+        d['d'] = 'vd'
+
+        dc = d.copy()
+
+        for key in ('a', 'b', 'c', 'd'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
 
-    # touch entries in some order (get or set).
-    d['e']
-    d['c'] = 'vc2'
-    d['d']
-    d['b'] = 'vb2'
+        self.assertEqual(d.totalcost, 42)
+        self.assertEqual(dc.totalcost, 42)
+
+        # 'a' should be dropped because it was least recently used.
+        dc['e'] = 've'
+        self.assertNotIn('a', dc)
+        for key in ('b', 'c', 'd', 'e'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        self.assertEqual(d.totalcost, 42)
+        self.assertEqual(dc.totalcost, 0)
+
+        # Contents and order of original dict should remain unchanged.
+        dc['b'] = 'vb_new'
+
+        self.assertEqual(list(iter(d)), ['d', 'c', 'b', 'a'])
+        for key in ('a', 'b', 'c', 'd'):
+            self.assertEqual(d[key], 'v%s' % key)
 
-    # 'e' should be dropped now
-    d['f'] = 'vf'
-    printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
+        d = util.lrucachedict(4, maxcost=42)
+        d.insert('a', 'va', cost=5)
+        d.insert('b', 'vb', cost=4)
+        d.insert('c', 'vc', cost=3)
+        dc = d.copy()
+        self.assertEqual(dc.maxcost, 42)
+        self.assertEqual(len(dc), 3)
+
+        # Max cost can be lowered as part of copy.
+        dc = d.copy(maxcost=10)
+        self.assertEqual(dc.maxcost, 10)
+        self.assertEqual(len(dc), 2)
+        self.assertEqual(dc.totalcost, 7)
+        self.assertIn('b', dc)
+        self.assertIn('c', dc)
+
+    def testcopydecreasecapacity(self):
+        d = util.lrucachedict(5)
+        d.insert('a', 'va', cost=4)
+        d.insert('b', 'vb', cost=2)
+        d['c'] = 'vc'
+        d['d'] = 'vd'
 
-    d.clear()
-    printifpresent(d, ['b', 'c', 'd', 'e', 'f'])
+        dc = d.copy(2)
+        self.assertEqual(dc.totalcost, 0)
+        for key in ('a', 'b'):
+            self.assertNotIn(key, dc)
+        for key in ('c', 'd'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        dc.insert('e', 've', cost=7)
+        self.assertEqual(dc.totalcost, 7)
+        self.assertNotIn('c', dc)
+        for key in ('d', 'e'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        # Original should remain unchanged.
+        self.assertEqual(d.totalcost, 6)
+        for key in ('a', 'b', 'c', 'd'):
+            self.assertIn(key, d)
+            self.assertEqual(d[key], 'v%s' % key)
+
+    def testcopyincreasecapacity(self):
+        d = util.lrucachedict(5)
+        d['a'] = 'va'
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+        d['d'] = 'vd'
+
+        dc = d.copy(6)
+        for key in ('a', 'b', 'c', 'd'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
 
-    # Now test dicts that aren't full.
-    d = util.lrucachedict(4)
-    d['a'] = 1
-    d['b'] = 2
-    d['a']
-    d['b']
-    printifpresent(d, ['a', 'b'])
+        dc['e'] = 've'
+        dc['f'] = 'vf'
+        for key in ('a', 'b', 'c', 'd', 'e', 'f'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        dc['g'] = 'vg'
+        self.assertNotIn('a', dc)
+        for key in ('b', 'c', 'd', 'e', 'f', 'g'):
+            self.assertIn(key, dc)
+            self.assertEqual(dc[key], 'v%s' % key)
+
+        # Original should remain unchanged.
+        for key in ('a', 'b', 'c', 'd'):
+            self.assertIn(key, d)
+            self.assertEqual(d[key], 'v%s' % key)
+
+    def testpopoldest(self):
+        d = util.lrucachedict(4)
+        d.insert('a', 'va', cost=10)
+        d.insert('b', 'vb', cost=5)
+
+        self.assertEqual(len(d), 2)
+        self.assertEqual(d.popoldest(), ('a', 'va'))
+        self.assertEqual(len(d), 1)
+        self.assertEqual(d.totalcost, 5)
+        self.assertEqual(d.popoldest(), ('b', 'vb'))
+        self.assertEqual(len(d), 0)
+        self.assertEqual(d.totalcost, 0)
+        self.assertIsNone(d.popoldest())
+
+        d['a'] = 'va'
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+        d['d'] = 'vd'
+
+        self.assertEqual(d.popoldest(), ('a', 'va'))
+        self.assertEqual(len(d), 3)
+        for key in ('b', 'c', 'd'):
+            self.assertEqual(d[key], 'v%s' % key)
+
+        d['a'] = 'va'
+        self.assertEqual(d.popoldest(), ('b', 'vb'))
 
-    # test copy method
-    d = util.lrucachedict(4)
-    d['a'] = 'va3'
-    d['b'] = 'vb3'
-    d['c'] = 'vc3'
-    d['d'] = 'vd3'
+    def testmaxcost(self):
+        # Item cost is zero by default.
+        d = util.lrucachedict(6, maxcost=10)
+        d['a'] = 'va'
+        d['b'] = 'vb'
+        d['c'] = 'vc'
+        d['d'] = 'vd'
+        self.assertEqual(len(d), 4)
+        self.assertEqual(d.totalcost, 0)
+
+        d.clear()
+
+        # Insertion to exact cost threshold works without eviction.
+        d.insert('a', 'va', cost=6)
+        d.insert('b', 'vb', cost=4)
+
+        self.assertEqual(len(d), 2)
+        self.assertEqual(d['a'], 'va')
+        self.assertEqual(d['b'], 'vb')
 
-    dc = d.copy()
+        # Inserting a new element with 0 cost works.
+        d['c'] = 'vc'
+        self.assertEqual(len(d), 3)
+
+        # Inserting a new element with cost putting us above high
+        # water mark evicts oldest single item.
+        d.insert('d', 'vd', cost=1)
+        self.assertEqual(len(d), 3)
+        self.assertEqual(d.totalcost, 5)
+        self.assertNotIn('a', d)
+        for key in ('b', 'c', 'd'):
+            self.assertEqual(d[key], 'v%s' % key)
+
+        # Inserting a new element with enough room for just itself
+        # evicts all items before.
+        d.insert('e', 've', cost=10)
+        self.assertEqual(len(d), 1)
+        self.assertEqual(d.totalcost, 10)
+        self.assertIn('e', d)
 
-    # all of these should be present
-    print("\nAll of these should be present:")
-    printifpresent(dc, ['a', 'b', 'c', 'd'], 'dc')
+        # Inserting a new element with cost greater than threshold
+        # still retains that item.
+        d.insert('f', 'vf', cost=11)
+        self.assertEqual(len(d), 1)
+        self.assertEqual(d.totalcost, 11)
+        self.assertIn('f', d)
+
+        # Inserting a new element will evict the last item since it is
+        # too large.
+        d['g'] = 'vg'
+        self.assertEqual(len(d), 1)
+        self.assertEqual(d.totalcost, 0)
+        self.assertIn('g', d)
+
+        d.clear()
+
+        d.insert('a', 'va', cost=7)
+        d.insert('b', 'vb', cost=3)
+        self.assertEqual(len(d), 2)
+
+        # Replacing a value with smaller cost won't result in eviction.
+        d.insert('b', 'vb2', cost=2)
+        self.assertEqual(len(d), 2)
 
-    # 'a' should be dropped because it was least recently used
-    print("\nAll of these except 'a' should be present:")
-    dc['e'] = 've3'
-    printifpresent(dc, ['a', 'b', 'c', 'd', 'e'], 'dc')
+        # Replacing a value with a higher cost will evict when threshold
+        # exceeded.
+        d.insert('b', 'vb3', cost=4)
+        self.assertEqual(len(d), 1)
+        self.assertNotIn('a', d)
 
-    # contents and order of original dict should remain unchanged
-    print("\nThese should be in reverse alphabetical order and read 'v?3':")
-    dc['b'] = 'vb3_new'
-    for k in list(iter(d)):
-        print("d['%s']: %s" % (k, d[k]))
+    def testmaxcostcomplex(self):
+        d = util.lrucachedict(100, maxcost=100)
+        d.insert('a', 'va', cost=9)
+        d.insert('b', 'vb', cost=21)
+        d.insert('c', 'vc', cost=7)
+        d.insert('d', 'vc', cost=50)
+        self.assertEqual(d.totalcost, 87)
+
+        # Inserting new element should free multiple elements so we hit
+        # low water mark.
+        d.insert('e', 'vd', cost=25)
+        self.assertEqual(len(d), 2)
+        self.assertNotIn('a', d)
+        self.assertNotIn('b', d)
+        self.assertNotIn('c', d)
+        self.assertIn('d', d)
+        self.assertIn('e', d)
 
 if __name__ == '__main__':
-    test_lrucachedict()
+    silenttestrunner.main(__name__)
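
Note: the rewritten test covers the new cost accounting on util.lrucachedict:
insert() takes an optional cost, totalcost tracks the sum, and pushing past
maxcost evicts from the least recently used end. A condensed usage sketch
mirroring the assertions in testmaxcost above:

    from mercurial import util

    d = util.lrucachedict(6, maxcost=10)  # up to 6 entries, total cost <= 10
    d.insert('a', 'va', cost=6)
    d.insert('b', 'vb', cost=4)      # exactly at the ceiling: nothing evicted
    d['c'] = 'vc'                    # plain assignment means cost 0
    d.insert('d', 'vd', cost=1)      # exceeds maxcost, so oldest ('a') goes
    assert 'a' not in d
    assert d.totalcost == 5
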
--- a/tests/test-lrucachedict.py.out	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,62 +0,0 @@
-'a' in d: True
-d['a']: va
-'b' in d: True
-d['b']: vb
-'c' in d: True
-d['c']: vc
-'d' in d: True
-d['d']: vd
-'a' in d: False
-'b' in d: True
-d['b']: vb
-'c' in d: True
-d['c']: vc
-'d' in d: True
-d['d']: vd
-'e' in d: True
-d['e']: ve
-'b' in d: True
-d['b']: vb2
-'c' in d: True
-d['c']: vc2
-'d' in d: True
-d['d']: vd
-'e' in d: False
-'f' in d: True
-d['f']: vf
-'b' in d: False
-'c' in d: False
-'d' in d: False
-'e' in d: False
-'f' in d: False
-'a' in d: True
-d['a']: 1
-'b' in d: True
-d['b']: 2
-
-All of these should be present:
-'a' in dc: True
-dc['a']: va3
-'b' in dc: True
-dc['b']: vb3
-'c' in dc: True
-dc['c']: vc3
-'d' in dc: True
-dc['d']: vd3
-
-All of these except 'a' should be present:
-'a' in dc: False
-'b' in dc: True
-dc['b']: vb3
-'c' in dc: True
-dc['c']: vc3
-'d' in dc: True
-dc['d']: vd3
-'e' in dc: True
-dc['e']: ve3
-
-These should be in reverse alphabetical order and read 'v?3':
-d['d']: vd3
-d['c']: vc3
-d['b']: vb3
-d['a']: va3
--- a/tests/test-mactext.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mactext.t	Mon Oct 22 14:46:06 2018 -0400
@@ -24,7 +24,7 @@
   $ hg add f
   $ hg ci -m 1
 
-  $ $PYTHON unix2mac.py f
+  $ "$PYTHON" unix2mac.py f
   $ hg ci -m 2
   attempt to commit or push text file(s) using CR line endings
   in dea860dc51ec: f
@@ -32,7 +32,7 @@
   rollback completed
   abort: pretxncommit.cr hook failed
   [255]
-  $ hg cat f | $PYTHON print.py
+  $ hg cat f | "$PYTHON" print.py
   hello<LF>
-  $ cat f | $PYTHON print.py
+  $ cat f | "$PYTHON" print.py
   hello<CR>
--- a/tests/test-manifest.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-manifest.t	Mon Oct 22 14:46:06 2018 -0400
@@ -15,7 +15,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 3 changes to 3 files
-  new changesets b73562a03cfe:5bdc995175ba
+  new changesets b73562a03cfe:5bdc995175ba (2 drafts)
   (run 'hg update' to get a working copy)
 
 The next call is expected to return nothing:
--- a/tests/test-match.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-match.py	Mon Oct 22 14:46:06 2018 -0400
@@ -6,14 +6,832 @@
 
 from mercurial import (
     match as matchmod,
+    util,
 )
 
+class BaseMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.basematcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class AlwaysMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitdir(b'.'), b'all')
+        self.assertEqual(m.visitdir(b'dir'), b'all')
+
+    def testVisitchildrenset(self):
+        m = matchmod.alwaysmatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), b'all')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'all')
+
 class NeverMatcherTests(unittest.TestCase):
 
     def testVisitdir(self):
-        m = matchmod.nevermatcher('', '')
-        self.assertFalse(m.visitdir('.'))
-        self.assertFalse(m.visitdir('dir'))
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertFalse(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.nevermatcher(b'', b'')
+        self.assertEqual(m.visitchildrenset(b'.'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+
+class PredicateMatcherTests(unittest.TestCase):
+    # predicatematcher does not currently define either of these methods, so
+    # this is equivalent to BaseMatcherTests.
+
+    def testVisitdir(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.predicatematcher(b'', b'', lambda *a: False)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+
+class PatternMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # FIXME: These should probably be True.
+        self.assertFalse(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'dir/subdir'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        # FIXME: These should probably be {'subdir'} and 'this', respectively,
+        # or at least 'this' and 'this'.
+        self.assertEqual(m.visitchildrenset(b'dir'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.patternmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IncludeMatcherTests(unittest.TestCase):
+
+    def testVisitdirPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertEqual(m.visitdir(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetPrefix(self):
+        m = matchmod.match(b'x', b'', include=[b'path:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all')
+        # OPT: This should probably be 'all' if its parent is?
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrensetRootfilesin(self):
+        m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitdirGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertFalse(m.visitdir(b'folder'))
+        # OPT: these should probably be False.
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertTrue(m.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetGlob(self):
+        m = matchmod.match(b'x', b'', include=[b'glob:dir/z*'])
+        assert isinstance(m, matchmod.includematcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+        self.assertEqual(m.visitchildrenset(b'dir'), b'this')
+        # OPT: these should probably be set().
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this')
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class ExactMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertTrue(m.visitdir(b'.'))
+        self.assertTrue(m.visitdir(b'dir'))
+        self.assertTrue(m.visitdir(b'dir/subdir'))
+        self.assertFalse(m.visitdir(b'dir/subdir/foo.txt'))
+        self.assertFalse(m.visitdir(b'dir/foo'))
+        self.assertFalse(m.visitdir(b'dir/subdir/x'))
+        self.assertFalse(m.visitdir(b'folder'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir'), {b'foo.txt'})
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(m.visitchildrenset(b'dir/subdir/foo.txt'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+    def testVisitchildrensetFilesAndDirs(self):
+        m = matchmod.match(b'x', b'', patterns=[b'rootfile.txt',
+                                                b'a/file1.txt',
+                                                b'a/b/file2.txt',
+                                                # no file in a/b/c
+                                                b'a/b/c/d/file4.txt'],
+                           exact=True)
+        assert isinstance(m, matchmod.exactmatcher)
+        self.assertEqual(m.visitchildrenset(b'.'), {b'a', b'rootfile.txt'})
+        self.assertEqual(m.visitchildrenset(b'a'), {b'b', b'file1.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b'), {b'c', b'file2.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c'), {b'd'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d'), {b'file4.txt'})
+        self.assertEqual(m.visitchildrenset(b'a/b/c/d/e'), set())
+        self.assertEqual(m.visitchildrenset(b'folder'), set())
+
+class DifferenceMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertFalse(dm.visitdir(b'.'))
+        self.assertFalse(dm.visitdir(b'dir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/z'))
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'dir/subdir/x'))
+        self.assertFalse(dm.visitdir(b'folder'))
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to a nevermatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to an alwaysmatcher. OPT: if m2 is a
+        # nevermatcher, we could return 'all' for these.
+        #
+        # We're testing Equal-to-True instead of just 'assertTrue' since
+        # assertTrue does NOT verify that it's a bool, just that it's truthy.
+        # While we may want to eventually make these return 'all', they should
+        # not currently do so.
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        dm = matchmod.differencematcher(m1, m2)
+        # dm should be equivalent to an alwaysmatcher.
+        self.assertEqual(dm.visitchildrenset(b'.'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertFalse(dm.visitdir(b'dir/subdir'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+        # OPT: We could return 'all' for these.
+        self.assertEqual(dm.visitdir(b'dir/foo'), True)
+        self.assertEqual(dm.visitdir(b'folder'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitdir(b'.'), True)
+        self.assertEqual(dm.visitdir(b'dir'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+        self.assertFalse(dm.visitdir(b'dir/foo'))
+        self.assertFalse(dm.visitdir(b'folder'))
+        # OPT: We should probably return False for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        dm = matchmod.differencematcher(m1, m2)
+        self.assertEqual(dm.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(dm.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(dm.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return set() for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just 'this'.
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IntersectionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to an alwaysmatcher.
+        self.assertEqual(im.visitdir(b'.'), b'all')
+        self.assertEqual(im.visitdir(b'dir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to an alwaysmatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(im.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertFalse(im.visitdir(b'.'))
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+        self.assertFalse(im.visitdir(b'folder'))
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        im = matchmod.intersectmatchers(m1, m2)
+        # im should be equivalent to a nevermatcher.
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        # OPT: We should probably return 'all' for these; we don't because
+        # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+        # an 'all' pattern, just True.
+        self.assertEqual(im.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        # OPT: We should probably return 'all' for these
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is True correct here?
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertFalse(im.visitdir(b'dir'))
+        self.assertFalse(im.visitdir(b'dir/subdir'))
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # FIXME: is set() correct here?
+        self.assertEqual(im.visitchildrenset(b'.'), set())
+        self.assertEqual(im.visitchildrenset(b'dir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        im = matchmod.intersectmatchers(m1, m2)
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), {b'x'})
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(im.visitdir(b'.'), True)
+        self.assertEqual(im.visitdir(b'dir'), True)
+        self.assertEqual(im.visitdir(b'dir/subdir'), True)
+        self.assertFalse(im.visitdir(b'dir/foo'))
+        self.assertFalse(im.visitdir(b'folder'))
+        self.assertFalse(im.visitdir(b'dir/subdir/z'))
+        self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        im = matchmod.intersectmatchers(m1, m2)
+        # OPT: these next two could probably be set() as well.
+        self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(im.visitchildrenset(b'folder'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+        self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+class UnionMatcherTests(unittest.TestCase):
+
+    def testVisitdirM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2always(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM1never(self):
+        m1 = matchmod.nevermatcher(b'', b'')
+        m2 = matchmod.alwaysmatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+
+    def testVisitchildrensetM2never(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.nevermatcher(b'', b'')
+        um = matchmod.unionmatcher([m1, m2])
+        # um should be equivalent to an alwaysmatcher.
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+    def testVisitdirM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), b'all')
+        self.assertEqual(um.visitdir(b'dir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetM2SubdirPrefix(self):
+        m1 = matchmod.alwaysmatcher(b'', b'')
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        # OPT: These two should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertEqual(um.visitdir(b'folder'), b'all')
+        # OPT: These should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+    def testVisitchildrensetIncludeInclude2(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'folder', b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+        # OPT: These next two could be 'all' instead of 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not True.
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+
+    def testVisitchildrensetIncludeInclude3(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+        # OPT: this should probably be 'all' not 'this'.
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+
+    # We're using includematcher instead of patterns because it behaves slightly
+    # better (giving narrower results) than patternmatcher.
+    def testVisitdirIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        # OPT: these next three could probably be False as well.
+        self.assertEqual(um.visitdir(b'.'), True)
+        self.assertEqual(um.visitdir(b'dir'), True)
+        self.assertEqual(um.visitdir(b'dir/subdir'), True)
+        self.assertFalse(um.visitdir(b'dir/foo'))
+        self.assertFalse(um.visitdir(b'folder'))
+        self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+    def testVisitchildrensetIncludeInclude4(self):
+        m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+        m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z'])
+        um = matchmod.unionmatcher([m1, m2])
+        self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+        self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'})
+        self.assertEqual(um.visitchildrenset(b'dir/subdir'), {b'x', b'z'})
+        self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+        self.assertEqual(um.visitchildrenset(b'folder'), set())
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+        self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+class SubdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitdir(b'.'), True)
+        self.assertEqual(sm.visitdir(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not True.
+        self.assertEqual(sm.visitdir(b'subdir/x'), True)
+        self.assertEqual(sm.visitdir(b'subdir/z'), True)
+        self.assertFalse(sm.visitdir(b'foo'))
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+        sm = matchmod.subdirmatcher(b'dir', m)
+
+        self.assertEqual(sm.visitchildrenset(b'.'), {b'subdir'})
+        self.assertEqual(sm.visitchildrenset(b'subdir'), b'all')
+        # OPT: These next two should probably be 'all' not 'this'.
+        self.assertEqual(sm.visitchildrenset(b'subdir/x'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'subdir/z'), b'this')
+        self.assertEqual(sm.visitchildrenset(b'foo'), set())
+
+class PrefixdirMatcherTests(unittest.TestCase):
+
+    def testVisitdir(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # `m` elides 'd' because it's part of the root, and the rest of the
+        # patterns are relative.
+        self.assertEqual(bool(m(b'a.txt')), False)
+        self.assertEqual(bool(m(b'b.txt')), False)
+        self.assertEqual(bool(m(b'e/a.txt')), True)
+        self.assertEqual(bool(m(b'e/b.txt')), False)
+        self.assertEqual(bool(m(b'e/f/b.txt')), True)
+
+        # The prefix matcher re-adds 'd' to the paths, so they need to be
+        # specified when using the prefixdirmatcher.
+        self.assertEqual(bool(pm(b'a.txt')), False)
+        self.assertEqual(bool(pm(b'b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/a.txt')), True)
+        self.assertEqual(bool(pm(b'd/e/b.txt')), False)
+        self.assertEqual(bool(pm(b'd/e/f/b.txt')), True)
+
+        self.assertEqual(m.visitdir(b'.'), True)
+        self.assertEqual(m.visitdir(b'e'), True)
+        self.assertEqual(m.visitdir(b'e/f'), True)
+        self.assertEqual(m.visitdir(b'e/f/g'), False)
+
+        self.assertEqual(pm.visitdir(b'.'), True)
+        self.assertEqual(pm.visitdir(b'd'), True)
+        self.assertEqual(pm.visitdir(b'd/e'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f'), True)
+        self.assertEqual(pm.visitdir(b'd/e/f/g'), False)
+
+    def testVisitchildrenset(self):
+        m = matchmod.match(util.localpath(b'root/d'), b'e/f',
+                [b'../a.txt', b'b.txt'])
+        pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m)
+
+        # OPT: visitchildrenset could possibly return {'e'} and {'f'} for these
+        # next two, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(m.visitchildrenset(b'.'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f'), b'this')
+        self.assertEqual(m.visitchildrenset(b'e/f/g'), set())
+
+        # OPT: visitchildrenset could possibly return {'d'}, {'e'}, and {'f'}
+        # for these next three, respectively; patternmatcher does not have this
+        # optimization.
+        self.assertEqual(pm.visitchildrenset(b'.'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f'), b'this')
+        self.assertEqual(pm.visitchildrenset(b'd/e/f/g'), set())
 
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
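
The visitdir()/visitchildrenset() contract these tests pin down lets tree
walks prune entire subtrees: set() (or False from visitdir) means nothing
below can match, b'this' (or True) means visit this directory but check
children individually, b'all' means everything beneath matches, and a set of
names means descend only into those children. Below is a hedged sketch of a
walk built on that contract; walk() and its listdir callback are illustrative
helpers, not part of mercurial.match.

    def walk(m, dirpath, listdir, visit):
        # listdir(dirpath) yields (name, isdir) pairs; visit(path) consumes files.
        children = m.visitchildrenset(dirpath)
        if not children:
            return                      # set(): nothing under dirpath can match
        for name, isdir in listdir(dirpath):
            if isinstance(children, set) and name not in children:
                continue                # prune children the matcher rules out
            path = name if dirpath == b'.' else dirpath + b'/' + name
            if isdir:
                walk(m, path, listdir, visit)
            elif children == b'all' or m(path):
                visit(path)
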
--- a/tests/test-merge-changedelete.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-changedelete.t	Mon Oct 22 14:46:06 2018 -0400
@@ -54,9 +54,11 @@
 Non-interactive merge:
 
   $ hg merge -y
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -121,9 +123,11 @@
   > c
   > d
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? c
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -189,18 +193,23 @@
   > baz
   > c
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? foo
   unrecognized response
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? bar
   unrecognized response
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? d
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? baz
   unrecognized response
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -262,9 +271,11 @@
   $ hg merge --config ui.interactive=true <<EOF
   > d
   > EOF
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? d
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -473,9 +484,11 @@
   1 other heads for branch "default"
 
   $ hg merge --config ui.interactive=True --tool :prompt
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   0 files updated, 0 files merged, 0 files removed, 3 files unresolved
@@ -532,9 +545,11 @@
   1 other heads for branch "default"
 
   $ hg merge --tool :prompt
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? u
   0 files updated, 0 files merged, 0 files removed, 3 files unresolved
@@ -589,9 +604,11 @@
   1 other heads for branch "default"
 
   $ hg merge --tool :merge3
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging file3
   warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -679,9 +696,11 @@
   (status identical)
   
   === :other -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -707,9 +726,11 @@
   (status identical)
   
   === :local -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -725,9 +746,11 @@
   (status identical)
   
   === :fail -> :prompt ===
-  local [working copy] changed file1 which other [merge rev] deleted
+  file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [merge rev] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for file3? 
   --- diff of status ---
@@ -751,9 +774,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 -y
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -927,9 +952,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 --config ui.interactive=True --tool :prompt
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -977,9 +1004,11 @@
   $ echo changed >> file1
   $ hg rm file2
   $ hg update 1 --tool :merge3
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   1 files updated, 0 files merged, 0 files removed, 2 files unresolved
   use 'hg resolve' to retry unresolved file merges
@@ -1033,9 +1062,11 @@
   (status identical)
   
   === :other -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
@@ -1060,9 +1091,11 @@
   (status identical)
   
   === :local -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
@@ -1077,9 +1110,11 @@
   (status identical)
   
   === :fail -> :prompt ===
-  local [working copy] changed file1 which other [destination] deleted
+  file 'file1' was deleted in other [destination] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? 
-  other [destination] changed file2 which local [working copy] deleted
+  file 'file2' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? 
   --- diff of status ---
   (status identical)
--- a/tests/test-merge-default.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-default.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,9 +55,9 @@
     "bookmarks": [],
     "branch": "default",
     "dirty": "+",
-    "id": "f25cbe84d8b3+2d95304fed5d+",
+    "id": "f25cbe84d8b320e298e7703f18a25a3959518c23+2d95304fed5d89bc9d70b2a0d02f0d567469c3ab+",
     "node": "ffffffffffffffffffffffffffffffffffffffff",
-    "parents": [{"node": "f25cbe84d8b320e298e7703f18a25a3959518c23", "rev": 4}, {"node": "2d95304fed5d89bc9d70b2a0d02f0d567469c3ab", "rev": 2}],
+    "parents": ["f25cbe84d8b320e298e7703f18a25a3959518c23", "2d95304fed5d89bc9d70b2a0d02f0d567469c3ab"],
     "tags": ["tip"]
    }
   ]
@@ -82,7 +82,7 @@
    {
     "bookmarks": [],
     "branch": "default",
-    "id": "1846eede8b68",
+    "id": "1846eede8b6886d8cc8a88c96a687b7fe8f3b9d1",
     "node": "1846eede8b6886d8cc8a88c96a687b7fe8f3b9d1",
     "tags": []
    }
--- a/tests/test-merge-force.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-force.t	Mon Oct 22 14:46:06 2018 -0400
@@ -10,26 +10,26 @@
 
 Create base changeset
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 3 1
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 3 1
   $ hg addremove -q --similarity 0
   $ hg commit -qm 'base'
 
 Create remote changeset
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 3 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 3 2
   $ hg addremove -q --similarity 0
   $ hg commit -qm 'remote'
 
 Create local changeset
 
   $ hg update -q 0
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 3 3
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 3 3
   $ hg addremove -q --similarity 0
   $ hg commit -qm 'local'
 
 Set up working directory
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 3 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 3 wc
   $ hg addremove -q --similarity 0
   $ hg forget *_*_*_*-untracked
   $ rm *_*_*_missing-*
@@ -142,55 +142,80 @@
 #   in the same way, so it could potentially be left alone
 
   $ hg merge -f --tool internal:merge3 'desc("remote")' 2>&1 | tee $TESTTMP/merge-output-1
-  local [working copy] changed content1_missing_content1_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content1_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content3-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content3-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_missing_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_missing_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content3-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content1-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content2-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content4-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-tracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-untracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content1_content4-tracked
   merging content1_content2_content2_content1-tracked
@@ -286,7 +311,7 @@
 the remote side did not touch the file
 
   $ checkstatus() {
-  >   for f in `$PYTHON $TESTDIR/generate-working-copy-states.py filelist 3`
+  >   for f in `"$PYTHON" $TESTDIR/generate-working-copy-states.py filelist 3`
   >   do
   >     echo
   >     hg status -A $f
@@ -667,7 +692,7 @@
   missing_missing_missing_missing-untracked: * (glob)
   <missing>
 
-  $ for f in `$PYTHON $TESTDIR/generate-working-copy-states.py filelist 3`
+  $ for f in `"$PYTHON" $TESTDIR/generate-working-copy-states.py filelist 3`
   > do
   >   if test -f ${f}.orig
   >   then
@@ -703,63 +728,88 @@
   (no more unresolved files)
   $ hg resolve --unmark --all
   $ hg resolve --all --tool internal:merge3
-  other [merge rev] changed content1_content2_content1_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content1_content4-tracked
-  other [merge rev] changed content1_content2_content1_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content1_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content1_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content1_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content2_content1-tracked
-  other [merge rev] changed content1_content2_content2_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content2_content4-tracked
-  other [merge rev] changed content1_content2_content2_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content2_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content2_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content2_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content1-tracked
-  other [merge rev] changed content1_content2_content3_content1-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_content2-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content3-tracked
-  other [merge rev] changed content1_content2_content3_content3-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content3-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_content3_content4-tracked
-  other [merge rev] changed content1_content2_content3_content4-untracked which local [working copy] deleted
+  file 'content1_content2_content3_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-tracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_content3_missing-untracked which local [working copy] deleted
+  file 'content1_content2_content3_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_missing_content1-tracked
-  other [merge rev] changed content1_content2_missing_content1-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content1-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_content2-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content2-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging content1_content2_missing_content4-tracked
-  other [merge rev] changed content1_content2_missing_content4-untracked which local [working copy] deleted
+  file 'content1_content2_missing_content4-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-tracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-tracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  other [merge rev] changed content1_content2_missing_missing-untracked which local [working copy] deleted
+  file 'content1_content2_missing_missing-untracked' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content1_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content1_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content3-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content3-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_content3_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_content3_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
-  local [working copy] changed content1_missing_missing_content4-tracked which other [merge rev] deleted
+  file 'content1_missing_missing_content4-tracked' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
   merging missing_content2_content2_content4-tracked
   merging missing_content2_content3_content3-tracked
@@ -784,7 +834,7 @@
 
   $ hg -q update --clean 2
   $ hg --config extensions.purge= purge
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 3 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 3 wc
   $ hg addremove -q --similarity 0
   $ hg forget *_*_*_*-untracked
   $ rm *_*_*_missing-*
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-merge-no-file-change.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,379 @@
+  $ cat <<'EOF' >> "$HGRCPATH"
+  > [extensions]
+  > convert =
+  > [templates]
+  > l = '{rev}:{node|short} p={p1rev},{p2rev} m={manifest} f={files|json}'
+  > EOF
+
+  $ check_convert_identity () {
+  >     hg convert -q "$1" "$1.converted"
+  >     hg outgoing -q -R "$1.converted" "$1"
+  >     if [ "$?" != 1 ]; then
+  >         echo '*** BUG: hash changes on convert ***'
+  >         hg log -R "$1.converted" -GTl
+  >     fi
+  > }
+
+Files added at both parents:
+
+  $ hg init added-both
+  $ cd added-both
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:7aa8a293f5d97377037afc21e871e036e718d659
+  $ hg log -GTl
+  @    3:7aa8a293f5d9 p=2,1 m=3:8667461869a1 f=[]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-both
+
+Files added at both parents, but the one from the other branch removed at the merge:
+(In this case, ctx.files() after the commit contains the removed file "b", but
+its manifest does not differ from p1.)
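+
The rule these cases probe can be sketched roughly as follows; this is a
simplified illustration grounded only in the debug messages asserted in this
test, and the helper name and signature are invented rather than Mercurial's
actual commit API:

  def manifest_commit_decision(p1_node, new_node, changed_files):
      # Invented helper mirroring the debug messages these tests expect.
      if changed_files:
          if new_node == p1_node:
              # e.g. a flag change that leaves the manifest identical to p1
              return 'reusing manifest from p1 (listed files actually unchanged)'
          return 'committing manifest'
      if new_node != p1_node:
          # e.g. files added on both parents: the merge's changelog entry
          # lists no files, yet the merged manifest differs from p1's
          return ('not reusing manifest (no file change in changelog, '
                  'but manifest differs)')
      return 'reusing manifest from p1 (no file change)'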
+
+  $ hg init added-both-removed-at-merge
+  $ cd added-both-removed-at-merge
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg ci -qAm2 c
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg rm -f b
+  $ hg ci --debug -m merge
+  committing files:
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:915745f3ca3d9d699925269474c2d0a9526e8dfa
+  $ hg log -GTl
+  @    3:915745f3ca3d p=2,1 m=3:8e9cf3456921 f=["b"]
+  |\
+  | o  2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-both-removed-at-merge
+
+An identical file added at both parents:
+
+  $ hg init added-identical
+  $ cd added-identical
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:de26182cd210f0c3fb175ca7616704ab963d3024
+  $ hg log -GTl
+  @    3:de26182cd210 p=2,1 m=1:686dbf0aeca4 f=[]
+  |\
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-identical
+
+#if execbit
+
+An identical file added at both parents, but the flag differs. Take local:
+
+  $ hg init flag-change-take-p1
+  $ cd flag-change-take-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod +x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  reusing manifest from p1 (listed files actually unchanged)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:c8d50407916ef8a5a97cb6e36ca9bc844a6ee13e
+  $ hg log -GTl
+  @    3:c8d50407916e p=2,1 m=2:36b69ba4b24b f=[]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0 x b
+
+  $ cd ..
+  $ check_convert_identity flag-change-take-p1
+
+An identical file added at both parents, but the flag differs. Take other:
+
+  $ hg init flag-change-take-p2
+  $ cd flag-change-take-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b
+  $ chmod +x b
+  $ hg ci -qAm2 b
+
+  $ hg merge
+  warning: cannot merge flags for b without common ancestor - keeping local flags
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ chmod -x b
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:06a62a687d87c7d8944743dee1ee9d8c66b3f6e3
+  $ hg log -GTl
+  @    3:06a62a687d87 p=2,1 m=3:2a315ba1aa45 f=["b"]
+  |\
+  | o  2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+  $ hg files -vr3
+           0   a
+           0   b
+
+  $ cd ..
+  $ check_convert_identity flag-change-take-p2
+
+#endif
+
+An identical file added at both parents, one more file added at p2:
+
+  $ hg init added-some-p2
+  $ cd added-some-p2
+  $ touch a b c
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg ci -qAm2 c
+  $ hg up -q 0
+  $ touch b
+  $ hg ci -qAm3 b
+
+  $ hg merge
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  c
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 4:f7fbc4e4d9a8fde03ba475adad675578c8bf472d
+  $ hg log -GTl
+  @    4:f7fbc4e4d9a8 p=3,2 m=3:92acd5bfd716 f=[]
+  |\
+  | o  3:e9d9f3cc981f p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  2:93c5529a4ec7 p=1,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p2
+
+An identical file added at both parents, one more file added at p1:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
+
+  $ hg init added-some-p1
+  $ cd added-some-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ touch b c
+  $ hg ci -qAm2 b
+  $ hg ci -qAm3 c
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 4:a9f0f589a913f5a149dc10dfbd5af726977c36c4
+  $ hg log -GTl
+  @    4:a9f0f589a913 p=3,1 m=2:ae25a31b30b3 f=[]
+  |\
+  | o  3:b8dc385241b5 p=2,-1 m=2:ae25a31b30b3 f=["c"]
+  | |
+  | o  2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+  
+
+  $ cd ..
+  $ check_convert_identity added-some-p1
+
+A file added at p2, a named branch created at p1:
+
+  $ hg init named-branch-p1
+  $ cd named-branch-p1
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg ci -qAm1 b
+  $ hg up -q 0
+  $ hg branch -q foo
+  $ hg ci -m2
+
+  $ hg merge default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  committing files:
+  b
+  not reusing manifest (no file change in changelog, but manifest differs)
+  committing manifest
+  committing changelog
+  updating the branch cache
+  committed changeset 3:fb97d83b02fd072295cfc2171f21b7d38509bfd7
+  $ hg log -GT'{l} branch={branch}'
+  @    3:fb97d83b02fd p=2,1 m=2:9091c64f4ea1 f=[] branch=foo
+  |\
+  | o  2:a3a9fa6587e5 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  | |
+  o |  1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p1
+
+A file added at p1, a named branch created at p2:
+(In this case, p1 manifest is reused at the merge commit, which means the
+manifest DAG does not have the same shape as the changelog.)
+
+  $ hg init named-branch-p2
+  $ cd named-branch-p2
+  $ touch a b
+  $ hg ci -qAm0 a
+  $ hg branch -q foo
+  $ hg ci -m1
+  $ hg up -q 0
+  $ hg ci -qAm1 b
+
+  $ hg merge foo
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:036823e24692218324d4af43b07ff89f8a000096
+  $ hg log -GT'{l} branch={branch}'
+  @    3:036823e24692 p=2,1 m=1:686dbf0aeca4 f=[] branch=default
+  |\
+  | o  2:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default
+  | |
+  o |  1:da38c8e00727 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity named-branch-p2
+
+A file changed once at both parents, but amended to have identical content:
+
+  $ hg init amend-p1
+  $ cd amend-p1
+  $ touch a
+  $ hg ci -qAm0 a
+  $ echo foo > a
+  $ hg ci -m1
+  $ hg up -q 0
+  $ echo bar > a
+  $ hg ci -qm2
+  $ echo foo > a
+  $ hg ci -qm3 --amend
+
+  $ hg merge
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ hg ci --debug -m merge
+  reusing manifest from p1 (no file change)
+  committing changelog
+  updating the branch cache
+  committed changeset 3:314e5bc5adf5c58ea571efabe33eedba20a201aa
+  $ hg log -GT'{l} branch={branch}'
+  @    3:314e5bc5adf5 p=2,1 m=1:d33ea248bd73 f=[] branch=default
+  |\
+  | o  2:de9c64f226a3 p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  | |
+  o |  1:6a74aec01b3c p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default
+  |/
+  o  0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default
+  
+
+  $ cd ..
+  $ check_convert_identity amend-p1
--- a/tests/test-merge-remove.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-remove.t	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,7 @@
   1 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
 
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   m   0         -2 unset               bar
   m   0         -2 unset               foo1
   copy: foo -> foo1
@@ -36,7 +36,7 @@
   $ cp bar B
   $ hg rm -f foo1 bar
 
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   r   0         -1 set                 bar
   r   0         -1 set                 foo1
   copy: foo -> foo1
@@ -54,7 +54,7 @@
   adding bar
   adding foo1
 
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   n   0         -2 unset               bar
   n   0         -2 unset               foo1
   copy: foo -> foo1
@@ -69,11 +69,11 @@
 
   $ hg revert -vr . foo1 bar
   saving current version of bar as bar.orig
+  saving current version of foo1 as foo1.orig
   reverting bar
-  saving current version of foo1 as foo1.orig
   reverting foo1
 
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   n   0         -2 unset               bar
   n   0         -2 unset               foo1
   copy: foo -> foo1
@@ -102,7 +102,8 @@
 Those who use force will lose
 
   $ hg merge -f
-  other [merge rev] changed bar which local [working copy] deleted
+  file 'bar' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
   merging foo1 and foo to foo1
   0 files updated, 1 files merged, 0 files removed, 1 files unresolved
--- a/tests/test-merge-subrepos.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-subrepos.t	Mon Oct 22 14:46:06 2018 -0400
@@ -110,7 +110,8 @@
   $ hg up -r '.^' --config ui.interactive=True << EOF
   > d
   > EOF
-  other [destination] changed b which local [working copy] deleted
+  file 'b' was deleted in local [working copy] but was modified in other [destination].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
--- a/tests/test-merge-symlinks.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-symlinks.t	Mon Oct 22 14:46:06 2018 -0400
@@ -38,7 +38,7 @@
 Merge them and display *_ISLINK vars
 merge heads
 
-  $ hg merge --tool="$PYTHON ../echo.py"
+  $ hg merge --tool="\"$PYTHON\" ../echo.py"
   merging l
   HG_FILE l
   HG_MY_ISLINK 1
@@ -54,7 +54,7 @@
   $ hg up -C 2
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg copy l l2
-  $ HGMERGE="$PYTHON ../echo.py" hg up 3
+  $ HGMERGE="\"$PYTHON\" ../echo.py" hg up 3
   merging l2
   HG_FILE l2
   HG_MY_ISLINK 1
--- a/tests/test-merge-tools.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge-tools.t	Mon Oct 22 14:46:06 2018 -0400
@@ -68,7 +68,7 @@
 override $PATH to ensure hgmerge not visible; use $PYTHON in case we're
 running from a devel copy, not a temp installation
 
-  $ PATH="$BINDIR:/usr/sbin" $PYTHON "$BINDIR"/hg merge -r 2
+  $ PATH="$BINDIR:/usr/sbin" "$PYTHON" "$BINDIR"/hg merge -r 2
   merging f
   warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
@@ -117,7 +117,7 @@
 
   $ echo "echo fail" > false
   $ hg up -qC 1
-  $ PATH="`pwd`:$BINDIR:/usr/sbin" $PYTHON "$BINDIR"/hg merge -r 2
+  $ PATH="`pwd`:$BINDIR:/usr/sbin" "$PYTHON" "$BINDIR"/hg merge -r 2
   merging f
   warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
@@ -131,7 +131,7 @@
 
   $ mkdir false
   $ hg up -qC 1
-  $ PATH="`pwd`:$BINDIR:/usr/sbin" $PYTHON "$BINDIR"/hg merge -r 2
+  $ PATH="`pwd`:$BINDIR:/usr/sbin" "$PYTHON" "$BINDIR"/hg merge -r 2
   merging f
   warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
@@ -1701,6 +1701,35 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg update -C 1 > /dev/null
+
+#else
+
+Match the non-portable filename commits above for test stability
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 5
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 5
+  > EOF
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > revision 6
+  > 
+  > diff --git a/"; exit 1; echo " b/"; exit 1; echo "
+  > new file mode 100644
+  > --- /dev/null
+  > +++ b/"; exit 1; echo "
+  > @@ -0,0 +1,1 @@
+  > +revision 6
+  > EOF
+
 #endif
 
 Merge post-processing
@@ -1737,14 +1766,64 @@
   # hg resolve --list
   U f
 
-#if symlink
+missingbinary is a merge-tool that doesn't exist:
+
+  $ echo "missingbinary.executable=doesnotexist" >> .hg/hgrc
+  $ beforemerge
+  [merge-tools]
+  false.whatever=
+  true.priority=1
+  true.executable=cat
+  missingbinary.executable=doesnotexist
+  # hg update -C 1
+  $ hg merge -y -r 2 --config ui.merge=missingbinary
+  couldn't find merge tool missingbinary (for pattern f)
+  merging f
+  couldn't find merge tool missingbinary (for pattern f)
+  revision 1
+  space
+  revision 0
+  space
+  revision 2
+  space
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
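The fallback shown above might be summarized by the sketch below; the helper
is hypothetical, not Mercurial's filemerge code. When the tool named by
ui.merge has no usable executable, a warning is printed and the
highest-priority usable tool is chosen instead (here 'true', whose executable
'cat' simply prints all three versions of the file):

  import shutil

  def pick_usable_tool(requested, tools):
      # tools maps name -> {'executable': ..., 'priority': ...}
      cfg = tools.get(requested)
      if cfg and shutil.which(cfg['executable']):
          return requested
      print("couldn't find merge tool %s" % requested)
      usable = [(c['priority'], name) for name, c in tools.items()
                if shutil.which(c['executable'])]
      return max(usable)[1] if usable else None

  tools = {'true': {'executable': 'cat', 'priority': 1},
           'missingbinary': {'executable': 'doesnotexist', 'priority': 0}}
  print(pick_usable_tool('missingbinary', tools))  # warns, then picks 'true'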
+  $ hg update -q -C 1
+  $ rm f
 
 internal merge cannot handle symlinks and shouldn't try:
 
-  $ hg update -q -C 1
-  $ rm f
+#if symlink
+
   $ ln -s symlink f
   $ hg commit -qm 'f is symlink'
+
+#else
+
+  $ hg import --bypass -q - << EOF
+  > # HG changeset patch
+  > f is symlink
+  > 
+  > diff --git a/f b/f
+  > old mode 100644
+  > new mode 120000
+  > --- a/f
+  > +++ b/f
+  > @@ -1,2 +1,1 @@
+  > -revision 1
+  > -space
+  > +symlink
+  > \ No newline at end of file
+  > EOF
+
+Resolve the "file 'f' was deleted in local [working copy] but was modified in other [destination]" prompt
+  $ hg up -q -C --config ui.interactive=True << EOF
+  > c
+  > EOF
+
+#endif
+
   $ hg merge -r 2 --tool internal:merge
   merging f
   warning: internal :merge cannot merge symlinks for f
@@ -1753,8 +1832,6 @@
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
   [1]
 
-#endif
-
 Verify naming of temporary files and that extension is preserved:
 
   $ hg update -q -C 1
@@ -1782,6 +1859,89 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
 
+Binary files capability checking
+
+  $ hg update -q -C 0
+  $ "$PYTHON" <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x00\x01\x02\x03')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#1)"
+
+  $ hg update -q -C 0
+  $ "$PYTHON" <<EOF
+  > with open('b', 'wb') as fp:
+  >     fp.write(b'\x03\x02\x01\x00')
+  > EOF
+  $ hg add b
+  $ hg commit -qm "add binary file (#2)"
+
+By default, binary files capability of internal merge tools is not
+checked strictly.
+
+(for merge-patterns, chosen unintentionally)
+
+  $ hg merge 9 \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  warning: check merge-patterns configurations, if ':merge-other' for binary file 'b' is unintentional
+  (see 'hg help merge-tools' for binary files capability)
+  merging b
+  warning: b looks like a binary file.
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+(for ui.merge, ignored unintentionally)
+
+  $ hg merge 9 \
+  > --config merge-tools.:other.binary=true \
+  > --config ui.merge=:other
+  tool :other (for pattern b) can't handle binary
+  tool true can't handle binary
+  tool :other can't handle binary
+  tool false can't handle binary
+  no tool found to merge b
+  keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for b? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+With merge.strict-capability-check=true, binary files capability of
+internal merge tools is checked strictly.
+
+  $ f --hexdump b
+  b:
+  0000: 03 02 01 00                                     |....|
+
+(for merge-patterns)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config merge-tools.:merge-other.binary=true \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  tool :merge-other (for pattern b) can't handle binary
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
+(for ui.merge)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config ui.merge=:other
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03                                     |....|
+  $ hg merge --abort -q
+
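The difference between the two modes might be sketched as follows; the
capability table and helper are invented for illustration (the real
definitions belong to the internal tools in mercurial/filemerge.py). Under
strict checking the tool's actual binary capability decides: ':other' simply
takes the other side wholesale, so binary content is fine, while
':merge-other' performs a textual 3-way merge and is rejected regardless of
any merge-tools.*.binary configuration:

  # Invented capability table for the sketch.
  ACTUAL_BINARY_CAPS = {':other': True, ':merge-other': False}

  def strict_can_handle_binary(tool):
      # With merge.strict-capability-check=true, configured
      # merge-tools.TOOL.binary settings are ignored in favour of what
      # the internal tool can really do.
      return ACTUAL_BINARY_CAPS.get(tool, False)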
Check that debugpickmergetool examines which merge tool is chosen for the
specified file, as expected
 
@@ -1790,6 +1950,7 @@
   false.whatever=
   true.priority=1
   true.executable=cat
+  missingbinary.executable=doesnotexist
   # hg update -C 1
 
 (default behavior: checking files in the working parent context)
@@ -1812,9 +1973,9 @@
 
 (-r REV causes checking files in specified revision)
 
-  $ hg manifest -r tip
+  $ hg manifest -r 8
   f.txt
-  $ hg debugpickmergetool -r tip
+  $ hg debugpickmergetool -r 8
   f.txt = true
 
 #if symlink
@@ -1824,6 +1985,45 @@
   $ hg debugpickmergetool -r 6d00b3726f6e
   f = :prompt
 
+(by default, it is assumed that no internal merge tool has symlink
+capability)
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :prompt
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:other.symlink=true \
+  > --config ui.merge=:other
+  f = :prompt
+
+(with strict-capability-check=true, actual symlink capabilities are
+checked strictly)
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-tools.:merge-other.symlink=true \
+  > --config ui.merge=:merge-other
+  f = :prompt
+
 #endif
 
 (--verbose shows some configurations)
--- a/tests/test-merge1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-merge1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -30,7 +30,8 @@
 
   $ mkdir b && touch b/nonempty
   $ hg up
-  abort: Directory not empty: '$TESTTMP/t/b'
+  abort: Unlinking directory not permitted: *$TESTTMP/t/b* (glob) (windows !)
+  abort: Directory not empty: '?\$TESTTMP/t/b'? (re) (no-windows !)
   [255]
   $ hg ci
   abort: last update was interrupted
--- a/tests/test-minifileset.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-minifileset.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,12 +1,6 @@
 from __future__ import absolute_import
 from __future__ import print_function
 
-import os
-import sys
-
-# make it runnable directly without run-tests.py
-sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-
 from mercurial import minifileset
 
 def check(text, truecases, falsecases):
--- a/tests/test-minirst.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-minirst.py	Mon Oct 22 14:46:06 2018 -0400
@@ -7,6 +7,7 @@
 )
 
 def debugformat(text, form, **kwargs):
+    blocks, pruned = minirst.parse(text, **kwargs)
     if form == b'html':
         print("html format:")
         out = minirst.format(text, style=form, **kwargs)
@@ -15,12 +16,10 @@
         out = minirst.format(text, width=form, **kwargs)
 
     print("-" * 70)
-    if type(out) == tuple:
-        print(out[0][:-1].decode('utf8'))
+    print(out[:-1].decode('utf8'))
+    if kwargs.get('keep'):
         print("-" * 70)
-        print(stringutil.pprint(out[1]).decode('utf8'))
-    else:
-        print(out[:-1].decode('utf8'))
+        print(stringutil.pprint(pruned).decode('utf8'))
     print("-" * 70)
     print()
 
--- a/tests/test-mq-eol.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-eol.t	Mon Oct 22 14:46:06 2018 -0400
@@ -44,11 +44,11 @@
 
 Test different --eol values
 
-  $ $PYTHON -c 'open("a", "wb").write(b"a\nb\nc\nd\ne")'
+  $ "$PYTHON" -c 'open("a", "wb").write(b"a\nb\nc\nd\ne")'
   $ hg ci -Am adda
   adding .hgignore
   adding a
-  $ $PYTHON ../makepatch.py
+  $ "$PYTHON" ../makepatch.py
   $ hg qimport eol.diff
   adding eol.diff to series file
 
@@ -85,7 +85,7 @@
   applying eol.diff
   now at: eol.diff
   $ hg qrefresh
-  $ $PYTHON ../cateol.py .hg/patches/eol.diff
+  $ "$PYTHON" ../cateol.py .hg/patches/eol.diff
   # HG changeset patch<LF>
   # Parent  0d0bf99a8b7a3842c6f8ef09e34f69156c4bd9d0<LF>
   test message<LF>
@@ -106,7 +106,7 @@
   +d<CR><LF>
   +z<LF>
   \ No newline at end of file<LF>
-  $ $PYTHON ../cateol.py a
+  $ "$PYTHON" ../cateol.py a
   a<CR><LF>
   y<CR><LF>
   c<CR><LF>
@@ -121,7 +121,7 @@
   $ hg --config patch.eol='CRLF' qpush
   applying eol.diff
   now at: eol.diff
-  $ $PYTHON ../cateol.py a
+  $ "$PYTHON" ../cateol.py a
   a<CR><LF>
   y<CR><LF>
   c<CR><LF>
@@ -136,7 +136,7 @@
   $ hg qpush
   applying eol.diff
   now at: eol.diff
-  $ $PYTHON ../cateol.py a
+  $ "$PYTHON" ../cateol.py a
   a<CR><LF>
   y<CR><LF>
   c<CR><LF>
@@ -152,15 +152,15 @@
 
   $ hg init testeol
   $ cd testeol
-  $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n3\r\n4')"
+  $ "$PYTHON" -c "open('a', 'wb').write(b'1\r\n2\r\n3\r\n4')"
   $ hg ci -Am adda
   adding a
-  $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n33\r\n4')"
+  $ "$PYTHON" -c "open('a', 'wb').write(b'1\r\n2\r\n33\r\n4')"
   $ hg qnew patch1
   $ hg qpop
   popping patch1
   patch queue now empty
-  $ $PYTHON -c "open('a', 'wb').write(b'1\r\n22\r\n33\r\n4')"
+  $ "$PYTHON" -c "open('a', 'wb').write(b'1\r\n22\r\n33\r\n4')"
   $ hg ci -m changea
 
   $ hg --config 'patch.eol=LF' qpush
--- a/tests/test-mq-missingfiles.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-missingfiles.t	Mon Oct 22 14:46:06 2018 -0400
@@ -23,11 +23,11 @@
 
   $ hg init normal
   $ cd normal
-  $ $PYTHON ../writelines.py b 10 'a\n'
+  $ "$PYTHON" ../writelines.py b 10 'a\n'
   $ hg ci -Am addb
   adding b
   $ echo a > a
-  $ $PYTHON ../writelines.py b 2 'b\n' 10 'a\n' 2 'c\n'
+  $ "$PYTHON" ../writelines.py b 2 'b\n' 10 'a\n' 2 'c\n'
   $ echo c > c
   $ hg add a c
   $ hg qnew -f changeb
@@ -82,7 +82,7 @@
   $ hg up -qC 0
   $ echo a > a
   $ hg mv b bb
-  $ $PYTHON ../writelines.py bb 2 'b\n' 10 'a\n' 2 'c\n'
+  $ "$PYTHON" ../writelines.py bb 2 'b\n' 10 'a\n' 2 'c\n'
   $ echo c > c
   $ hg add a c
   $ hg qnew changebb
@@ -129,11 +129,11 @@
 
   $ hg init git
   $ cd git
-  $ $PYTHON ../writelines.py b 1 '\x00'
+  $ "$PYTHON" ../writelines.py b 1 '\x00'
   $ hg ci -Am addb
   adding b
   $ echo a > a
-  $ $PYTHON ../writelines.py b 1 '\x01' 1 '\x00'
+  $ "$PYTHON" ../writelines.py b 1 '\x01' 1 '\x00'
   $ echo c > c
   $ hg add a c
   $ hg qnew -f changeb
--- a/tests/test-mq-pull-from-bundle.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-pull-from-bundle.t	Mon Oct 22 14:46:06 2018 -0400
@@ -90,7 +90,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 3 changes to 3 files
-  new changesets d7553909353d
+  new changesets d7553909353d (1 drafts)
   merging series
   2 files updated, 1 files merged, 0 files removed, 0 files unresolved
   $ test -f .hg/patches/hg-bundle* && echo 'temp. bundle file remained' || true
@@ -122,7 +122,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 3 changes to 3 files
-  new changesets d7553909353d
+  new changesets d7553909353d (1 drafts)
   merging series
   2 files updated, 1 files merged, 0 files removed, 0 files unresolved
 
--- a/tests/test-mq-qimport.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-qimport.t	Mon Oct 22 14:46:06 2018 -0400
@@ -149,10 +149,10 @@
 
 build diff with CRLF
 
-  $ $PYTHON ../writelines.py b 5 'a\n' 5 'a\r\n'
+  $ "$PYTHON" ../writelines.py b 5 'a\n' 5 'a\r\n'
   $ hg ci -Am addb
   adding b
-  $ $PYTHON ../writelines.py b 2 'a\n' 10 'b\n' 2 'a\r\n'
+  $ "$PYTHON" ../writelines.py b 2 'a\n' 10 'b\n' 2 'a\r\n'
   $ hg diff > b.diff
   $ hg up -C
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-mq-qpush-fail.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-qpush-fail.t	Mon Oct 22 14:46:06 2018 -0400
@@ -31,7 +31,7 @@
   popping patch2
   popping patch1
   patch queue now empty
-  $ $PYTHON -c 'import sys; getattr(sys.stdout, "buffer", sys.stdout).write(b"\xe9\n")' > message
+  $ "$PYTHON" -c 'import sys; getattr(sys.stdout, "buffer", sys.stdout).write(b"\xe9\n")' > message
   $ cat .hg/patches/bad-patch >> message
   $ mv message .hg/patches/bad-patch
   $ cat > $TESTTMP/wrapplayback.py <<EOF
--- a/tests/test-mq-subrepo-svn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq-subrepo-svn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -24,9 +24,9 @@
 
   $ SVNREPOPATH=`pwd`/svn-repo-2499/project
 #if windows
-  $ SVNREPOURL=file:///`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #else
-  $ SVNREPOURL=file://`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #endif
 
   $ mkdir -p svn-project-2499/trunk
--- a/tests/test-mq.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mq.t	Mon Oct 22 14:46:06 2018 -0400
@@ -73,29 +73,46 @@
   
   list of commands:
   
+  Repository creation:
+  
+   qclone        clone main and patch repository at same time
+  
+  Change creation:
+  
+   qnew          create a new patch
+   qrefresh      update the current patch
+  
+  Change manipulation:
+  
+   qfold         fold the named patches into the current patch
+  
+  Change organization:
+  
    qapplied      print the patches already applied
-   qclone        clone main and patch repository at same time
    qdelete       remove patches from queue
-   qdiff         diff of the current patch and subsequent modifications
    qfinish       move applied patches into repository history
-   qfold         fold the named patches into the current patch
    qgoto         push or pop patches until named patch is at top of stack
    qguard        set or print guards for a patch
    qheader       print the header of the topmost or specified patch
-   qimport       import a patch or existing changeset
-   qnew          create a new patch
    qnext         print the name of the next pushable patch
    qpop          pop the current patch off the stack
    qprev         print the name of the preceding applied patch
    qpush         push the next patch onto the stack
    qqueue        manage multiple patch queues
-   qrefresh      update the current patch
    qrename       rename a patch
    qselect       set or print guarded patches to push
    qseries       print the entire series file
    qtop          print the name of the current patch
    qunapplied    print the patches not yet applied
   
+  File content management:
+  
+   qdiff         diff of the current patch and subsequent modifications
+  
+  Change import/export:
+  
+   qimport       import a patch or existing changeset
+  
   (use 'hg help -v mq' to show built-in aliases and global options)
 
   $ hg init a
@@ -782,7 +799,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 770eb8fce608
+  new changesets 770eb8fce608 (1 drafts)
   (run 'hg update' to get a working copy)
 
 
@@ -1128,9 +1145,9 @@
   > path = sys.argv[1]
   > open(path, 'wb').write(b'BIN\x00ARY')
   > EOF
-  $ $PYTHON writebin.py bucephalus
+  $ "$PYTHON" writebin.py bucephalus
 
-  $ $PYTHON "$TESTDIR/md5sum.py" bucephalus
+  $ "$PYTHON" "$TESTDIR/md5sum.py" bucephalus
   8ba2a2f3e77b55d03051ff9c24ad65e7  bucephalus
   $ hg add bucephalus
   $ hg qnew -f --git addbucephalus
@@ -1149,7 +1166,7 @@
   applying addbucephalus
   now at: addbucephalus
   $ test -f bucephalus
-  $ $PYTHON "$TESTDIR/md5sum.py" bucephalus
+  $ "$PYTHON" "$TESTDIR/md5sum.py" bucephalus
   8ba2a2f3e77b55d03051ff9c24ad65e7  bucephalus
 
 
@@ -1565,7 +1582,7 @@
   > from mercurial.hgweb import wsgicgi
   > import cgitb
   > cgitb.enable()
-  > app = hgweb('.', 'test')
+  > app = hgweb(b'.', b'test')
   > wsgicgi.launch(app)
   > HGWEB
   $ . "$TESTDIR/cgienv"
@@ -1575,7 +1592,7 @@
   $ PATH_INFO=/tags; export PATH_INFO
 #endif
   $ QUERY_STRING='style=raw'
-  $ $PYTHON hgweb.cgi | grep '^tip'
+  $ "$PYTHON" hgweb.cgi | grep '^tip'
   tip	[0-9a-f]{40} (re)
 
   $ cd ..
--- a/tests/test-mv-cp-st-diff.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-mv-cp-st-diff.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1666,4 +1666,19 @@
   @@ -0,0 +1,1 @@
   +change
 
+Check debug output for copy tracing
+
+  $ hg status --copies --rev 'desc(dev)' --rev . --config devel.debug.copies=yes --debug
+  debug.copies: searching copies from a51f36ab1704 to 7935fd48a8f9
+  debug.copies: search mode: forward
+  debug.copies:    looking into rename from a51f36ab1704 to 7935fd48a8f9
+  debug.copies:      search limit: 2
+  debug.copies:      missing file to search: 1
+  debug.copies:        tracing file: renamed
+  debug.copies:          rename of: f
+  debug.copies:          time: * seconds (glob)
+  A renamed
+    f
+  R f
+
   $ cd ..
--- a/tests/test-narrow-clone-no-ellipsis.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-clone-no-ellipsis.t	Mon Oct 22 14:46:06 2018 -0400
@@ -30,10 +30,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -55,11 +53,9 @@
   added 40 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg update
   19 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -97,11 +93,9 @@
   added 40 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg update
   20 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -129,3 +123,39 @@
   dir/src/f9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > path:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > path:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 40 changesets with 21 changes to 21 files
+  new changesets 681085829a73:26ce255d5b5d
+  updating to branch default
+  21 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/src/f12
+  I path:dir/tests
+  $ cd ..
--- a/tests/test-narrow-clone-non-narrow-server.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-clone-non-narrow-server.t	Mon Oct 22 14:46:06 2018 -0400
@@ -31,14 +31,15 @@
   > print(unquote(list(sys.stdin)[1]))
   > EOF
   $ echo hello | hg -R . serve --stdio | \
-  >   $PYTHON unquote.py | grep narrow
+  >   "$PYTHON" unquote.py | tr ' ' '\n' | grep narrow
   narrow=v0
+  exp-narrow-1
 
   $ cd ..
 
   $ hg clone --narrow --include f1 http://localhost:$HGPORT1/ narrowclone
   requesting all changes
-  abort: server doesn't support narrow clones
+  abort: server does not support narrow clones
   [255]
 
 Make a narrow clone (via HGPORT2), then try to narrow and widen
@@ -59,7 +60,5 @@
   looking for local changes to affected paths
   $ hg tracked --addinclude f1 http://localhost:$HGPORT1/
   comparing with http://localhost:$HGPORT1/
-  searching for changes
-  no changes found
-  abort: server doesn't support narrow clones
+  abort: server does not support narrow clones
   [255]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-stream.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,39 @@
+Tests narrow stream clones
+
+  $ . "$TESTDIR/narrow-library.sh"
+
+Server setup
+
+  $ hg init master
+  $ cd master
+  $ mkdir dir
+  $ mkdir dir/src
+  $ cd dir/src
+  $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+
+  $ cd ..
+  $ mkdir tests
+  $ cd tests
+  $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit tests $x"; done
+  $ cd ../../..
+
+Trying to stream clone when the server does not support it
+
+  $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" --stream
+  streaming all changes
+  remote: abort: server does not support narrow stream clones
+  abort: pull failed on remote
+  [255]
+
+Enable stream clone on the server
+
+  $ echo "[server]" >> master/.hg/hgrc
+  $ echo "stream-narrow-clones=True" >> master/.hg/hgrc
+
+Cloning a specific file with stream clone enabled on the server (still aborts: narrow stream clones are not yet implemented)
+
+  $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" --stream
+  streaming all changes
+  remote: abort: server does not support narrow stream clones
+  abort: pull failed on remote
+  [255]
--- a/tests/test-narrow-clone.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-clone.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,6 +16,18 @@
   $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
   $ cd ../../..
 
+Only path: and rootfilesin: pattern prefixes are allowed
+
+  $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --include 'glob:**'
+  abort: invalid prefix on narrow pattern: glob:**
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
+
+  $ hg clone --narrow ssh://user@dummy/master badnarrow --noupdate --exclude 'set:ignored'
+  abort: invalid prefix on narrow pattern: set:ignored
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
+
 narrow clone a file, f10
 
   $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
@@ -34,10 +46,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg tracked
   I path:dir/src/f10
   $ hg update
@@ -51,11 +61,21 @@
 
   $ cd ..
 
+BUG: local-to-local narrow clones should work, but don't.
+
+  $ hg clone --narrow master narrow-via-localpeer --noupdate --include "dir/src/f10"
+  requesting all changes
+  abort: server does not support narrow clones
+  [255]
+  $ hg tracked -R narrow-via-localpeer
+  abort: repository narrow-via-localpeer not found!
+  [255]
+  $ rm -Rf narrow-via-localpeer
+
 narrow clone with a newline should fail
 
   $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10
   > '
-  requesting all changes
   abort: newlines are not allowed in narrowspec paths
   [255]
 
@@ -69,11 +89,9 @@
   added 21 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg tracked
   I path:dir/tests
   X path:dir/tests/t19
@@ -114,11 +132,9 @@
   added 21 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg tracked
   I path:.
   X path:dir/tests
@@ -224,3 +240,52 @@
   dir/tests/t9
 
   $ cd ..
+
+Testing the --narrowspec flag to clone
+
+  $ cat >> narrowspecs <<EOF
+  > %include foo
+  > [include]
+  > path:dir/tests/
+  > path:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: cannot specify other files using '%include' in narrowspec
+  [255]
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > path:dir/tests/
+  > path:dir/src/f12
+  > EOF
+
+  $ hg clone ssh://user@dummy/master specfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 23 changesets with 21 changes to 21 files
+  new changesets c13e3773edb4:26ce255d5b5d
+  updating to branch default
+  21 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd specfile
+  $ hg tracked
+  I path:dir/src/f12
+  I path:dir/tests
+  $ cd ..
+
+Narrow spec with invalid patterns is rejected
+
+  $ cat > narrowspecs <<EOF
+  > [include]
+  > glob:**
+  > EOF
+
+  $ hg clone ssh://user@dummy/master badspecfile --narrowspec narrowspecs
+  reading narrowspec from '$TESTTMP/narrowspecs'
+  abort: invalid prefix on narrow pattern: glob:**
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
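A spec file for `clone --narrowspec`, as exercised above, uses `[include]` and `[exclude]` sections; `%include` directives are rejected, and pattern prefixes other than `path:` and `rootfilesin:` abort the clone. A hedged sketch of a well-formed spec (the paths are illustrative):

  $ cat > narrowspecs <<EOF
  > [include]
  > path:dir/src
  > rootfilesin:dir/tests
  > [exclude]
  > path:dir/src/f12
  > EOF
  $ hg clone ssh://user@dummy/master narrowed --narrowspec narrowspecs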
--- a/tests/test-narrow-commit.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-commit.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,6 +53,7 @@
   * matcher:
   <includematcher includes='(?:(?:|.*/)f1(?:/|$))'>
   f  inside/f1  inside/f1
+  $ hg add .
   $ hg add outside/f1
   abort: cannot track 'outside/f1' - it is outside the narrow clone
   [255]
@@ -90,8 +91,6 @@
   created new head
   $ hg files -r .
   inside/f1
-  outside/f1 (flat !)
-  outside/ (tree !)
   $ hg manifest --debug
   3f4197b4a11b9016e77ebc47fe566944885fd11b 644   inside/f1
   7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644   outside/f1 (flat !)
@@ -103,5 +102,5 @@
 debugdirstate. If we don't do this, the test can be slightly flaky.
   $ sleep 3
   $ hg status
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   n 644         10 set                 inside/f1
--- a/tests/test-narrow-debugcommands.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-debugcommands.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,10 +1,10 @@
   $ . "$TESTDIR/narrow-library.sh"
   $ hg init repo
   $ cd repo
-  $ cat << EOF > .hg/narrowspec
-  > [includes]
+  $ cat << EOF > .hg/store/narrowspec
+  > [include]
   > path:foo
-  > [excludes]
+  > [exclude]
   > EOF
   $ echo treemanifest >> .hg/requires
   $ echo narrowhg-experimental >> .hg/requires
--- a/tests/test-narrow-debugrebuilddirstate.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-debugrebuilddirstate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -21,8 +21,8 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd copy
 
-  $ hg debugdirstate
-  n *         20 unset               foo/bar (glob)
+  $ hg debugdirstate --no-dates
+  n *         20 *               foo/bar (glob)
   $ mv .hg/dirstate .hg/old_dirstate
   $ dd bs=40 count=1 if=.hg/old_dirstate of=.hg/dirstate 2>/dev/null
   $ hg debugdirstate
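Both dirstate tests above switch to `hg debugdirstate --no-dates`, masking the timestamp column that makes mtime-dependent output flaky. A sketch of the stable form (the entry is illustrative; per the tests above, the date field reads `set` or `unset` rather than a raw mtime):

  $ hg debugdirstate --no-dates
  n 644         20 set                 foo/bar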
--- a/tests/test-narrow-exchange.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-exchange.t	Mon Oct 22 14:46:06 2018 -0400
@@ -161,7 +161,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 8 changesets, 10 total revisions
+  checked 8 changesets with 10 changes to 3 files
 
 Can not push to wider repo if change affects paths in wider repo that are
 not also in narrower repo
--- a/tests/test-narrow-expanddirstate.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-expanddirstate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -27,16 +27,16 @@
 
   $ mkdir outside
   $ echo other_contents > outside/f2
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
   $ hg status
 
 `hg status` did not add outside.
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
 
 Unfortunately this is not really a candidate for adding to narrowhg proper,
@@ -115,12 +115,12 @@
 `hg status` will now add outside, but not patchdir.
   $ DIRSTATEINCLUDES=path:outside hg status
   M outside/f2
-  $ grep outside .hg/narrowspec
-  path:outside
-  $ grep outside .hg/dirstate > /dev/null
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep outside
+  I path:outside
+  $ hg files | grep outside > /dev/null
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate
+  $ hg files | grep patchdir
   [1]
 
 Get rid of the modification to outside/f2.
@@ -142,9 +142,9 @@
   1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
   abort: patch failed to apply
   [255]
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg files | grep patchdir > /dev/null
   [1]
 
 Let's make it apply cleanly and see that it *did* expand properly
@@ -159,6 +159,6 @@
   applying $TESTTMP/foo.patch
   $ cat patchdir/f3
   patched_this
-  $ grep patchdir .hg/narrowspec
-  path:patchdir
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg tracked | grep patchdir
+  I path:patchdir
+  $ hg files | grep patchdir > /dev/null
--- a/tests/test-narrow-patterns.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-patterns.t	Mon Oct 22 14:46:06 2018 -0400
@@ -88,15 +88,13 @@
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ cd narrow
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ hg manifest -r tip
   dir1/bar
   dir1/dirA/bar
@@ -144,14 +142,12 @@
   adding file changes
   added 9 changesets with 6 changes to 6 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -206,14 +202,12 @@
   adding file changes
   added 11 changesets with 7 changes to 7 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
-  path:dir2/dirA
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
+  X path:dir2/dirA
   $ find * | sort
   dir1
   dir1/bar
@@ -266,14 +260,12 @@
   adding file changes
   added 13 changesets with 8 changes to 8 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -327,13 +319,11 @@
   adding file changes
   added 13 changesets with 9 changes to 9 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -437,3 +427,43 @@
   |
   o  0 2a4f0c3b67da... root
   
+
+Illegal patterns are rejected
+
+  $ hg tracked --addinclude glob:**
+  abort: invalid prefix on narrow pattern: glob:**
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
+
+  $ hg tracked --addexclude set:ignored
+  abort: invalid prefix on narrow pattern: set:ignored
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
+
+  $ cat .hg/store/narrowspec
+  [include]
+  path:dir1
+  path:dir1/dirA
+  [exclude]
+
+  $ cat > .hg/store/narrowspec << EOF
+  > [include]
+  > glob:**
+  > EOF
+
+  $ hg tracked
+  abort: invalid prefix on narrow pattern: glob:**
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
+
+  $ cat > .hg/store/narrowspec << EOF
+  > [include]
+  > path:.
+  > [exclude]
+  > set:ignored
+  > EOF
+
+  $ hg tracked
+  abort: invalid prefix on narrow pattern: set:ignored
+  (narrow patterns must begin with one of the following: path:, rootfilesin:)
+  [255]
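The tail of this file also documents the narrowspec's new home and format: it now lives in `.hg/store/narrowspec` with `[include]` and `[exclude]` section names (the old `.hg/narrowspec` used `[includes]`/`[excludes]`), and invalid patterns written there are rejected the next time the spec is read. A sketch of the on-disk form (the paths are illustrative):

  $ cat .hg/store/narrowspec
  [include]
  path:dir1
  [exclude]
  path:dir1/dirB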
--- a/tests/test-narrow-pull.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-pull.t	Mon Oct 22 14:46:06 2018 -0400
@@ -166,10 +166,9 @@
 
 We should also be able to unshare without breaking everything:
   $ hg unshare
-  devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
   $ hg verify
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
--- a/tests/test-narrow-rebase.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-rebase.t	Mon Oct 22 14:46:06 2018 -0400
@@ -86,7 +86,6 @@
 
   $ hg update -q 'desc("conflicting outside/f1")'
   $ hg phase -f -d .
-  no phases changed
   $ hg rebase -d 'desc("modify outside/f1")'
   rebasing 4:707c035aadb6 "conflicting outside/f1"
   abort: conflict in file 'outside/f1' is outside narrow clone
--- a/tests/test-narrow-strip.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-strip.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,4 +1,12 @@
 #testcases flat tree
+#testcases lfs-on lfs-off
+
+#if lfs-on
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > lfs =
+  > EOF
+#endif
 
   $ . "$TESTDIR/narrow-library.sh"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-trackedcmd.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,218 @@
+#testcases flat tree
+  $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+  $ cat << EOF >> $HGRCPATH
+  > [experimental]
+  > treemanifest = 1
+  > EOF
+#endif
+
+  $ hg init master
+  $ cd master
+  $ cat >> .hg/hgrc <<EOF
+  > [narrow]
+  > serveellipses=True
+  > EOF
+
+  $ mkdir inside
+  $ echo 'inside' > inside/f
+  $ hg add inside/f
+  $ hg commit -m 'add inside'
+
+  $ mkdir widest
+  $ echo 'widest' > widest/f
+  $ hg add widest/f
+  $ hg commit -m 'add widest'
+
+  $ mkdir outside
+  $ echo 'outside' > outside/f
+  $ hg add outside/f
+  $ hg commit -m 'add outside'
+
+  $ cd ..
+
+narrow clone the inside file
+
+  $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 1 changes to 1 files
+  new changesets *:* (glob)
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd narrow
+  $ hg tracked
+  I path:inside
+  $ ls
+  inside
+  $ cat inside/f
+  inside
+  $ cd ..
+
+add more upstream files which we will include in a wider narrow spec
+
+  $ cd master
+
+  $ mkdir wider
+  $ echo 'wider' > wider/f
+  $ hg add wider/f
+  $ echo 'widest v2' > widest/f
+  $ hg commit -m 'add wider, update widest'
+
+  $ echo 'widest v3' > widest/f
+  $ hg commit -m 'update widest v3'
+
+  $ echo 'inside v2' > inside/f
+  $ hg commit -m 'update inside'
+
+  $ mkdir outside2
+  $ echo 'outside2' > outside2/f
+  $ hg add outside2/f
+  $ hg commit -m 'add outside2'
+
+  $ echo 'widest v4' > widest/f
+  $ hg commit -m 'update widest v4'
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+  $ cd ..
+
+Testing the --import-rules flag of the `hg tracked` command
+
+  $ cd narrow
+  $ hg tracked --import-rules
+  hg tracked: option --import-rules requires argument
+  hg tracked [OPTIONS]... [REMOTE]
+  
+  show or change the current narrowspec
+  
+  options ([+] can be repeated):
+  
+      --addinclude VALUE [+]       new paths to include
+      --removeinclude VALUE [+]    old paths to no longer include
+      --addexclude VALUE [+]       new paths to exclude
+      --import-rules VALUE         import narrowspecs from a file
+      --removeexclude VALUE [+]    old paths to no longer exclude
+      --clear                      whether to replace the existing narrowspec
+      --force-delete-local-changes forces deletion of local changes when
+                                   narrowing
+   -e --ssh CMD                    specify ssh command to use
+      --remotecmd CMD              specify hg command to run on the remote side
+      --insecure                   do not verify server certificate (ignoring
+                                   web.cacerts config)
+  
+  (use 'hg tracked -h' to show more help)
+  [255]
+  $ hg tracked --import-rules doesnotexist
+  abort: cannot read narrowspecs from '$TESTTMP/narrow/doesnotexist': $ENOENT$
+  [255]
+
+  $ cat > specs <<EOF
+  > %include foo
+  > [include]
+  > path:widest/
+  > [exclude]
+  > path:inside/
+  > EOF
+
+  $ hg tracked --import-rules specs
+  abort: including other spec files using '%include' is not supported in narrowspec
+  [255]
+
+  $ cat > specs <<EOF
+  > [include]
+  > outisde
+  > [exclude]
+  > inside
+  > EOF
+
+  $ hg tracked --import-rules specs
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for local changes to affected paths
+  deleting data/inside/f.i
+  deleting meta/inside/00manifest.i (tree !)
+  no changes found
+  saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 0 changes to 0 files
+  new changesets *:* (glob)
+  $ hg tracked
+  I path:outisde
+  X path:inside
+
+Testing the --import-rules flag with --addinclude and --addexclude
+
+  $ cat > specs <<EOF
+  > [include]
+  > widest
+  > EOF
+
+  $ hg tracked --import-rules specs --addinclude 'wider/'
+  comparing with ssh://user@dummy/master
+  searching for changes
+  no changes found
+  saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 1 changes to 1 files
+  new changesets *:* (glob)
+  $ hg tracked
+  I path:outisde
+  I path:wider
+  I path:widest
+  X path:inside
+
+  $ cat > specs <<EOF
+  > [exclude]
+  > outside2
+  > EOF
+
+  $ hg tracked --import-rules specs --addexclude 'widest'
+  comparing with ssh://user@dummy/master
+  searching for changes
+  looking for local changes to affected paths
+  deleting data/widest/f.i
+  deleting meta/widest/00manifest.i (tree !)
+  $ hg tracked
+  I path:outisde
+  I path:wider
+  X path:inside
+  X path:outside2
+  X path:widest
+
+  $ hg tracked --import-rules specs --clear
+  The --clear option is not yet supported.
+  [1]
+
+Testing with a spec file outside the working directory
+
+  $ cat > ../nspecs <<EOF
+  > [include]
+  > widest
+  > EOF
+
+  $ hg tracked --import-rules ../nspecs
+  comparing with ssh://user@dummy/master
+  searching for changes
+  no changes found
+  saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 0 changes to 0 files
+  new changesets *:* (glob)
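As the runs above show, `hg tracked --import-rules` reads `[include]`/`[exclude]` sections from a local file and composes with `--addinclude`/`--addexclude` in the same invocation. A minimal sketch (the file name and paths are illustrative):

  $ cat > rules <<EOF
  > [include]
  > path:src
  > [exclude]
  > path:src/vendored
  > EOF
  $ hg tracked --import-rules rules --addinclude path:docs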
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-widen-no-ellipsis.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,419 @@
+#testcases tree flat
+  $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+  $ cat << EOF >> $HGRCPATH
+  > [experimental]
+  > treemanifest = 1
+  > EOF
+#endif
+
+  $ hg init master
+  $ cd master
+
+  $ mkdir inside
+  $ echo 'inside' > inside/f
+  $ hg add inside/f
+  $ hg commit -m 'add inside'
+
+  $ mkdir widest
+  $ echo 'widest' > widest/f
+  $ hg add widest/f
+  $ hg commit -m 'add widest'
+
+  $ mkdir outside
+  $ echo 'outside' > outside/f
+  $ hg add outside/f
+  $ hg commit -m 'add outside'
+
+  $ cd ..
+
+narrow clone the inside file
+
+  $ hg clone --narrow ssh://user@dummy/master narrow
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 0 changes to 0 files
+  new changesets *:* (glob)
+  updating to branch default
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd narrow
+  $ hg tracked
+  $ hg files
+  [1]
+
+widen from an empty clone
+
+  $ hg tracked --addinclude inside
+  comparing with ssh://user@dummy/master
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 1 files
+  $ hg tracked
+  I path:inside
+  $ ls
+  inside
+  $ cat inside/f
+  inside
+  $ cd ..
+
+add more upstream files which we will include in a wider narrow spec
+
+  $ cd master
+
+  $ mkdir wider
+  $ echo 'wider' > wider/f
+  $ hg add wider/f
+  $ echo 'widest v2' > widest/f
+  $ hg commit -m 'add wider, update widest'
+
+  $ echo 'widest v3' > widest/f
+  $ hg commit -m 'update widest v3'
+
+  $ echo 'inside v2' > inside/f
+  $ hg commit -m 'update inside'
+
+  $ mkdir outside2
+  $ echo 'outside2' > outside2/f
+  $ hg add outside2/f
+  $ hg commit -m 'add outside2'
+
+  $ echo 'widest v4' > widest/f
+  $ hg commit -m 'update widest v4'
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+  $ cd ..
+
+Widen the narrow spec to see the widest file. This should not get the newly
+added upstream revisions.
+
+  $ cd narrow
+  $ hg id -n
+  2
+
+  $ hg tracked --addinclude widest/f --debug
+  comparing with ssh://user@dummy/master
+  running python "*dummyssh" *user@dummy* *hg -R master serve --stdio* (glob)
+  sending hello command
+  sending between command
+  remote: * (glob)
+  remote: capabilities: * (glob)
+  remote: 1
+  sending protocaps command
+  query 1; heads
+  sending batch command
+  searching for changes
+  all local heads known remotely
+  sending narrow_widen command
+  bundle2-input-bundle: with-transaction
+  bundle2-input-part: "changegroup" (params: * mandatory) supported (glob)
+  adding changesets
+  adding manifests
+  adding widest/ revisions (tree !)
+  adding file changes
+  adding widest/f revisions (tree !)
+  added 0 changesets with 1 changes to 1 files
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-bundle: 0 parts total
+   widest/f: add from widened narrow clone -> g
+  getting widest/f
+  $ hg tracked
+  I path:inside
+  I path:widest/f
+
+  $ cat widest/f
+  widest
+
+  $ hg id -n
+  2
+
+Pull down the newly added upstream revision.
+
+  $ hg pull
+  pulling from ssh://user@dummy/master
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 4 changes to 2 files
+  new changesets *:* (glob)
+  (run 'hg update' to get a working copy)
+  $ hg update -r 'desc("add wider")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cat widest/f
+  widest v2
+
+  $ hg update -r 'desc("update inside")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v3
+  $ cat inside/f
+  inside v2
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+Check that widening with a newline fails
+
+  $ hg tracked --addinclude 'widest
+  > '
+  abort: newlines are not allowed in narrowspec paths
+  [255]
+
+widen the narrow spec to include the wider file
+
+  $ hg tracked --addinclude wider
+  comparing with ssh://user@dummy/master
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 1 files
+  $ hg tracked
+  I path:inside
+  I path:wider
+  I path:widest/f
+  $ hg update 'desc("add widest")'
+  2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ cat widest/f
+  widest
+  $ hg update 'desc("add wider, update widest")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat wider/f
+  wider
+  $ cat widest/f
+  widest v2
+  $ hg update 'desc("update widest v3")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v3
+  $ hg update 'desc("update widest v4")'
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v4
+
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
+
+separate suite of tests: files d0-d10, modified in changesets 0-10. This allows
+more obvious, precise tests that tickle particular corner cases.
+
+  $ cd ..
+  $ hg init upstream
+  $ cd upstream
+  $ for x in `$TESTDIR/seq.py 0 10`
+  > do
+  >   mkdir d$x
+  >   echo $x > d$x/f
+  >   hg add d$x/f
+  >   hg commit -m "add d$x/f"
+  > done
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+
+make narrow clone with every third node.
+
+  $ cd ..
+  $ hg clone --narrow ssh://user@dummy/upstream narrow2 --include d0 --include d3 --include d6 --include d9
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 11 changesets with 4 changes to 4 files
+  new changesets *:* (glob)
+  updating to branch default
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd narrow2
+  $ hg tracked
+  I path:d0
+  I path:d3
+  I path:d6
+  I path:d9
+  $ hg verify
+  checking changesets
+  checking manifests
+  checking directory manifests (tree !)
+  crosschecking files in changesets and manifests
+  checking files
+  checked 11 changesets with 4 changes to 4 files
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg tracked --addinclude d1
+  comparing with ssh://user@dummy/upstream
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 1 files
+  $ hg tracked
+  I path:d0
+  I path:d1
+  I path:d3
+  I path:d6
+  I path:d9
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+
+Verify shouldn't claim the repo is corrupt after a widen.
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  checking directory manifests (tree !)
+  crosschecking files in changesets and manifests
+  checking files
+  checked 11 changesets with 5 changes to 5 files
+
+Widening preserves parent of local commit
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
+  $ cd narrow3
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg pull -q -r 3
+  $ hg co -q tip
+  $ hg pull -q -r 4
+  $ echo local > d2/f
+  $ hg ci -m local
+  created new head
+  $ hg tracked -q --addinclude d0 --addinclude d9
+
+Widening preserves bookmarks
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream narrow-bookmarks --include d4
+  $ cd narrow-bookmarks
+  $ echo local > d4/f
+  $ hg ci -m local
+  $ hg bookmarks bookmark
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg -q tracked --addinclude d2
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg log -r bookmark -T '{desc}\n'
+  local
+
+Widening that fails can be recovered from
+
+  $ cd ..
+  $ hg clone -q --narrow ssh://user@dummy/upstream interrupted --include d0
+  $ cd interrupted
+  $ echo local > d0/f
+  $ hg ci -m local
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks bookmark
+  $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1
+  comparing with ssh://user@dummy/upstream
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 1 changes to 1 files
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
+  $ hg unbundle .hg/strip-backup/*-widen.hg
+  abort: .hg/strip-backup/*-widen.hg: $ENOTDIR$ (windows !)
+  abort: $ENOENT$: .hg/strip-backup/*-widen.hg (no-windows !)
+  [255]
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  11: local
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
+  $ hg bookmarks
+   * bookmark                  11:* (glob)
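The widening flow this new test walks through has two separate steps: growing the spec with `hg tracked --addinclude`, which contacts the server and backfills the file data now in scope, and a plain `hg pull` to bring down newer changesets. A condensed sketch (the path is illustrative):

  $ hg tracked --addinclude d1   # widen: backfills d1's file history
  $ hg tracked                   # the new include now lists with an I prefix
  $ hg pull                      # newer upstream changesets arrive separately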
--- a/tests/test-narrow-widen.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow-widen.t	Mon Oct 22 14:46:06 2018 -0400
@@ -76,23 +76,23 @@
   $ echo 'widest v4' > widest/f
   $ hg commit -m 'update widest v4'
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: update widest v4 (glob)
-  *: add outside2 (glob)
-  *: update inside (glob)
-  *: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  *: add outside (glob)
-  *: add widest (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  2: add outside
+  1: add widest
+  0: add inside
 
   $ cd ..
 
-Widen the narrow spec to see the wider file. This should not get the newly
+Widen the narrow spec to see the widest file. This should not get the newly
 added upstream revisions.
 
   $ cd narrow
-  $ hg tracked --addinclude wider/f
+  $ hg tracked --addinclude widest/f
   comparing with ssh://user@dummy/master
   searching for changes
   no changes found
@@ -100,11 +100,14 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 1 changes to 1 files
+  added 3 changesets with 2 changes to 2 files
   new changesets *:* (glob)
   $ hg tracked
   I path:inside
-  I path:wider/f
+  I path:widest/f
+
+  $ cat widest/f
+  widest
 
 Pull down the newly added upstream revision.
 
@@ -114,28 +117,30 @@
   adding changesets
   adding manifests
   adding file changes
-  added 4 changesets with 2 changes to 2 files
+  added 5 changesets with 4 changes to 2 files
   new changesets *:* (glob)
   (run 'hg update' to get a working copy)
   $ hg update -r 'desc("add wider")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat wider/f
-  wider
+  $ cat widest/f
+  widest v2
 
   $ hg update -r 'desc("update inside")'
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat wider/f
-  wider
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat widest/f
+  widest v3
   $ cat inside/f
   inside v2
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: update widest v4 (glob)
-  *: update inside (glob)
-  ...*: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  ...*: add outside (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  ...6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  ...2: add outside
+  1: add widest
+  0: add inside
 
 Check that widening with a newline fails
 
@@ -144,9 +149,9 @@
   abort: newlines are not allowed in narrowspec paths
   [255]
 
-widen the narrow spec to include the widest file
+widen the narrow spec to include the wider file
 
-  $ hg tracked --addinclude widest
+  $ hg tracked --addinclude wider
   comparing with ssh://user@dummy/master
   searching for changes
   no changes found
@@ -158,8 +163,8 @@
   new changesets *:* (glob)
   $ hg tracked
   I path:inside
-  I path:wider/f
-  I path:widest
+  I path:wider
+  I path:widest/f
   $ hg update 'desc("add widest")'
   2 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cat widest/f
@@ -179,15 +184,15 @@
   $ cat widest/f
   widest v4
 
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: update widest v4 (glob)
-  ...*: add outside2 (glob)
-  *: update inside (glob)
-  *: update widest v3 (glob)
-  *: add wider, update widest (glob)
-  ...*: add outside (glob)
-  *: add widest (glob)
-  *: add inside (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  7: update widest v4
+  ...6: add outside2
+  5: update inside
+  4: update widest v3
+  3: add wider, update widest
+  ...2: add outside
+  1: add widest
+  0: add inside
 
 separate suite of tests: files from 0-10 modified in changes 0-10. This allows
 more obvious precise tests tickling particular corner cases.
@@ -206,18 +211,18 @@
   >   hg add d$x/f
   >   hg commit -m "add d$x/f"
   > done
-  $ hg log -T "{node|short}: {desc}\n"
-  *: add d10/f (glob)
-  *: add d9/f (glob)
-  *: add d8/f (glob)
-  *: add d7/f (glob)
-  *: add d6/f (glob)
-  *: add d5/f (glob)
-  *: add d4/f (glob)
-  *: add d3/f (glob)
-  *: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
 
 make narrow clone with every third node.
 
@@ -243,16 +248,16 @@
   checking directory manifests (tree !)
   crosschecking files in changesets and manifests
   checking files
-  4 files, 8 changesets, 4 total revisions
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: add d10/f (glob)
-  *: add d9/f (glob)
-  ...*: add d8/f (glob)
-  *: add d6/f (glob)
-  ...*: add d5/f (glob)
-  *: add d3/f (glob)
-  ...*: add d2/f (glob)
-  *: add d0/f (glob)
+  checked 8 changesets with 4 changes to 4 files
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  ...7: add d10/f
+  6: add d9/f
+  ...5: add d8/f
+  4: add d6/f
+  ...3: add d5/f
+  2: add d3/f
+  ...1: add d2/f
+  0: add d0/f
   $ hg tracked --addinclude d1
   comparing with ssh://user@dummy/upstream
   searching for changes
@@ -269,16 +274,16 @@
   I path:d3
   I path:d6
   I path:d9
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  ...*: add d10/f (glob)
-  *: add d9/f (glob)
-  ...*: add d8/f (glob)
-  *: add d6/f (glob)
-  ...*: add d5/f (glob)
-  *: add d3/f (glob)
-  ...*: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  ...8: add d10/f
+  7: add d9/f
+  ...6: add d8/f
+  5: add d6/f
+  ...4: add d5/f
+  3: add d3/f
+  ...2: add d2/f
+  1: add d1/f
+  0: add d0/f
 
 Verify shouldn't claim the repo is corrupt after a widen.
 
@@ -288,16 +293,16 @@
   checking directory manifests (tree !)
   crosschecking files in changesets and manifests
   checking files
-  5 files, 9 changesets, 5 total revisions
+  checked 9 changesets with 5 changes to 5 files
 
 Widening preserves parent of local commit
 
   $ cd ..
   $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
   $ cd narrow3
-  $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
-  *: add d2/f (glob)
-  ...*: add d1/f (glob)
+  $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+  1: add d2/f
+  ...0: add d1/f
   $ hg pull -q -r 3
   $ hg co -q tip
   $ hg pull -q -r 4
--- a/tests/test-narrow.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-narrow.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,4 +1,12 @@
 #testcases flat tree
+#testcases lfs-on lfs-off
+
+#if lfs-on
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > lfs =
+  > EOF
+#endif
 
   $ . "$TESTDIR/narrow-library.sh"
 
@@ -22,31 +30,28 @@
   >   hg add d$x/f
   >   hg commit -m "add d$x/f"
   > done
-  $ hg log -T "{node|short}: {desc}\n"
-  *: add d10/f (glob)
-  *: add d9/f (glob)
-  *: add d8/f (glob)
-  *: add d7/f (glob)
-  *: add d6/f (glob)
-  *: add d5/f (glob)
-  *: add d4/f (glob)
-  *: add d3/f (glob)
-  *: add d2/f (glob)
-  *: add d1/f (glob)
-  *: add d0/f (glob)
+  $ hg log -T "{rev}: {desc}\n"
+  10: add d10/f
+  9: add d9/f
+  8: add d8/f
+  7: add d7/f
+  6: add d6/f
+  5: add d5/f
+  4: add d4/f
+  3: add d3/f
+  2: add d2/f
+  1: add d1/f
+  0: add d0/f
   $ cd ..
 
 Error if '.' or '..' are in the directory to track.
   $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf
-  requesting all changes
   abort: "." and ".." are not allowed in narrowspec paths
   [255]
   $ hg clone --narrow ssh://user@dummy/master foo --include asdf/..
-  requesting all changes
   abort: "." and ".." are not allowed in narrowspec paths
   [255]
   $ hg clone --narrow ssh://user@dummy/master foo --include a/./c
-  requesting all changes
   abort: "." and ".." are not allowed in narrowspec paths
   [255]
 
@@ -111,15 +116,15 @@
   d6/f
   $ hg verify -q
 Force deletion of local changes
-  $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
-  *: local change to d3  (glob)
-  *: local change to d0  (glob)
-  *: add d10/f outsidenarrow (glob)
-  *: add d6/f  (glob)
-  *: add d5/f outsidenarrow (glob)
-  *: add d3/f  (glob)
-  *: add d2/f outsidenarrow (glob)
-  *: add d0/f  (glob)
+  $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
+  8: local change to d3 
+  6: local change to d0 
+  5: add d10/f outsidenarrow
+  4: add d6/f 
+  3: add d5/f outsidenarrow
+  2: add d3/f 
+  1: add d2/f outsidenarrow
+  0: add d0/f 
   $ hg tracked --removeinclude d0 --force-delete-local-changes
   comparing with ssh://user@dummy/master
   searching for changes
@@ -133,14 +138,14 @@
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
 
-  $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
-  *: local change to d3  (glob)
-  *: add d10/f outsidenarrow (glob)
-  *: add d6/f  (glob)
-  *: add d5/f outsidenarrow (glob)
-  *: add d3/f  (glob)
-  *: add d2/f outsidenarrow (glob)
-  *: add d0/f outsidenarrow (glob)
+  $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
+  7: local change to d3 
+  5: add d10/f outsidenarrow
+  4: add d6/f 
+  3: add d5/f outsidenarrow
+  2: add d3/f 
+  1: add d2/f outsidenarrow
+  0: add d0/f outsidenarrow
 Can restore stripped local changes after widening
   $ hg tracked --addinclude d0 -q
   $ hg unbundle .hg/strip-backup/*-narrow.hg -q
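Narrowing is the inverse operation tested here: `hg tracked --removeinclude` strips local data for the dropped paths, `--force-delete-local-changes` permits discarding local commits that touch them, and the stripped changesets land in a `.hg/strip-backup/*-narrow.hg` bundle, so re-widening plus an unbundle restores them. A condensed sketch of that round trip, following the commands above:

  $ hg tracked --removeinclude d0 --force-delete-local-changes
  $ hg tracked --addinclude d0 -q
  $ hg unbundle .hg/strip-backup/*-narrow.hg -q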
--- a/tests/test-newcgi.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-newcgi.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,15 +52,15 @@
   $ chmod 755 hgwebdir.cgi
 
   $ . "$TESTDIR/cgienv"
-  $ $PYTHON hgweb.cgi > page1
-  $ $PYTHON hgwebdir.cgi > page2
+  $ "$PYTHON" hgweb.cgi > page1
+  $ "$PYTHON" hgwebdir.cgi > page2
 
   $ PATH_INFO="/test/"
   $ PATH_TRANSLATED="/var/something/test.cgi"
   $ REQUEST_URI="/test/test/"
   $ SCRIPT_URI="http://hg.omnifarious.org/test/test/"
   $ SCRIPT_URL="/test/test/"
-  $ $PYTHON hgwebdir.cgi > page3
+  $ "$PYTHON" hgwebdir.cgi > page3
 
   $ grep -i error page1 page2 page3
   [1]
--- a/tests/test-newercgi.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-newercgi.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
   > from mercurial.hgweb import hgweb
   > from mercurial.hgweb import wsgicgi
   > 
-  > application = hgweb("test", "Empty test repository")
+  > application = hgweb(b"test", b"Empty test repository")
   > wsgicgi.launch(application)
   > HGWEB
 
@@ -39,22 +39,22 @@
   > from mercurial.hgweb import hgwebdir
   > from mercurial.hgweb import wsgicgi
   > 
-  > application = hgwebdir("hgweb.config")
+  > application = hgwebdir(b"hgweb.config")
   > wsgicgi.launch(application)
   > HGWEBDIR
 
   $ chmod 755 hgwebdir.cgi
 
   $ . "$TESTDIR/cgienv"
-  $ $PYTHON hgweb.cgi > page1
-  $ $PYTHON hgwebdir.cgi > page2
+  $ "$PYTHON" hgweb.cgi > page1
+  $ "$PYTHON" hgwebdir.cgi > page2
 
   $ PATH_INFO="/test/"
   $ PATH_TRANSLATED="/var/something/test.cgi"
   $ REQUEST_URI="/test/test/"
   $ SCRIPT_URI="http://hg.omnifarious.org/test/test/"
   $ SCRIPT_URL="/test/test/"
-  $ $PYTHON hgwebdir.cgi > page3
+  $ "$PYTHON" hgwebdir.cgi > page3
 
   $ grep -i error page1 page2 page3
   [1]
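The recurring change from `$PYTHON` to `"$PYTHON"` in these CGI tests and the notify tests below is ordinary shell quoting hygiene: unquoted, an interpreter path containing whitespace is word-split before execution. A sketch of the failure mode (the path is hypothetical):

  $ PYTHON='/opt/my python/bin/python3'
  $ $PYTHON hgweb.cgi > page1     # word-splits: runs '/opt/my' with bogus args
  $ "$PYTHON" hgweb.cgi > page1   # quoted: the full path survives intact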
--- a/tests/test-no-symlinks.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-no-symlinks.t	Mon Oct 22 14:46:06 2018 -0400
@@ -48,7 +48,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 6 changes to 6 files
-  new changesets d326ae2d01ee:71d85cf3ba90
+  new changesets d326ae2d01ee:71d85cf3ba90 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg update
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-nointerrupt.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-nointerrupt.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,3 +1,5 @@
+#require no-windows
+
 Dummy extension simulating unsafe long running command
   $ cat > sleepext.py <<EOF
   > import itertools
--- a/tests/test-notify-changegroup.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-notify-changegroup.t	Mon Oct 22 14:46:06 2018 -0400
@@ -39,7 +39,7 @@
 push
 
   $ hg --traceback --cwd b push ../a 2>&1 |
-  >     $PYTHON -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
+  >     "$PYTHON" -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
   pushing to ../a
   searching for changes
   adding changesets
@@ -85,7 +85,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets cb9a9f314b8b:ba677d0156c1
+  new changesets cb9a9f314b8b:ba677d0156c1 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg --cwd a rollback
   repository tip rolled back to revision -1 (undo unbundle)
@@ -93,12 +93,12 @@
 unbundle with correct source
 
   $ hg --config notify.sources=unbundle --cwd a unbundle ../test.hg 2>&1 |
-  >     $PYTHON -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
+  >     "$PYTHON" -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
   adding changesets
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets cb9a9f314b8b:ba677d0156c1
+  new changesets cb9a9f314b8b:ba677d0156c1 (2 drafts)
   MIME-Version: 1.0
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
@@ -169,7 +169,7 @@
 push
 
   $ hg --traceback --cwd b --config notify.fromauthor=True push ../a 2>&1 |
-  >     $PYTHON -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
+  >     "$PYTHON" -c 'from __future__ import print_function ; import sys,re; print(re.sub("\n\t", " ", sys.stdin.read()), end="")'
   pushing to ../a
   searching for changes
   adding changesets
--- a/tests/test-notify.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-notify.t	Mon Oct 22 14:46:06 2018 -0400
@@ -190,7 +190,7 @@
 of the very long subject line
 pull (minimal config)
 
-  $ hg --traceback --cwd b pull ../a | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -249,7 +249,7 @@
 
   $ hg --cwd b rollback
   repository tip rolled back to revision 0 (undo pull)
-  $ hg --traceback --cwd b pull ../a  | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --cwd b pull ../a  | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -297,7 +297,7 @@
 
   $ hg --cwd b rollback
   repository tip rolled back to revision 0 (undo pull)
-  $ hg --traceback --config notify.maxdiffstat=1 --cwd b pull ../a | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --config notify.maxdiffstat=1 --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -348,7 +348,7 @@
   (branch merge, don't forget to commit)
   $ hg ci -m merge -d '3 0'
   $ cd ..
-  $ hg --traceback --cwd b pull ../a | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -412,9 +412,9 @@
   > EOF
   $ echo a >> a/a
   $ hg --cwd a --encoding utf-8 commit -A -d '0 0' \
-  >   -m `$PYTHON -c 'print "\xc3\xa0\xc3\xa1\xc3\xa2\xc3\xa3\xc3\xa4"'`
+  >   -m `"$PYTHON" -c 'print("\xc3\xa0\xc3\xa1\xc3\xa2\xc3\xa3\xc3\xa4")'`
   $ hg --traceback --cwd b --encoding utf-8 pull ../a | \
-  >   $PYTHON $TESTTMP/filter.py
+  >   "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -455,7 +455,7 @@
   > test = False
   > mbox = mbox
   > EOF
-  $ $PYTHON -c 'open("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
+  $ "$PYTHON" -c 'open("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
   $ hg --cwd a commit -A -m "long line"
   $ hg --traceback --cwd b pull ../a
   pulling from ../a
@@ -467,7 +467,7 @@
   new changesets a846b5f6ebb7
   notify: sending 2 subscribers 1 changes
   (run 'hg update' to get a working copy)
-  $ $PYTHON $TESTTMP/filter.py < b/mbox
+  $ "$PYTHON" $TESTTMP/filter.py < b/mbox
   From test@test.com ... ... .. ..:..:.. .... (re)
   MIME-Version: 1.0
   Content-Type: text/plain; charset="*" (glob)
@@ -527,7 +527,7 @@
   (branches are permanent and global, did you want a bookmark?)
   $ echo a >> a/a
   $ hg --cwd a ci -m test -d '1 0'
-  $ hg --traceback --cwd b pull ../a | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -557,7 +557,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo a >> a/a
   $ hg --cwd a ci -m test -d '1 0'
-  $ hg --traceback --cwd b pull ../a | $PYTHON $TESTTMP/filter.py
+  $ hg --traceback --cwd b pull ../a | "$PYTHON" $TESTTMP/filter.py
   pulling from ../a
   searching for changes
   adding changesets
@@ -586,7 +586,7 @@
   $ mv "$HGRCPATH.new" $HGRCPATH
   $ echo a >> a/a
   $ hg --cwd a commit -m 'default template'
-  $ hg --cwd b pull ../a -q | $PYTHON $TESTTMP/filter.py
+  $ hg --cwd b pull ../a -q | "$PYTHON" $TESTTMP/filter.py
   MIME-Version: 1.0
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
@@ -615,7 +615,7 @@
   > EOF
   $ echo a >> a/a
   $ hg --cwd a commit -m 'with style'
-  $ hg --cwd b pull ../a -q | $PYTHON $TESTTMP/filter.py
+  $ hg --cwd b pull ../a -q | "$PYTHON" $TESTTMP/filter.py
   MIME-Version: 1.0
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
@@ -638,7 +638,7 @@
   > EOF
   $ echo a >> a/a
   $ hg --cwd a commit -m 'with template'
-  $ hg --cwd b pull ../a -q | $PYTHON $TESTTMP/filter.py
+  $ hg --cwd b pull ../a -q | "$PYTHON" $TESTTMP/filter.py
   MIME-Version: 1.0
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
--- a/tests/test-obsmarker-template.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsmarker-template.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1450,7 +1450,7 @@
   added 1 changesets with 0 changes to 1 files (+1 heads)
   2 new obsolescence markers
   obsoleted 1 changesets
-  new changesets 7a230b46bf61
+  new changesets 7a230b46bf61 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg log --hidden -G
   o  changeset:   2:7a230b46bf61
@@ -2591,7 +2591,7 @@
   > [extensions]
   > amend =
   > EOF
-  $ $PYTHON <<'EOF'
+  $ "$PYTHON" <<'EOF'
   > with open('test1', 'wb') as f:
   >    f.write(b't\xe8st1') and None
   > with open('test2', 'wb') as f:
--- a/tests/test-obsolete-bundle-strip.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsolete-bundle-strip.t	Mon Oct 22 14:46:06 2018 -0400
@@ -170,6 +170,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-A1")'
@@ -207,7 +208,7 @@
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
   # unbundling: 2 new obsolescence markers
   # unbundling: obsoleted 1 changesets
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-A")'
@@ -247,7 +248,8 @@
   # unbundling: adding file changes
   # unbundling: added 2 changesets with 2 changes to 2 files (+1 heads)
   # unbundling: 3 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 chain with prune children
@@ -339,6 +341,7 @@
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files
   # unbundling: 1 new obsolescence markers
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg update' to get a working copy)
 
   $ testrevs 'desc("C-A1")'
@@ -374,7 +377,7 @@
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
   # unbundling: 1 new obsolescence markers
   # unbundling: obsoleted 1 changesets
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
   # unbundling: (run 'hg heads' to see heads)
 
 bundling multiple revisions
@@ -436,7 +439,8 @@
   # unbundling: adding file changes
   # unbundling: added 3 changesets with 3 changes to 3 files (+1 heads)
   # unbundling: 3 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
+  # unbundling: (2 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 chain with precursors also pruned
@@ -503,6 +507,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-A1")'
@@ -537,7 +542,7 @@
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
   # unbundling: 1 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-A")'
@@ -577,7 +582,8 @@
   # unbundling: adding file changes
   # unbundling: added 2 changesets with 2 changes to 2 files (+1 heads)
   # unbundling: 3 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 chain with missing prune
@@ -661,7 +667,7 @@
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files
   # unbundling: 3 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
   # unbundling: (run 'hg update' to get a working copy)
 
 chain with precursors also pruned
@@ -741,7 +747,7 @@
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files
   # unbundling: 3 new obsolescence markers
-  # unbundling: new changesets cf2c22470d67
+  # unbundling: new changesets cf2c22470d67 (1 drafts)
   # unbundling: (run 'hg update' to get a working copy)
 
 Chain with fold and split
@@ -836,6 +842,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-B")'
@@ -864,6 +871,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-C")'
@@ -892,6 +900,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-D")'
@@ -920,6 +929,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
   $ testrevs 'desc("C-E")'
@@ -984,7 +994,7 @@
   # unbundling: added 1 changesets with 1 changes to 1 files (+1 heads)
   # unbundling: 6 new obsolescence markers
   # unbundling: obsoleted 3 changesets
-  # unbundling: new changesets 2f20ff6509f0
+  # unbundling: new changesets 2f20ff6509f0 (1 drafts)
   # unbundling: (run 'hg heads' to see heads)
 
 Bundle multiple revisions
@@ -1018,6 +1028,7 @@
   # unbundling: adding manifests
   # unbundling: adding file changes
   # unbundling: added 2 changesets with 2 changes to 2 files (+2 heads)
+  # unbundling: (2 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 * top one and other divergent
@@ -1086,7 +1097,8 @@
   # unbundling: added 2 changesets with 2 changes to 2 files (+2 heads)
   # unbundling: 7 new obsolescence markers
   # unbundling: obsoleted 2 changesets
-  # unbundling: new changesets 2f20ff6509f0
+  # unbundling: new changesets 2f20ff6509f0 (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 * top one and initial precursors
@@ -1154,7 +1166,8 @@
   # unbundling: added 2 changesets with 2 changes to 2 files (+2 heads)
   # unbundling: 6 new obsolescence markers
   # unbundling: obsoleted 3 changesets
-  # unbundling: new changesets 2f20ff6509f0
+  # unbundling: new changesets 2f20ff6509f0 (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 * top one and one of the split
@@ -1223,7 +1236,8 @@
   # unbundling: added 2 changesets with 2 changes to 2 files (+2 heads)
   # unbundling: 7 new obsolescence markers
   # unbundling: obsoleted 2 changesets
-  # unbundling: new changesets 2f20ff6509f0
+  # unbundling: new changesets 2f20ff6509f0 (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 * all
@@ -1298,7 +1312,8 @@
   # unbundling: adding file changes
   # unbundling: added 5 changesets with 5 changes to 5 files (+4 heads)
   # unbundling: 9 new obsolescence markers
-  # unbundling: new changesets 2f20ff6509f0
+  # unbundling: new changesets 2f20ff6509f0 (1 drafts)
+  # unbundling: (4 other changesets obsolete on arrival)
   # unbundling: (run 'hg heads' to see heads)
 
 changeset pruned on its own
@@ -1372,6 +1387,7 @@
   # unbundling: adding file changes
   # unbundling: added 1 changesets with 1 changes to 1 files
   # unbundling: 1 new obsolescence markers
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg update' to get a working copy)
   $ testrevs 'desc("C-")'
   ### Matched revisions###
@@ -1400,5 +1416,6 @@
   # unbundling: adding file changes
   # unbundling: added 2 changesets with 2 changes to 2 files
   # unbundling: 1 new obsolescence markers
-  # unbundling: new changesets 9ac430e15fca
+  # unbundling: new changesets 9ac430e15fca (1 drafts)
+  # unbundling: (1 other changesets obsolete on arrival)
   # unbundling: (run 'hg update' to get a working copy)
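
The expectation churn above and in the next few files tracks two new lines in the pull/unbundle report: "new changesets ... (N drafts)" now appends a draft-phase count, and changesets that arrive already obsoleted get their own "(N other changesets obsolete on arrival)" line. As a rough cross-check (a sketch, not part of the test suite; output elided), both counts can be recomputed after a pull with standard revsets:

  $ hg log -r 'draft()' -T '{rev}\n' | wc -l
  $ hg log --hidden -r 'obsolete()' -T '{rev}\n' | wc -l
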
--- a/tests/test-obsolete-changeset-exchange.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsolete-changeset-exchange.t	Mon Oct 22 14:46:06 2018 -0400
@@ -51,7 +51,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
 Adding a changeset going extinct locally
 ------------------------------------------
@@ -85,6 +85,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 1 files (+1 heads)
+  (1 other changesets obsolete on arrival)
   (run 'hg heads' to see heads)
 
 check-that bundle can contain markers:
--- a/tests/test-obsolete-checkheads.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsolete-checkheads.t	Mon Oct 22 14:46:06 2018 -0400
@@ -277,6 +277,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
+  (1 other changesets obsolete on arrival)
   (run 'hg heads' to see heads)
   $ hg push
   pushing to $TESTTMP/remote
--- a/tests/test-obsolete-distributed.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsolete-distributed.t	Mon Oct 22 14:46:06 2018 -0400
@@ -144,7 +144,7 @@
   added 1 changesets with 1 changes to 1 files (+1 heads)
   1 new obsolescence markers
   obsoleted 1 changesets
-  new changesets 391a2bf12b1b
+  new changesets 391a2bf12b1b (1 drafts)
   (run 'hg heads' to see heads)
   $ hg log -G
   o  4:391a2bf12b1b c_B1
@@ -271,7 +271,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets d33b0a3a6464:ef908e42ce65
+  new changesets d33b0a3a6464:ef908e42ce65 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 'desc("c_A")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -319,7 +319,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   3 new obsolescence markers
-  new changesets 5b5708a437f2:956063ac4557
+  new changesets 5b5708a437f2:956063ac4557 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 'desc("c_A")'
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -390,7 +390,7 @@
   added 2 changesets with 0 changes to 2 files (+1 heads)
   6 new obsolescence markers
   obsoleted 2 changesets
-  new changesets 9866d64649a5:77ae25d99ff0
+  new changesets 9866d64649a5:77ae25d99ff0 (2 drafts)
   (run 'hg heads' to see heads)
   $ hg debugobsolete
   3cf8de21cc2282186857d2266eb6b1f9cb85ecf3 77ae25d99ff07889e181126b1171b94bec8e5227 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'celeste'}
@@ -411,6 +411,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 0 changes to 2 files (+1 heads)
+  (2 other changesets obsolete on arrival)
   (run 'hg heads' to see heads)
   $ hg log -G
   o  4:77ae25d99ff0 c_B2
@@ -455,6 +456,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 0 changes to 2 files (+1 heads)
+  (2 other changesets obsolete on arrival)
   (run 'hg heads' to see heads)
   $ hg log -G
   o  4:77ae25d99ff0 c_B2
@@ -487,3 +489,55 @@
   ef908e42ce65ef57f970d799acaddde26f58a4cc 5ffb9e311b35f6ab6f76f667ca5d6e595645481b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'rebase', 'user': 'bob'}
 
   $ cd ..
+
+Test pull report consistency
+============================
+
+obsolete but visible should be reported
+---------------------------------------
+
+Setup
+
+  $ hg init repo-a
+  $ cat << EOF >> repo-a/.hg/hgrc
+  > [ui]
+  > username=test
+  > EOF
+  $ cd repo-a
+  $ hg debugbuilddag ..
+  $ hg debugobsolete `getid tip`
+  obsoleted 1 changesets
+  $ cd ../
+  $ hg clone --pull repo-a repo-b
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 0 changes to 0 files
+  new changesets 1ea73414a91b (1 drafts)
+  updating to branch default
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg -R repo-a up tip --hidden
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  updated to hidden changeset 66f7d451a68b
+  (hidden revision '66f7d451a68b' is pruned)
+  $ hg -R repo-a branch foo
+  marked working directory as branch foo
+  (branches are permanent and global, did you want a bookmark?)
+  $ hg -R repo-a commit -m foo
+  1 new orphan changesets
+
+Actual test
+(BROKEN)
+
+  $ hg -R repo-b pull
+  pulling from $TESTTMP/distributed-chain-building/distributed-chain-building/repo-a
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 0 changes to 0 files
+  1 new obsolescence markers
+  1 new orphan changesets
+  new changesets 66f7d451a68b:95d586532b49 (2 drafts)
+  (run 'hg update' to get a working copy)
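+
+(The (BROKEN) marker above records a known inconsistency rather than a passing
+behaviour: per the section title, 66f7d451a68b arrives obsolete, since its
+prune marker is pulled alongside it, yet the report counts it only among the
+"(2 drafts)". A sketch of inspecting the mismatch by hand after the pull,
+commands only, output elided:
+
+  $ hg -R repo-b log --hidden -r 'obsolete()' -T '{node|short}\n'
+  $ hg -R repo-b debugobsolete
+)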
--- a/tests/test-obsolete.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-obsolete.t	Mon Oct 22 14:46:06 2018 -0400
@@ -380,7 +380,7 @@
   adding file changes
   added 4 changesets with 4 changes to 4 files (+1 heads)
   5 new obsolescence markers
-  new changesets 1f0dee641bb7:6f9641995072
+  new changesets 1f0dee641bb7:6f9641995072 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg debugobsolete
   1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
@@ -486,7 +486,7 @@
   adding file changes
   added 4 changesets with 4 changes to 4 files (+1 heads)
   5 new obsolescence markers
-  new changesets 1f0dee641bb7:6f9641995072
+  new changesets 1f0dee641bb7:6f9641995072 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg debugobsolete
   1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
@@ -805,7 +805,8 @@
   adding manifests
   adding file changes
   added 62 changesets with 63 changes to 9 files (+60 heads)
-  new changesets 50c51b361e60:c15e9edfca13
+  new changesets 50c51b361e60:c15e9edfca13 (62 drafts)
+  (2 other changesets obsolete on arrival)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
   > do
@@ -1244,7 +1245,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  new changesets 4b34ecfb0d56:44526ebb0f98
+  new changesets 4b34ecfb0d56:44526ebb0f98 (2 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd ../other-bundleoverlay
@@ -1513,7 +1514,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   1 new obsolescence markers
-  new changesets e016b03fd86f:b0551702f918
+  new changesets e016b03fd86f:b0551702f918 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg debugobsolete | sort
   e008cf2834908e5d6b0f792a9d4b0e2272260fb8 b0551702f918510f01ae838ab03a463054c67b46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
@@ -1602,6 +1603,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
+  (1 other changesets obsolete on arrival)
   (run 'hg update' to get a working copy)
   $ hg log -G
   @  7:7ae79c5d60f0 (draft) [tip ] dd
@@ -1618,3 +1620,22 @@
   
 
   $ cd ..
+
+Test issue 5783
+
+  $ hg init issue-5783 --config format.obsstore-version=0
+  $ cd issue-5783
+  $ touch a.cpp
+  $ hg add a.cpp
+  $ hg commit -m 'Add a.cpp'
+  $ echo 'Hello' > a.cpp
+  $ hg amend -n 'Testing::Obsstore' --config format.obsstore-version=0 --config extensions.amend=
+  $ touch b.cpp
+  $ hg add b.cpp
+  $ hg commit -m 'Add b.cpp'
+  $ echo 'Hello' > b.cpp
+  $ hg amend -n 'Testing::Obsstore2' --config extensions.amend=
+  $ hg debugobsolete
+  d1b09fe3ad2b2a03e23a72f0c582e29a49570145 1a1a11184d2588af24e767e5335d5d9d07e8c550 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'Testing::Obsstore', 'operation': 'amend', 'user': 'test'}
+  1bfd8e3868f641e048b6667cd672c68932f26d00 79959ca316d5b27ac6be1dd0cfd0843a5b5412eb 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'Testing::Obsstore2', 'operation': 'amend', 'user': 'test'}
+  $ cd ..
--- a/tests/test-oldcgi.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-oldcgi.t	Mon Oct 22 14:46:06 2018 -0400
@@ -4,7 +4,7 @@
 
   $ hg init test
   $ cat >hgweb.cgi <<HGWEB
-  > #!$PYTHON
+  > #!"$PYTHON"
   > #
   > # An example CGI script to use hgweb, edit as necessary
   > 
@@ -14,7 +14,7 @@
   > # sys.path.insert(0, "/path/to/python/lib") # if not a system-wide install
   > from mercurial import hgweb
   > 
-  > h = hgweb.hgweb("test", "Empty test repository")
+  > h = hgweb.hgweb(b"test", b"Empty test repository")
   > h.run()
   > HGWEB
 
@@ -26,7 +26,7 @@
   > HGWEBDIRCONF
 
   $ cat >hgwebdir.cgi <<HGWEBDIR
-  > #!$PYTHON
+  > #!"$PYTHON"
   > #
   > # An example CGI script to export multiple hgweb repos, edit as necessary
   > 
@@ -62,15 +62,15 @@
   $ chmod 755 hgwebdir.cgi
 
   $ . "$TESTDIR/cgienv"
-  $ $PYTHON hgweb.cgi > page1
-  $ $PYTHON hgwebdir.cgi > page2
+  $ "$PYTHON" hgweb.cgi > page1
+  $ "$PYTHON" hgwebdir.cgi > page2
 
   $ PATH_INFO="/test/"
   $ PATH_TRANSLATED="/var/something/test.cgi"
   $ REQUEST_URI="/test/test/"
   $ SCRIPT_URI="http://hg.omnifarious.org/test/test/"
   $ SCRIPT_URL="/test/test/"
-  $ $PYTHON hgwebdir.cgi > page3
+  $ "$PYTHON" hgwebdir.cgi > page3
 
   $ grep -i error page1 page2 page3
   [1]
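
A note on the $PYTHON quoting changes in this file (and in the pager, parseindex, patch and patchbomb tests below): without the double quotes, a test Python interpreter located at a path containing spaces is word-split by the shell. A minimal illustration with a hypothetical path (output elided; the unquoted form fails to find the interpreter):

  $ PYTHON='/opt/my python/bin/python3'
  $ $PYTHON -c 'print(1)'
  $ "$PYTHON" -c 'print(1)'
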
--- a/tests/test-origbackup-conflict.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-origbackup-conflict.t	Mon Oct 22 14:46:06 2018 -0400
@@ -129,7 +129,7 @@
   b/c: replacing untracked file
   getting b/c
   creating directory: $TESTTMP/repo/.hg/badorigbackups/b
-  abort: $ENOTDIR$: '$TESTTMP/repo/.hg/badorigbackups/b'
+  abort: $ENOTDIR$: *$TESTTMP/repo/.hg/badorigbackups/b* (glob)
   [255]
   $ cat .hg/badorigbackups
   data
--- a/tests/test-pager-legacy.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pager-legacy.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,7 +14,7 @@
   > [extensions]
   > pager=
   > [pager]
-  > pager = $PYTHON $TESTTMP/fakepager.py
+  > pager = "$PYTHON" $TESTTMP/fakepager.py
   > EOF
 
   $ hg init repo
@@ -22,7 +22,7 @@
   $ echo a >> a
   $ hg add a
   $ hg ci -m 'add a'
-  $ for x in `$PYTHON $TESTDIR/seq.py 1 10`; do
+  $ for x in `"$PYTHON" $TESTDIR/seq.py 1 10`; do
   >   echo a $x >> a
   >   hg ci -m "modify a $x"
   > done
@@ -244,7 +244,7 @@
 Pager should not override the exit code of other commands
 
   $ cat >> $TESTTMP/fortytwo.py <<'EOF'
-  > from mercurial import registrar, commands
+  > from mercurial import commands, registrar
   > cmdtable = {}
   > command = registrar.command(cmdtable)
   > @command(b'fortytwo', [], b'fortytwo', norepo=True)
--- a/tests/test-pager.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pager.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
   > formatted = yes
   > color = no
   > [pager]
-  > pager = $PYTHON $TESTTMP/fakepager.py
+  > pager = "$PYTHON" $TESTTMP/fakepager.py
   > EOF
 
   $ hg init repo
@@ -24,7 +24,7 @@
   $ echo a >> a
   $ hg add a
   $ hg ci -m 'add a'
-  $ for x in `$PYTHON $TESTDIR/seq.py 1 10`; do
+  $ for x in `"$PYTHON" $TESTDIR/seq.py 1 10`; do
   >   echo a $x >> a
   >   hg ci -m "modify a $x"
   > done
@@ -404,7 +404,7 @@
   > [ui]
   > formatted=1
   > [pager]
-  > pager = $PYTHON $TESTTMP/printlesslv.py
+  > pager = "$PYTHON" $TESTTMP/printlesslv.py
   > EOF
   $ unset LESS
   $ unset LV
--- a/tests/test-parseindex.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-parseindex.t	Mon Oct 22 14:46:06 2018 -0400
@@ -27,8 +27,7 @@
   
   $ cat >> test.py << EOF
   > from __future__ import print_function
-  > from mercurial import changelog, vfs
-  > from mercurial.node import *
+  > from mercurial import changelog, node, vfs
   > 
   > class singlebyteread(object):
   >     def __init__(self, real):
@@ -59,9 +58,9 @@
   > cl = changelog.changelog(opener('.hg/store'))
   > print(len(cl), 'revisions:')
   > for r in cl:
-  >     print(short(cl.node(r)))
+  >     print(node.short(cl.node(r)))
   > EOF
-  $ $PYTHON test.py
+  $ "$PYTHON" test.py
   2 revisions:
   7c31755bf9b5
   26333235a41c
@@ -74,7 +73,7 @@
 
   $ cd a
 
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > from __future__ import print_function
   > from mercurial import changelog, vfs
   > cl = changelog.changelog(vfs.vfs('.hg/store'))
@@ -137,7 +136,7 @@
   $ hg clone --pull -q --config phases.publish=False ../a segv
   $ rm -R limit/.hg/cache segv/.hg/cache
 
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > data = open("limit/.hg/store/00changelog.i", "rb").read()
   > for n, p in [(b'limit', b'\0\0\0\x02'), (b'segv', b'\0\x01\0\0')]:
   >     # corrupt p1 at rev0 and p2 at rev1
@@ -145,7 +144,7 @@
   >     open(n + b"/.hg/store/00changelog.i", "wb").write(d)
   > EOF
 
-  $ hg -R limit debugindex -f1 -c
+  $ hg -R limit debugrevlogindex -f1 -c
      rev flag     size   link     p1     p2       nodeid
        0 0000       62      0      2     -1 7c31755bf9b5
        1 0000       65      1      0      2 26333235a41c
@@ -155,7 +154,7 @@
         0       1        1       -1    base         63         62         63   1.01613        63         0    0.00000
         1       2        1       -1    base         66         65         66   1.01538        66         0    0.00000
 
-  $ hg -R segv debugindex -f1 -c
+  $ hg -R segv debugrevlogindex -f1 -c
      rev flag     size   link     p1     p2       nodeid
        0 0000       62      0  65536     -1 7c31755bf9b5
        1 0000       65      1      0  65536 26333235a41c
@@ -188,13 +187,13 @@
   >         print(inst)
   > EOF
 
-  $ $PYTHON test.py limit/.hg/store
+  $ "$PYTHON" test.py limit/.hg/store
   reachableroots: parent out of range
   compute_phases_map_sets: parent out of range
   index_headrevs: parent out of range
   find_gca_candidates: parent out of range
   find_deepest: parent out of range
-  $ $PYTHON test.py segv/.hg/store
+  $ "$PYTHON" test.py segv/.hg/store
   reachableroots: parent out of range
   compute_phases_map_sets: parent out of range
   index_headrevs: parent out of range
--- a/tests/test-parseindex2.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-parseindex2.py	Mon Oct 22 14:46:06 2018 -0400
@@ -8,12 +8,14 @@
 import struct
 import subprocess
 import sys
+import unittest
 
 from mercurial.node import (
     nullid,
     nullrev,
 )
 from mercurial import (
+    node as nodemod,
     policy,
     pycompat,
 )
@@ -61,9 +63,6 @@
     e[0] = offset_type(0, type)
     index[0] = tuple(e)
 
-    # add the magic null revision at -1
-    index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
     return index, cache
 
 data_inlined = (
@@ -132,88 +131,92 @@
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     return p.communicate()  # returns stdout, stderr
 
-def printhexfail(testnumber, hexversion, stdout, expected):
+def hexfailmsg(testnumber, hexversion, stdout, expected):
     try:
         hexstring = hex(hexversion)
     except TypeError:
         hexstring = None
-    print("FAILED: version test #%s with Python %s and patched "
-          "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
-          (testnumber, sys.version_info, hexversion, hexstring, expected,
-           stdout))
-
-def testversionokay(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    if stdout:
-        printhexfail(testnumber, hexversion, stdout, expected="no stdout")
-
-def testversionfail(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    # We include versionerrortext to distinguish from other ImportErrors.
-    errtext = b"ImportError: %s" % pycompat.sysbytes(parsers.versionerrortext)
-    if errtext not in stdout:
-        printhexfail(testnumber, hexversion, stdout,
-                     expected="stdout to contain %r" % errtext)
+    return ("FAILED: version test #%s with Python %s and patched "
+            "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
+            (testnumber, sys.version_info, hexversion, hexstring, expected,
+             stdout))
 
 def makehex(major, minor, micro):
     return int("%x%02x%02x00" % (major, minor, micro), 16)
 
-def runversiontests():
-    """Check the version-detection logic when importing parsers."""
-    info = sys.version_info
-    major, minor, micro = info[0], info[1], info[2]
-    # Test same major-minor versions.
-    testversionokay(1, makehex(major, minor, micro))
-    testversionokay(2, makehex(major, minor, micro + 1))
-    # Test different major-minor versions.
-    testversionfail(3, makehex(major + 1, minor, micro))
-    testversionfail(4, makehex(major, minor + 1, micro))
-    testversionfail(5, "'foo'")
+class parseindex2tests(unittest.TestCase):
+
+    def assertversionokay(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        self.assertFalse(
+            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout'))
+
+    def assertversionfail(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        # We include versionerrortext to distinguish from other ImportErrors.
+        errtext = b"ImportError: %s" % pycompat.sysbytes(
+            parsers.versionerrortext)
+        self.assertIn(errtext, stdout,
+                      hexfailmsg(testnumber, hexversion, stdout,
+                                 expected="stdout to contain %r" % errtext))
 
-def runtest() :
-    # Only test the version-detection logic if it is present.
-    try:
-        parsers.versionerrortext
-    except AttributeError:
-        pass
-    else:
-        runversiontests()
+    def testversiondetection(self):
+        """Check the version-detection logic when importing parsers."""
+        # Only test the version-detection logic if it is present.
+        try:
+            parsers.versionerrortext
+        except AttributeError:
+            return
+        info = sys.version_info
+        major, minor, micro = info[0], info[1], info[2]
+        # Test same major-minor versions.
+        self.assertversionokay(1, makehex(major, minor, micro))
+        self.assertversionokay(2, makehex(major, minor, micro + 1))
+        # Test different major-minor versions.
+        self.assertversionfail(3, makehex(major + 1, minor, micro))
+        self.assertversionfail(4, makehex(major, minor + 1, micro))
+        self.assertversionfail(5, "'foo'")
 
-    # Check that parse_index2() raises TypeError on bad arguments.
-    try:
-        parse_index2(0, True)
-    except TypeError:
-        pass
-    else:
-        print("Expected to get TypeError.")
+    def testbadargs(self):
+        # Check that parse_index2() raises TypeError on bad arguments.
+        with self.assertRaises(TypeError):
+            parse_index2(0, True)
 
-   # Check parsers.parse_index2() on an index file against the original
-   # Python implementation of parseindex, both with and without inlined data.
-
-    py_res_1 = py_parseindex(data_inlined, True)
-    c_res_1 = parse_index2(data_inlined, True)
+    def testparseindexfile(self):
+        # Check parsers.parse_index2() on an index file against the
+        # original Python implementation of parseindex, both with and
+        # without inlined data.
 
-    py_res_2 = py_parseindex(data_non_inlined, False)
-    c_res_2 = parse_index2(data_non_inlined, False)
+        want = py_parseindex(data_inlined, True)
+        got = parse_index2(data_inlined, True)
+        self.assertEqual(want, got) # inline data
 
-    if py_res_1 != c_res_1:
-        print("Parse index result (with inlined data) differs!")
-
-    if py_res_2 != c_res_2:
-        print("Parse index result (no inlined data) differs!")
+        want = py_parseindex(data_non_inlined, False)
+        got = parse_index2(data_non_inlined, False)
+        self.assertEqual(want, got) # no inline data
 
-    ix = parsers.parse_index2(data_inlined, True)[0]
-    for i, r in enumerate(ix):
-        if r[7] == nullid:
-            i = -1
-        try:
-            if ix[r[7]] != i:
-                print('Reverse lookup inconsistent for %r'
-                    % r[7].encode('hex'))
-        except TypeError:
-            # pure version doesn't support this
-            break
+        ix = parsers.parse_index2(data_inlined, True)[0]
+        for i, r in enumerate(ix):
+            if r[7] == nullid:
+                i = -1
+            try:
+                self.assertEqual(
+                    ix[r[7]], i,
+                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]))
+            except TypeError:
+                # pure version doesn't support this
+                break
 
-    print("done")
+    def testminusone(self):
+        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        index, junk = parsers.parse_index2(data_inlined, True)
+        got = index[-1]
+        self.assertEqual(want, got) # inline data
 
-runtest()
+        index, junk = parsers.parse_index2(data_non_inlined, False)
+        got = index[-1]
+        self.assertEqual(want, got) # no inline data
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
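
The refactoring above moves test-parseindex2.py from print-and-compare checks (hence the deleted .out file just below) to unittest assertions run by silenttestrunner, the suite's quiet unittest driver that prints nothing on success. A stripped-down sketch of the same pattern (hypothetical file name; assumes it runs from the tests/ directory so silenttestrunner is importable):

  $ cat > example_test.py <<EOF
  > import unittest
  >
  > class exampletests(unittest.TestCase):
  >     def testbadargs(self):
  >         # assertRaises replaces the old try/except-plus-print dance
  >         with self.assertRaises(TypeError):
  >             int(None)
  >
  > if __name__ == '__main__':
  >     import silenttestrunner
  >     silenttestrunner.main(__name__)
  > EOF
  $ "$PYTHON" example_test.py
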
--- a/tests/test-parseindex2.py.out	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-done
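
Behind the new testminusone case above: the parsed index no longer stores the magic null revision as a trailing entry (note the removed index.append of the nullid tuple), but still exposes it at index[-1]. A sketch of the invariant, with data_inlined as defined in the test module and parsers obtained via policy.importmod as elsewhere in Mercurial (an assumption, since that import is outside the hunks shown):

  >>> from mercurial.node import nullid
  >>> from mercurial import policy
  >>> parsers = policy.importmod('parsers')
  >>> index, cache = parsers.parse_index2(data_inlined, True)
  >>> index[-1] == (0, 0, 0, -1, -1, -1, -1, nullid)
  True
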
--- a/tests/test-patch-offset.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-patch-offset.t	Mon Oct 22 14:46:06 2018 -0400
@@ -23,7 +23,7 @@
 within this file.  If the offset isn't tracked then the hunks can be
 applied to the wrong lines of this file.
 
-  $ $PYTHON ../writepatterns.py a 34X 10A 1B 10A 1C 10A 1B 10A 1D 10A 1B 10A 1E 10A 1B 10A
+  $ "$PYTHON" ../writepatterns.py a 34X 10A 1B 10A 1C 10A 1B 10A 1D 10A 1B 10A 1E 10A 1B 10A
   $ hg commit -Am adda
   adding a
 
@@ -76,7 +76,7 @@
 
 compare imported changes against reference file
 
-  $ $PYTHON ../writepatterns.py aref 34X 10A 1B 1a 9A 1C 10A 1B 10A 1D 10A 1B 1a 9A 1E 10A 1B 1a 9A
+  $ "$PYTHON" ../writepatterns.py aref 34X 10A 1B 1a 9A 1C 10A 1B 10A 1D 10A 1B 1a 9A 1E 10A 1B 1a 9A
   $ diff aref a
 
   $ cd ..
--- a/tests/test-patch.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-patch.t	Mon Oct 22 14:46:06 2018 -0400
@@ -7,7 +7,7 @@
   > EOF
 
   $ echo "[ui]" >> $HGRCPATH
-  $ echo "patch=$PYTHON ../patchtool.py" >> $HGRCPATH
+  $ echo "patch=\"$PYTHON\" ../patchtool.py" >> $HGRCPATH
 
   $ hg init a
   $ cd a
--- a/tests/test-patchbomb-bookmark.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-patchbomb-bookmark.t	Mon Oct 22 14:46:06 2018 -0400
@@ -35,7 +35,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] bookmark
-  Message-Id: <patchbomb.347155260@*> (glob)
+  Message-Id: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:00 +0000
   From: test
@@ -50,10 +50,10 @@
   X-Mercurial-Node: accde9b8b6dce861c185d0825c1affc09a79cb26
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:01 +0000
   From: test
@@ -81,10 +81,10 @@
   X-Mercurial-Node: 417defd1559c396ba06a44dce8dc1c2d2d653f3f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <417defd1559c396ba06a.347155262@*> (glob)
-  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob)
-  In-Reply-To: <patchbomb.347155260@*> (glob)
-  References: <patchbomb.347155260@*> (glob)
+  Message-Id: <417defd1559c396ba06a.347155262@test-hostname>
+  X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname>
+  In-Reply-To: <patchbomb.347155260@test-hostname>
+  References: <patchbomb.347155260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1981 00:01:02 +0000
   From: test
@@ -145,8 +145,8 @@
   X-Mercurial-Node: 8dab2639fd35f1e337ad866c372a5c44f1064e3c
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
-  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@*> (glob)
+  Message-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
+  X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Fri, 01 Jan 1982 00:01:00 +0000
   From: test
--- a/tests/test-patchbomb-tls.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-patchbomb-tls.t	Mon Oct 22 14:46:06 2018 -0400
@@ -5,7 +5,7 @@
   $ CERTSDIR="$TESTDIR/sslcerts"
   $ cat "$CERTSDIR/priv.pem" "$CERTSDIR/pub.pem" >> server.pem
 
-  $ $PYTHON "$TESTDIR/dummysmtpd.py" -p $HGPORT --pid-file a.pid -d \
+  $ "$PYTHON" "$TESTDIR/dummysmtpd.py" -p $HGPORT --pid-file a.pid -d \
   > --tls smtps --certificate `pwd`/server.pem
   listening at localhost:$HGPORT (?)
   $ cat a.pid >> $DAEMON_PIDS
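
Several inline Python snippets in test-patchbomb.t below also gain bytes literals (fp.write(b"...")): a file opened in binary mode accepts only bytes on Python 3, whereas Python 2 also took str. A one-line illustration in the test suite's own inline-Python style (the trailing "and None" suppresses the write() return value that Python 3 would otherwise echo):

  >>> with open('demo', 'wb') as f:
  ...     f.write(b'h\xc3\xb6mma!\n') and None
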
--- a/tests/test-patchbomb.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-patchbomb.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2,7 +2,6 @@
 wildcards in test expectations due to how many things like hostnames
 tend to make it into outputs. As a result, you may need to perform the
 following regular expression substitutions:
-@$HOSTNAME> -> @*> (glob)
 Mercurial-patchbomb/.* -> Mercurial-patchbomb/* (glob)
 /mixed; boundary="===+[0-9]+==" -> /mixed; boundary="===*== (glob)"
 --===+[0-9]+=+--$ -> --===*=-- (glob)
@@ -45,8 +44,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -84,8 +83,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <*@*> (glob)
-  X-Mercurial-Series-Id: <*@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -159,8 +158,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -197,8 +196,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -236,7 +235,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.120@*> (glob)
+  Message-Id: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:00 +0000
   From: quux
@@ -252,10 +251,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.121@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.121@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:01 +0000
   From: quux
@@ -284,10 +283,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.122@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob)
-  In-Reply-To: <patchbomb.120@*> (glob)
-  References: <patchbomb.120@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.122@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname>
+  In-Reply-To: <patchbomb.120@test-hostname>
+  References: <patchbomb.120@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:02:02 +0000
   From: quux
@@ -366,7 +365,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -412,7 +411,7 @@
   Content-Type: multipart/mixed; boundary="===*==" (glob)
   MIME-Version: 1.0
   Subject: test
-  Message-Id: <patchbomb.180@*> (glob)
+  Message-Id: <patchbomb.180@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:03:00 +0000
   From: quux
@@ -439,10 +438,11 @@
   CgZcySARUyA2A2LGZKiZ3Y+Lu786z4z4MWXmsrAZCsqrl1az5y21PMcjpbThzWeXGT+/nutbmvvz
   zXYS3BoGxdrJDIYmlimJJiZpRokmqYYmaSYWFknmSSkmhqbmliamiZYWxuYmBhbJBgZcUBNZQe5K
   Epm7xF/LT+RLx/a9juFTomaYO/Rgsx4rwBN+IMCUDLOKAQBrsmti
+   (?)
   --===============*==-- (glob)
 
 utf-8 patch:
-  $ $PYTHON -c 'fp = open("utf", "wb"); fp.write("h\xC3\xB6mma!\n"); fp.close();'
+  $ "$PYTHON" -c 'fp = open("utf", "wb"); fp.write(b"h\xC3\xB6mma!\n"); fp.close();'
   $ hg commit -A -d '4 0' -m 'utf-8 content'
   adding description
   adding utf
@@ -454,14 +454,14 @@
   
   displaying [PATCH] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -487,7 +487,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
 
 mime encoded mbox (base64):
@@ -506,8 +506,8 @@
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <909a00e13e9d78b575ae.240@*> (glob)
-  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.240@test-hostname>
+  X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: Q <quux>
@@ -526,7 +526,14 @@
   QEAgLTAsMCArMSwxIEBACitow7ZtbWEhCg==
   
   
-  $ $PYTHON -c 'print open("mbox").read().split("\n\n")[1].decode("base64")'
+  >>> import base64
+  >>> patch = base64.b64decode(open("mbox").read().split("\n\n")[1])
+  >>> if not isinstance(patch, str):
+  ...     import sys
+  ...     sys.stdout.flush()
+  ...     junk = sys.stdout.buffer.write(patch + b"\n")
+  ... else:
+  ...     print(patch)
   # HG changeset patch
   # User test
   # Date 4 0
@@ -551,7 +558,7 @@
   $ rm mbox
 
 mime encoded mbox (quoted-printable):
-  $ $PYTHON -c 'fp = open("long", "wb"); fp.write("%s\nfoo\n\nbar\n" % ("x" * 1024)); fp.close();'
+  $ "$PYTHON" -c 'fp = open("long", "wb"); fp.write(b"%s\nfoo\n\nbar\n" % (b"x" * 1024)); fp.close();'
   $ hg commit -A -d '4 0' -m 'long line'
   adding long
 
@@ -568,8 +575,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -622,8 +629,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
   From: quux
@@ -665,7 +672,7 @@
   $ rm mbox
 
 iso-8859-1 patch:
-  $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write("h\xF6mma!\n"); fp.close();'
+  $ "$PYTHON" -c 'fp = open("isolatin", "wb"); fp.write(b"h\xF6mma!\n"); fp.close();'
   $ hg commit -A -d '5 0' -m 'isolatin 8-bit encoding'
   adding isolatin
 
@@ -684,8 +691,8 @@
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
-  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
+  X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:05:00 +0000
   From: quux
@@ -732,8 +739,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -791,7 +798,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -811,10 +818,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -847,10 +854,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -888,8 +895,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -931,8 +938,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -991,7 +998,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1006,10 +1013,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1044,10 +1051,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1082,10 +1089,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1142,8 +1149,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1193,8 +1200,8 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
-  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
+  X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1260,8 +1267,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1323,7 +1330,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 3] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1338,10 +1345,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 3
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1385,10 +1392,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 3
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1432,10 +1439,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 3
-  Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:03 +0000
   From: quux
@@ -1503,7 +1510,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1519,10 +1526,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1556,7 +1563,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1573,10 +1580,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1612,7 +1619,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1628,10 +1635,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1660,10 +1667,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1699,8 +1706,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1737,8 +1744,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1779,8 +1786,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1823,7 +1830,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -1838,10 +1845,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -1876,10 +1883,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -1923,8 +1930,8 @@
   X-Mercurial-Node: 7aead2484924c445ad8ce2613df91f52f9e502ed
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <7aead2484924c445ad8c.60@*> (glob)
-  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@*> (glob)
+  Message-Id: <7aead2484924c445ad8c.60@test-hostname>
+  X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1966,8 +1973,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -1998,8 +2005,8 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2038,7 +2045,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   In-Reply-To: <baz>
   References: <baz>
   User-Agent: Mercurial-patchbomb/* (glob)
@@ -2056,10 +2063,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2088,10 +2095,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2129,8 +2136,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2167,7 +2174,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2183,10 +2190,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2215,10 +2222,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2256,8 +2263,8 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2293,7 +2300,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 fooFlag barFlag] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2309,10 +2316,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2341,10 +2348,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2383,8 +2390,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2422,7 +2429,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 2 R1] test
-  Message-Id: <patchbomb.60@*> (glob)
+  Message-Id: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2438,10 +2445,10 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 2
-  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.61@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:01 +0000
   From: quux
@@ -2469,10 +2476,10 @@
   X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 2
-  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
-  In-Reply-To: <patchbomb.60@*> (glob)
-  References: <patchbomb.60@*> (glob)
+  Message-Id: <97d72e5f12c7e84f8506.62@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname>
+  In-Reply-To: <patchbomb.60@test-hostname>
+  References: <patchbomb.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:02 +0000
   From: quux
@@ -2508,8 +2515,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.60@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:01:00 +0000
   From: quux
@@ -2531,10 +2538,11 @@
   
 
 test multi-byte domain parsing:
-  $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'`
+  >>> with open('toaddress.txt', 'wb') as f:
+  ...  f.write(b'bar@\xfcnicode.com') and None
   $ HGENCODING=iso-8859-1
   $ export HGENCODING
-  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "bar@${UUML}nicode.com" -s test -r 0
+  $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "`cat toaddress.txt`" -s test -r 0
   this patch series consists of 1 patches.
   
   Cc: 
@@ -2550,8 +2558,8 @@
   X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <8580ff50825a50c8f716.315532860@*> (glob)
-  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob)
+  Message-Id: <8580ff50825a50c8f716.315532860@test-hostname>
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
@@ -2625,7 +2633,7 @@
   Content-Type: text/plain; charset="us-ascii"
   Content-Transfer-Encoding: 7bit
   Subject: [PATCH 0 of 6] test
-  Message-Id: <patchbomb.315532860@*> (glob)
+  Message-Id: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
@@ -2640,10 +2648,10 @@
   X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 6
-  Message-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:01 +0000
   From: test
@@ -2665,16 +2673,16 @@
   
   displaying [PATCH 2 of 6] utf-8 content ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 2 of 6] utf-8 content
   X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 6
-  Message-Id: <909a00e13e9d78b575ae.315532862@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <909a00e13e9d78b575ae.315532862@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:02 +0000
   From: test
@@ -2699,7 +2707,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/utf	Thu Jan 01 00:00:04 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xc3\xb6mma! (esc)
+  +h=C3=B6mma!
   
   displaying [PATCH 3 of 6] long line ...
   MIME-Version: 1.0
@@ -2709,10 +2717,10 @@
   X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1
   X-Mercurial-Series-Index: 3
   X-Mercurial-Series-Total: 6
-  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <a2ea8fc83dd8b93cfd86.315532863@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:03 +0000
   From: test
@@ -2750,16 +2758,16 @@
   
   displaying [PATCH 4 of 6] isolatin 8-bit encoding ...
   MIME-Version: 1.0
-  Content-Type: text/plain; charset="us-ascii"
-  Content-Transfer-Encoding: 8bit
+  Content-Type: text/plain; charset="iso-8859-1"
+  Content-Transfer-Encoding: quoted-printable
   Subject: [PATCH 4 of 6] isolatin 8-bit encoding
   X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720
   X-Mercurial-Series-Index: 4
   X-Mercurial-Series-Total: 6
-  Message-Id: <240fb913fc1b7ff15ddb.315532864@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <240fb913fc1b7ff15ddb.315532864@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:04 +0000
   From: test
@@ -2777,7 +2785,7 @@
   --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   +++ b/isolatin	Thu Jan 01 00:00:05 1970 +0000
   @@ -0,0 +1,1 @@
-  +h\xf6mma! (esc)
+  +h=F6mma!
   
   displaying [PATCH 5 of 6] Added tag zero, zero.foo for changeset 8580ff50825a ...
   MIME-Version: 1.0
@@ -2787,10 +2795,10 @@
   X-Mercurial-Node: 5d5ef15dfe5e7bd3a4ee154b5fff76c7945ec433
   X-Mercurial-Series-Index: 5
   X-Mercurial-Series-Total: 6
-  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:05 +0000
   From: test
@@ -2819,10 +2827,10 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 6
   X-Mercurial-Series-Total: 6
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@*> (glob)
-  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob)
-  In-Reply-To: <patchbomb.315532860@*> (glob)
-  References: <patchbomb.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@test-hostname>
+  X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname>
+  In-Reply-To: <patchbomb.315532860@test-hostname>
+  References: <patchbomb.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:06 +0000
   From: test
@@ -2864,8 +2872,8 @@
   X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268
   X-Mercurial-Series-Index: 1
   X-Mercurial-Series-Total: 1
-  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
-  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob)
+  Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
+  X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: test
--- a/tests/test-pathconflicts-basic.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pathconflicts-basic.t	Mon Oct 22 14:46:06 2018 -0400
@@ -88,7 +88,7 @@
   $ mkdir a
   $ echo 4 > a/b
   $ hg up file2
-  abort: *: '$TESTTMP/repo/a' (glob)
+  abort: *: *$TESTTMP/repo/a* (glob)
   [255]
   $ hg up --clean file2
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pending.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pending.t	Mon Oct 22 14:46:06 2018 -0400
@@ -41,8 +41,8 @@
 python hook
 
   $ cat <<EOF > reject.py
-  > import os, time
-  > from mercurial import ui, localrepo
+  > import os
+  > import time
   > def rejecthook(ui, repo, hooktype, node, **opts):
   >     ui.write(b'hook %s\\n' % repo[b'tip'].hex())
   >     # create the notify file so caller knows we're running
@@ -50,7 +50,7 @@
   >     f = open(fpath, 'w')
   >     f.close()
   >     # wait for ack - caller should delete the notify file
-  >     i = $maxwait
+  >     i = int("$maxwait")
   >     while os.path.exists(fpath) and i > 0:
   >         time.sleep(1)
   >         i -= 1
--- a/tests/test-permissions.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-permissions.t	Mon Oct 22 14:46:06 2018 -0400
@@ -13,7 +13,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ chmod -r .hg/store/data/a.i
 
@@ -32,7 +32,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ chmod -w .hg/store/data/a.i
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-phabricator.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,77 @@
+#require vcr
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > phabricator = 
+  > EOF
+  $ hg init repo
+  $ cd repo
+  $ cat >> .hg/hgrc <<EOF
+  > [phabricator]
+  > url = https://phab.mercurial-scm.org/
+  > callsign = HG
+  > 
+  > [auth]
+  > hgphab.schemes = https
+  > hgphab.prefix = phab.mercurial-scm.org
+  > # When working on the extension and making phabricator interaction
+  > # changes, edit this to be a real phabricator token. When done, edit
+  > # it back, and make sure to also edit your VCR transcripts to match
+  > # whatever value you put here.
+  > hgphab.phabtoken = cli-hahayouwish
+  > EOF
+  $ VCR="$TESTDIR/phabricator"
+
+The error case is handled reasonably. We override the phabtoken here
+so that, when developing changes to phabricator.py, you can put a
+real token in the config above without having to edit this test.
+  $ hg phabread --config auth.hgphab.phabtoken=cli-notavalidtoken \
+  >  --test-vcr "$VCR/phabread-conduit-error.json" D4480 | head
+  abort: Conduit Error (ERR-INVALID-AUTH): API token "cli-notavalidtoken" has the wrong length. API tokens should be 32 characters long.
+
+Basic phabread:
+  $ hg phabread --test-vcr "$VCR/phabread-4480.json" D4480 | head
+  # HG changeset patch
+  exchangev2: start to implement pull with wire protocol v2
+  
+  Wire protocol version 2 will take a substantially different
+  approach to exchange than version 1 (at least as far as pulling
+  is concerned).
+  
+  This commit establishes a new exchangev2 module for holding
+  code related to exchange using wire protocol v2. I could have
+  added things to the existing exchange module. But it is already
+
+phabupdate with an accept:
+  $ hg phabupdate --accept D4564 \
+  > -m 'I think I like where this is headed. Will read rest of series later.'\
+  >  --test-vcr "$VCR/accept-4564.json"
+
+Create a differential diff:
+  $ echo alpha > alpha
+  $ hg ci --addremove -m 'create alpha for phabricator test'
+  adding alpha
+  $ hg phabsend -r . --test-vcr "$VCR/phabsend-create-alpha.json"
+  D4596 - created - 5206a4fa1e6c: create alpha for phabricator test
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5206a4fa1e6c-dec9e777-phabsend.hg
+  $ echo more >> alpha
+  $ HGEDITOR=true hg ci --amend
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/d8f232f7d799-c573510a-amend.hg
+  $ echo beta > beta
+  $ hg ci --addremove -m 'create beta for phabricator test'
+  adding beta
+  $ hg phabsend -r ".^::" --test-vcr "$VCR/phabsend-update-alpha-create-beta.json"
+  D4596 - updated - f70265671c65: create alpha for phabricator test
+  D4597 - created - 1a5640df7bbf: create beta for phabricator test
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/1a5640df7bbf-6daf3e6e-phabsend.hg
+
+Template keywords
+  $ hg log -T'{rev} {phabreview|json}\n'
+  1 {"id": "D4597", "url": "https://phab.mercurial-scm.org/D4597"}
+  0 {"id": "D4596", "url": "https://phab.mercurial-scm.org/D4596"}
+
+  $ hg log -T'{rev} {phabreview.url} {phabreview.id}\n'
+  1 https://phab.mercurial-scm.org/D4597 D4597
+  0 https://phab.mercurial-scm.org/D4596 D4596
+
+  $ cd ..
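
The --test-vcr transcripts referenced above are what make these
network-dependent tests reproducible: a run against a live Phabricator
records each Conduit request/response pair into a JSON file, and later
runs replay from that file instead of touching the network. A minimal
sketch of the record/replay idea, assuming nothing about the real vcr
library's API (Transcript and fetch are illustrative names):

  import json
  import os

  class Transcript(object):
      """Record calls on first use, replay them afterwards."""

      def __init__(self, path):
          self.path = path
          if os.path.exists(path):
              with open(path) as f:
                  self.calls = json.load(f)
          else:
              self.calls = []

      def fetch(self, url, params, realfetch):
          # Replay: a matching recorded call short-circuits the network.
          for call in self.calls:
              if call['url'] == url and call['params'] == params:
                  return call['response']
          # Record: hit the real endpoint once and persist the result.
          response = realfetch(url, params)
          self.calls.append({'url': url, 'params': params,
                             'response': response})
          with open(self.path, 'w') as f:
              json.dump(self.calls, f, indent=2)
          return response

This is also why the hgrc above can keep the fake cli-hahayouwish
token: replayed runs never reach the real server, so a real token is
only needed when re-recording a transcript.
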
--- a/tests/test-phases-exchange.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-phases-exchange.t	Mon Oct 22 14:46:06 2018 -0400
@@ -204,7 +204,7 @@
   adding manifests
   adding file changes
   added 5 changesets with 5 changes to 5 files (+1 heads)
-  new changesets 054250a37db4:b555f63b6063
+  new changesets 054250a37db4:b555f63b6063 (5 drafts)
   test-debug-phase: new rev 0:  x -> 1
   test-debug-phase: new rev 1:  x -> 1
   test-debug-phase: new rev 2:  x -> 1
@@ -238,7 +238,7 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 3 files
-  new changesets 054250a37db4:54acac6f23ab
+  new changesets 054250a37db4:54acac6f23ab (3 drafts)
   test-debug-phase: new rev 0:  x -> 1
   test-debug-phase: new rev 1:  x -> 1
   test-debug-phase: new rev 2:  x -> 1
@@ -260,7 +260,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets f54f1bb90ff3
+  new changesets f54f1bb90ff3 (1 drafts)
   test-debug-phase: new rev 3:  x -> 1
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hgph
@@ -333,7 +333,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets d6bcb4f74035:145e75495359
+  new changesets d6bcb4f74035:145e75495359 (2 drafts)
   4 local changesets published
   test-debug-phase: move rev 0: 1 -> 0
   test-debug-phase: move rev 1: 1 -> 0
@@ -380,7 +380,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets d6bcb4f74035:145e75495359
+  new changesets d6bcb4f74035:145e75495359 (2 drafts)
   test-debug-phase: new rev 5:  x -> 1
   test-debug-phase: new rev 6:  x -> 1
   (run 'hg update' to get a working copy)
@@ -943,7 +943,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 435b5d83910c
+  new changesets 435b5d83910c (1 drafts)
   test-debug-phase: new rev 10:  x -> 1
   (run 'hg update' to get a working copy)
   $ hgph -R ../mu
@@ -1073,7 +1073,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 5237fb433fc8
+  new changesets 5237fb433fc8 (1 drafts)
   test-debug-phase: new rev 13:  x -> 1
   (run 'hg update' to get a working copy)
   $ hgph
@@ -1448,7 +1448,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+1 heads)
-  new changesets 426bada5c675:bb94757e651a
+  new changesets 426bada5c675:bb94757e651a (4 drafts)
   test-debug-phase: new rev 0:  x -> 0
   test-debug-phase: new rev 1:  x -> 0
   test-debug-phase: new rev 2:  x -> 0
@@ -1490,7 +1490,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+1 heads)
-  new changesets 426bada5c675:bb94757e651a
+  new changesets 426bada5c675:bb94757e651a (4 drafts)
   test-debug-phase: new rev 0:  x -> 0
   test-debug-phase: new rev 1:  x -> 0
   test-debug-phase: new rev 2:  x -> 0
@@ -1532,7 +1532,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+1 heads)
-  new changesets 426bada5c675:bb94757e651a
+  new changesets 426bada5c675:bb94757e651a (4 drafts)
   test-debug-phase: new rev 0:  x -> 0
   test-debug-phase: new rev 1:  x -> 0
   test-debug-phase: new rev 2:  x -> 0
--- a/tests/test-phases.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-phases.t	Mon Oct 22 14:46:06 2018 -0400
@@ -690,7 +690,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 8 changesets, 7 total revisions
+  checked 8 changesets with 7 changes to 7 files
 
   $ cd ..
 
@@ -826,3 +826,81 @@
   rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
   [255]
+
+  $ cd ..
+
+Test for the "internal" phase
+=============================
+
+Check that we deny its usage on older repositories
+
+  $ hg init no-internal-phase --config format.internal-phase=no
+  $ cd no-internal-phase
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+  $ echo X > X
+  $ hg add X
+  $ hg status
+  A X
+  $ hg --config "phases.new-commit=internal" commit -m "my test internal commit" 2>&1 | grep ProgrammingError
+  ** ProgrammingError: this repository does not support the internal phase
+      raise error.ProgrammingError(msg)
+  mercurial.error.ProgrammingError: this repository does not support the internal phase
+
+  $ cd ..
+
+Check it works fine with a repository that supports it.
+
+  $ hg init internal-phase --config format.internal-phase=yes
+  $ cd internal-phase
+  $ cat .hg/requires
+  dotencode
+  fncache
+  generaldelta
+  internal-phase
+  revlogv1
+  store
+  $ mkcommit A
+  test-debug-phase: new rev 0:  x -> 1
+  test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256:   -> draft
+
+Commit an internal changeset
+
+  $ echo B > B
+  $ hg add B
+  $ hg status
+  A B
+  $ hg --config "phases.new-commit=internal" commit -m "my test internal commit"
+  test-debug-phase: new rev 1:  x -> 96
+  test-hook-close-phase: c01c42dffc7f81223397e99652a0703f83e1c5ea:   -> internal
+
+Usual visibility rules apply while the internal changeset is a
+working directory parent
+
+  $ hg log -G -l 3
+  @  changeset:   1:c01c42dffc7f
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     my test internal commit
+  |
+  o  changeset:   0:4a2df7238c3b
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
+
+Commit is hidden as expected
+
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg log -G
+  @  changeset:   0:4a2df7238c3b
+     tag:         tip
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     A
+  
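
The two halves of this test hinge on the internal-phase entry in
.hg/requires: a repository created without format.internal-phase=yes
lacks the requirement, so asking for phases.new-commit=internal is
treated as a programming error rather than silently misbehaving. A
minimal sketch of that gating pattern, with an illustrative function
name (the requirement string and error type are the ones the test
output shows):

  from mercurial import error

  INTERNAL_PHASE_REQUIREMENT = 'internal-phase'

  def checkinternalphase(repo):
      """Refuse internal-phase commits on repos lacking the requirement."""
      if INTERNAL_PHASE_REQUIREMENT not in repo.requirements:
          msg = 'this repository does not support the internal phase'
          raise error.ProgrammingError(msg)

Gating on a requirement rather than a config knob is deliberate:
clients that do not know about the new phase refuse to open the
repository at all instead of mishandling its hidden changesets.
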
--- a/tests/test-profile.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-profile.t	Mon Oct 22 14:46:06 2018 -0400
@@ -66,7 +66,7 @@
 
   $ cat >> sleepext.py << EOF
   > import time
-  > from mercurial import registrar, commands
+  > from mercurial import registrar
   > cmdtable = {}
   > command = registrar.command(cmdtable)
   > @command(b'sleep', [], b'hg sleep')
@@ -87,8 +87,10 @@
 Various statprof formatters work
 
   $ hg --profile --config profiling.statformat=byline sleep 2>../out
-  $ head -n 1 ../out
+  $ head -n 3 ../out
     %   cumulative      self          
+   time    seconds   seconds  name    
+   * sleepext.py:*:sleep (glob)
   $ cat ../out | statprofran
 
   $ hg --profile --config profiling.statformat=bymethod sleep 2>../out
@@ -105,7 +107,7 @@
 
 statprof can be used as a standalone module
 
-  $ $PYTHON -m mercurial.statprof hotpath
+  $ "$PYTHON" -m mercurial.statprof hotpath
   must specify --file to load
   [1]
 
@@ -117,11 +119,14 @@
   $ cat > fooprof.py <<EOF
   > from __future__ import absolute_import
   > import contextlib
+  > import sys
   > @contextlib.contextmanager
   > def profile(ui, fp):
   >     print('fooprof: start profile')
+  >     sys.stdout.flush()
   >     yield
   >     print('fooprof: end profile')
+  >     sys.stdout.flush()
   > def extsetup(ui):
   >     ui.write(b'fooprof: loaded\n')
   > EOF
--- a/tests/test-progress.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-progress.t	Mon Oct 22 14:46:06 2018 -0400
@@ -33,26 +33,26 @@
   >         nested = True
   >     loops = abs(loops)
   > 
-  >     progress = ui.makeprogress(topiclabel, unit='loopnum', total=total)
-  >     other = ui.makeprogress('other', unit='othernum', total=total)
+  >     progress = ui.makeprogress(topiclabel, unit=b'loopnum', total=total)
+  >     other = ui.makeprogress(b'other', unit=b'othernum', total=total)
   >     for i in range(loops):
   >         progress.update(i, item=getloopitem(i))
   >         if opts.get('parallel'):
-  >             other.update(i, item='other.%d' % i)
+  >             other.update(i, item=b'other.%d' % i)
   >         if nested:
   >             nested_steps = 2
   >             if i and i % 4 == 0:
   >                 nested_steps = 5
-  >             nested = ui.makeprogress('nested', unit='nestnum',
+  >             nested = ui.makeprogress(b'nested', unit=b'nestnum',
   >                                      total=nested_steps)
   >             for j in range(nested_steps):
-  >                 nested.update(j, item='nested.%d' % j)
+  >                 nested.update(j, item=b'nested.%d' % j)
   >             nested.complete()
   >     progress.complete()
   > 
-  > topiclabel = 'loop'
+  > topiclabel = b'loop'
   > def getloopitem(i):
-  >     return 'loop.%d' % i
+  >     return b'loop.%d' % i
   > 
   > EOF
 
--- a/tests/test-propertycache.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-propertycache.py	Mon Oct 22 14:46:06 2018 -0400
@@ -11,10 +11,15 @@
 from mercurial import (
     hg,
     localrepo,
+    pycompat,
     ui as uimod,
     util,
 )
 
+from mercurial.utils import (
+    procutil,
+)
+
 # create some special property cache that trace they call
 
 calllog = []
@@ -44,8 +49,10 @@
 
 # Create an empty repo and instantiate it. It is important to run
 # these tests on the real object to detect regression.
-repopath = os.path.join(os.environ['TESTTMP'], 'repo')
-assert subprocess.call(['hg', 'init', repopath]) == 0
+repopath = pycompat.fsencode(os.path.join(os.environ['TESTTMP'], 'repo'))
+assert subprocess.call(pycompat.rapply(procutil.tonativestr,
+                                       [b'hg', b'init', repopath])) == 0
+
 ui = uimod.ui.load()
 repo = hg.repository(ui, path=repopath).unfiltered()
 
--- a/tests/test-pull-bundle.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pull-bundle.t	Mon Oct 22 14:46:06 2018 -0400
@@ -59,7 +59,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets bbd179dfa0a7
+  new changesets bbd179dfa0a7 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd repo.pullbundle
@@ -70,7 +70,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets ed1b79f46b9a
+  new changesets ed1b79f46b9a (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg pull -r 2
   pulling from http://localhost:$HGPORT2/
@@ -79,7 +79,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets effea6de0384
+  new changesets effea6de0384 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ cd ..
   $ killdaemons.py
@@ -110,7 +110,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets bbd179dfa0a7:ed1b79f46b9a
+  new changesets bbd179dfa0a7:ed1b79f46b9a (3 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ killdaemons.py
@@ -136,7 +136,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets bbd179dfa0a7
+  new changesets bbd179dfa0a7 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd repo.pullbundle3
--- a/tests/test-pull-permission.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pull-permission.t	Mon Oct 22 14:46:06 2018 -0400
@@ -28,6 +28,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ cd ..
--- a/tests/test-pull-pull-corruption.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pull-pull-corruption.t	Mon Oct 22 14:46:06 2018 -0400
@@ -70,6 +70,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 11 changesets, 11 total revisions
+  checked 11 changesets with 11 changes to 1 files
 
   $ cd ..
--- a/tests/test-pull.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-pull.t	Mon Oct 22 14:46:06 2018 -0400
@@ -23,7 +23,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ hg serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
@@ -45,7 +45,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ hg co
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -109,12 +109,12 @@
 It's tricky to make file:// URLs work on every platform with
 regular shell commands.
 
-  $ URL=`$PYTHON -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
   $ hg pull -q "$URL"
   abort: file:// URLs can only refer to localhost
   [255]
 
-  $ URL=`$PYTHON -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
+  $ URL=`"$PYTHON" -c "from __future__ import print_function; import os; print('file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test')"`
   $ hg pull -q "$URL"
 
 SEC: check for unsafe ssh url
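
Stepping back to the file:// lines above: the dense one-liner is
easier to follow when unrolled. This is the same expression the test
runs, just spelled out:

  from __future__ import print_function
  import os

  # Normalize the cwd into a forward-slash path with exactly one
  # leading '/' (on Windows os.sep is '\\' and drive paths lack a
  # leading slash).
  path = ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/')
  print('file://localhost' + path + '/../test')
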
--- a/tests/test-purge.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-purge.t	Mon Oct 22 14:46:06 2018 -0400
@@ -49,8 +49,9 @@
 
   $ touch untracked_file
   $ touch untracked_file_readonly
-  $ $PYTHON <<EOF
-  > import os, stat
+  $ "$PYTHON" <<EOF
+  > import os
+  > import stat
   > f= 'untracked_file_readonly'
   > os.chmod(f, stat.S_IMODE(os.stat(f).st_mode) & ~stat.S_IWRITE)
   > EOF
--- a/tests/test-push-cgi.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-cgi.t	Mon Oct 22 14:46:06 2018 -0400
@@ -21,7 +21,7 @@
   > from mercurial import demandimport; demandimport.enable()
   > from mercurial.hgweb import hgweb
   > from mercurial.hgweb import wsgicgi
-  > application = hgweb('.', 'test repository')
+  > application = hgweb(b'.', b'test repository')
   > wsgicgi.launch(application)
   > HGWEB
   $ chmod 755 hgweb.cgi
@@ -38,7 +38,7 @@
 expect failure because heads don't match (formerly known as 'unsynced changes')
 
   $ QUERY_STRING="cmd=unbundle&heads=0000000000000000000000000000000000000000"; export QUERY_STRING
-  $ $PYTHON hgweb.cgi <bundle.hg >page1 2>&1
+  $ "$PYTHON" hgweb.cgi <bundle.hg >page1 2>&1
   $ cat page1
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
@@ -50,7 +50,7 @@
 successful force push
 
   $ QUERY_STRING="cmd=unbundle&heads=666f726365"; export QUERY_STRING
-  $ $PYTHON hgweb.cgi <bundle.hg >page2 2>&1
+  $ "$PYTHON" hgweb.cgi <bundle.hg >page2 2>&1
   $ cat page2
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
@@ -65,7 +65,7 @@
 successful push, list of heads
 
   $ QUERY_STRING="cmd=unbundle&heads=f7b1eb17ad24730a1651fccd46c43826d1bbc2ac"; export QUERY_STRING
-  $ $PYTHON hgweb.cgi <bundle.hg >page3 2>&1
+  $ "$PYTHON" hgweb.cgi <bundle.hg >page3 2>&1
   $ cat page3
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
@@ -80,7 +80,7 @@
 successful push, SHA1 hash of heads (unbundlehash capability)
 
   $ QUERY_STRING="cmd=unbundle&heads=686173686564 5a785a5f9e0d433b88ed862b206b011b0c3a9d13"; export QUERY_STRING
-  $ $PYTHON hgweb.cgi <bundle.hg >page4 2>&1
+  $ "$PYTHON" hgweb.cgi <bundle.hg >page4 2>&1
   $ cat page4
   Status: 200 Script output follows\r (esc)
   Content-Type: application/mercurial-0.1\r (esc)
--- a/tests/test-push-checkheads-partial-C1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-partial-C1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-partial-C2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-partial-C3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C4.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-partial-C4.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-pruned-B2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-pruned-B2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-pruned-B3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-pruned-B3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -53,7 +53,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-pruned-B4.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-pruned-B4.t	Mon Oct 22 14:46:06 2018 -0400
@@ -54,7 +54,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-pruned-B5.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-pruned-B5.t	Mon Oct 22 14:46:06 2018 -0400
@@ -57,7 +57,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  new changesets d73caddc5533:821fb21d0dd2
+  new changesets d73caddc5533:821fb21d0dd2 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-pruned-B8.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-pruned-B8.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,7 +55,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-superceed-A2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-superceed-A2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,7 +52,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-superceed-A3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-superceed-A3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -55,7 +55,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-superceed-A6.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-superceed-A6.t	Mon Oct 22 14:46:06 2018 -0400
@@ -59,7 +59,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets d73caddc5533:0f88766e02d6
+  new changesets d73caddc5533:0f88766e02d6 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-superceed-A7.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-superceed-A7.t	Mon Oct 22 14:46:06 2018 -0400
@@ -59,7 +59,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets d73caddc5533:0f88766e02d6
+  new changesets d73caddc5533:0f88766e02d6 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 'desc(C0)'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-unpushed-D2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -57,7 +57,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D3.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-unpushed-D3.t	Mon Oct 22 14:46:06 2018 -0400
@@ -56,7 +56,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d73caddc5533
+  new changesets d73caddc5533 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D4.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-unpushed-D4.t	Mon Oct 22 14:46:06 2018 -0400
@@ -73,7 +73,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets d73caddc5533:0f88766e02d6
+  new changesets d73caddc5533:0f88766e02d6 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 0
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D5.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-checkheads-unpushed-D5.t	Mon Oct 22 14:46:06 2018 -0400
@@ -62,7 +62,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets d73caddc5533:0f88766e02d6
+  new changesets d73caddc5533:0f88766e02d6 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 'desc(C0)'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-race.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-race.t	Mon Oct 22 14:46:06 2018 -0400
@@ -102,7 +102,7 @@
 
   $ cat >> $HGRCPATH << EOF
   > [ui]
-  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
   > # simplify output
   > logtemplate = {node|short} {desc} ({branch})
   > [phases]
@@ -148,7 +148,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 842e2fac6304
+  new changesets 842e2fac6304 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg clone ssh://user@dummy/server client-other
@@ -157,7 +157,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 842e2fac6304
+  new changesets 842e2fac6304 (1 drafts)
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
@@ -214,7 +214,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  98217d5a1659 C-A (default)
@@ -242,7 +242,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets a9149a1428e2
+  new changesets a9149a1428e2 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R ./client-other pull
   pulling from ssh://user@dummy/server
@@ -251,7 +251,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets a9149a1428e2
+  new changesets a9149a1428e2 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -260,7 +260,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 98217d5a1659
+  new changesets 98217d5a1659 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -303,7 +303,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  51c544a58128 C-C (default)
@@ -364,7 +364,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 59e76faf78bd
+  new changesets 59e76faf78bd (1 drafts)
   (run 'hg update' to get a working copy)
 
 #endif
@@ -384,7 +384,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 59e76faf78bd
+  new changesets 59e76faf78bd (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -393,7 +393,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 51c544a58128
+  new changesets 51c544a58128 (1 drafts)
   (run 'hg update' to get a working copy)
 
   $ hg -R server graph
@@ -451,7 +451,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  d603e2c0cdd7 C-E (default)
@@ -522,7 +522,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d9e379a8c432
+  new changesets d9e379a8c432 (1 drafts)
   (run 'hg update' to get a working copy)
 
 #endif
@@ -542,7 +542,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets d9e379a8c432
+  new changesets d9e379a8c432 (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -551,7 +551,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets d603e2c0cdd7
+  new changesets d603e2c0cdd7 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -614,7 +614,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  75d69cba5402 C-G (default)
@@ -693,7 +693,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 833be552cfe6
+  new changesets 833be552cfe6 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
 #endif
@@ -713,7 +713,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 833be552cfe6
+  new changesets 833be552cfe6 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -722,7 +722,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 75d69cba5402
+  new changesets 75d69cba5402 (1 drafts)
   (run 'hg heads' to see heads)
 
   $ hg -R server graph
@@ -789,7 +789,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  b35ed749f288 C-I (my-second-test-branch)
@@ -878,7 +878,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 89420bf00fae
+  new changesets 89420bf00fae (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
 #endif
@@ -899,7 +899,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 89420bf00fae
+  new changesets 89420bf00fae (1 drafts)
   (run 'hg heads' to see heads)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -908,7 +908,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets b35ed749f288
+  new changesets b35ed749f288 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -977,7 +977,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o    be705100c623 C-K (default)
@@ -1026,7 +1026,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets cac2cead0ff0
+  new changesets cac2cead0ff0 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-other pull
   pulling from ssh://user@dummy/server
@@ -1035,7 +1035,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets cac2cead0ff0
+  new changesets cac2cead0ff0 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -1044,7 +1044,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 0 files
-  new changesets be705100c623
+  new changesets be705100c623 (1 drafts)
   (run 'hg update' to get a working copy)
 
   $ hg -R server graph
@@ -1113,7 +1113,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  6fd3090135df C-M (default)
@@ -1169,7 +1169,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 0 files
-  new changesets 866a66e18630
+  new changesets 866a66e18630 (1 drafts)
   (run 'hg update' to get a working copy)
 
 (creates named branch on head)
@@ -1191,7 +1191,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 0 changes to 0 files
-  new changesets 866a66e18630:55a6f1c01b48
+  new changesets 866a66e18630:55a6f1c01b48 (2 drafts)
   (run 'hg update' to get a working copy)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -1200,7 +1200,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 1 changes to 1 files (+1 heads)
-  new changesets 6fd3090135df:55a6f1c01b48
+  new changesets 6fd3090135df:55a6f1c01b48 (2 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -1285,7 +1285,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  1b58ee3f79e5 C-P (default)
@@ -1349,7 +1349,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 0 files (+1 heads)
-  new changesets b0ee3d6f51bc
+  new changesets b0ee3d6f51bc (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-other pull
   pulling from ssh://user@dummy/server
@@ -1358,7 +1358,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 0 files (+1 heads)
-  new changesets b0ee3d6f51bc
+  new changesets b0ee3d6f51bc (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -1367,7 +1367,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 1 changes to 1 files (+1 heads)
-  new changesets d0a85b2252a9:1b58ee3f79e5
+  new changesets d0a85b2252a9:1b58ee3f79e5 (2 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -1458,7 +1458,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server graph
   o  de7b9e2ba3f6 C-R (other)
@@ -1520,7 +1520,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files (+1 heads)
-  new changesets 2efd43f7b5ba:3d57ed3c1091
+  new changesets 2efd43f7b5ba:3d57ed3c1091 (2 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-other pull
   pulling from ssh://user@dummy/server
@@ -1529,7 +1529,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files (+1 heads)
-  new changesets 2efd43f7b5ba:3d57ed3c1091
+  new changesets 2efd43f7b5ba:3d57ed3c1091 (2 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -1538,7 +1538,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets de7b9e2ba3f6
+  new changesets de7b9e2ba3f6 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ hg -R server graph
@@ -1634,7 +1634,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server debugobsolete
   $ hg -R server graph
@@ -1708,7 +1708,7 @@
   1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
-  new changesets 720c5163ecf6
+  new changesets 720c5163ecf6 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-other pull
   pulling from ssh://user@dummy/server
@@ -1720,7 +1720,7 @@
   1 new obsolescence markers
   obsoleted 1 changesets
   1 new orphan changesets
-  new changesets 720c5163ecf6
+  new changesets 720c5163ecf6 (1 drafts)
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ hg -R ./client-racy pull
   pulling from ssh://user@dummy/server
@@ -1730,7 +1730,7 @@
   adding file changes
   added 1 changesets with 0 changes to 0 files
   1 new orphan changesets
-  new changesets a98a47d8b85b
+  new changesets a98a47d8b85b (1 drafts)
   (run 'hg update' to get a working copy)
 
   $ hg -R server debugobsolete
@@ -1834,7 +1834,7 @@
   wrote ready: $TESTTMP/readyfile
   waiting on: $TESTTMP/watchfile
   abort: push failed:
-  'repository changed while pushing - please try again'
+  'remote repository changed while pushing - please try again'
 
   $ hg -R server debugobsolete
   b0ee3d6f51bc4c0ca6d4f2907708027a6c376233 720c5163ecf64dcc6216bee2d62bf3edb1882499 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- a/tests/test-push-warn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push-warn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -419,7 +419,7 @@
   adding c
   created new head
 
-  $ for i in `$PYTHON $TESTDIR/seq.py 3`; do hg -R h up -q 0; echo $i > h/b; hg -R h ci -qAm$i; done
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 3`; do hg -R h up -q 0; echo $i > h/b; hg -R h ci -qAm$i; done
 
   $ hg -R i push h
   pushing to h
--- a/tests/test-push.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-push.t	Mon Oct 22 14:46:06 2018 -0400
@@ -11,7 +11,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
   $ for i in 0 1 2 3 4 5 6 7 8; do
@@ -31,7 +31,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   
   pushing to test-revflag-1
   searching for changes
@@ -43,7 +43,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   
   pushing to test-revflag-2
   searching for changes
@@ -55,7 +55,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   
   pushing to test-revflag-3
   searching for changes
@@ -67,7 +67,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 1 files
   
   pushing to test-revflag-4
   searching for changes
@@ -79,7 +79,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   
   pushing to test-revflag-5
   searching for changes
@@ -91,7 +91,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   
   pushing to test-revflag-6
   searching for changes
@@ -103,7 +103,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 5 total revisions
+  checked 4 changesets with 5 changes to 2 files
   
   pushing to test-revflag-7
   searching for changes
@@ -115,7 +115,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 6 total revisions
+  checked 5 changesets with 6 changes to 3 files
   
   pushing to test-revflag-8
   searching for changes
@@ -127,7 +127,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
 
   $ cd test-revflag-8
 
@@ -146,7 +146,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
 
   $ cd ..
 
@@ -195,7 +195,7 @@
   crosschecking files in changesets and manifests
   checking files
    beta@1: dddc47b3ba30 not in manifests
-  2 files, 2 changesets, 4 total revisions
+  checked 2 changesets with 4 changes to 2 files
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -230,7 +230,7 @@
   crosschecking files in changesets and manifests
   checking files
    beta@1: manifest refers to unknown revision dddc47b3ba30
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
--- a/tests/test-py3-commands.t	Wed Oct 10 12:25:28 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,239 +0,0 @@
-#require py3exe
-
-This test helps keep track of which commands we can run on Python 3
-and what kinds of errors come up.
-The full traceback is hidden to keep the output stable.
-  $ HGBIN=`which hg`
-
-  $ for cmd in version debuginstall ; do
-  >   echo $cmd
-  >   $PYTHON3 $HGBIN $cmd 2>&1 2>&1 | tail -1
-  > done
-  version
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-  debuginstall
-  no problems detected
-
-#if test-repo
-Make a clone so that any features in the developer's .hg/hgrc that
-might confuse Python 3 don't break this test. When we can do commit in
-Python 3, we'll stop doing this. We use e76ed1e480ef for the clone
-because it has different files than 273ce12ad8f1, so we can test both
-`files` from dirstate and `files` loaded from a specific revision.
-
-  $ hg clone -r e76ed1e480ef "`dirname "$TESTDIR"`" testrepo 2>&1 | tail -1
-  15 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
-Test using -R, which exercises some URL code:
-  $ $PYTHON3 $HGBIN -R testrepo files -r 273ce12ad8f1 | tail -1
-  testrepo/tkmerge
-
-Now prove `hg files` is reading the whole manifest. We have to grep
-out some potential warnings that come from hgrc as yet.
-  $ cd testrepo
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1
-  .hgignore
-  PKG-INFO
-  README
-  hg
-  mercurial/__init__.py
-  mercurial/byterange.py
-  mercurial/fancyopts.py
-  mercurial/hg.py
-  mercurial/mdiff.py
-  mercurial/revlog.py
-  mercurial/transaction.py
-  notes.txt
-  setup.py
-  tkmerge
-
-  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 | wc -l
-  \s*14 (re)
-  $ $PYTHON3 $HGBIN files | wc -l
-  \s*15 (re)
-
-Test if log-like commands work:
-
-  $ $PYTHON3 $HGBIN tip
-  changeset:   10:e76ed1e480ef
-  tag:         tip
-  user:        oxymoron@cinder.waste.org
-  date:        Tue May 03 23:37:43 2005 -0800
-  summary:     Fix linking of changeset revs when merging
-  
-
-  $ $PYTHON3 $HGBIN log -r0
-  changeset:   0:9117c6561b0b
-  user:        mpm@selenic.com
-  date:        Tue May 03 13:16:10 2005 -0800
-  summary:     Add back links from file revisions to changeset revisions
-  
-
-  $ cd ..
-#endif
-
-Test if `hg config` works:
-
-  $ $PYTHON3 $HGBIN config
-  devel.all-warnings=true
-  devel.default-date=0 0
-  largefiles.usercache=$TESTTMP/.cache/largefiles
-  ui.slash=True
-  ui.interactive=False
-  ui.mergemarkers=detailed
-  ui.promptecho=True
-  web.address=localhost
-  web.ipv6=False
-
-  $ cat > included-hgrc <<EOF
-  > [extensions]
-  > babar = imaginary_elephant
-  > EOF
-  $ cat >> $HGRCPATH <<EOF
-  > %include $TESTTMP/included-hgrc
-  > EOF
-  $ $PYTHON3 $HGBIN version | tail -1
-  *** failed to import extension babar from imaginary_elephant: *: 'imaginary_elephant' (glob)
-  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-  $ rm included-hgrc
-  $ touch included-hgrc
-
-Test bytes-ness of policy.policy with HGMODULEPOLICY
-
-  $ HGMODULEPOLICY=py
-  $ export HGMODULEPOLICY
-  $ $PYTHON3 `which hg` debuginstall 2>&1 2>&1 | tail -1
-  no problems detected
-
-`hg init` can create empty repos
-`hg status works fine`
-`hg summary` also works!
-
-  $ $PYTHON3 `which hg` init py3repo
-  $ cd py3repo
-  $ echo "This is the file 'iota'." > iota
-  $ $PYTHON3 $HGBIN status
-  ? iota
-  $ $PYTHON3 $HGBIN add iota
-  $ $PYTHON3 $HGBIN status
-  A iota
-  $ hg diff --nodates --git
-  diff --git a/iota b/iota
-  new file mode 100644
-  --- /dev/null
-  +++ b/iota
-  @@ -0,0 +1,1 @@
-  +This is the file 'iota'.
-  $ $PYTHON3 $HGBIN commit --message 'commit performed in Python 3'
-  $ $PYTHON3 $HGBIN status
-
-  $ mkdir A
-  $ echo "This is the file 'mu'." > A/mu
-  $ $PYTHON3 $HGBIN addremove
-  adding A/mu
-  $ $PYTHON3 $HGBIN status
-  A A/mu
-  $ HGEDITOR='echo message > ' $PYTHON3 $HGBIN commit
-  $ $PYTHON3 $HGBIN status
-  $ $PYHON3 $HGBIN summary
-  parent: 1:e1e9167203d4 tip
-   message
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 2 draft
-
-Test weird unicode-vs-bytes stuff
-
-  $ $PYTHON3 $HGBIN help | egrep -v '^ |^$'
-  Mercurial Distributed SCM
-  list of commands:
-  additional help topics:
-  (use 'hg help -v' to show built-in aliases and global options)
-
-  $ $PYTHON3 $HGBIN help help | egrep -v '^ |^$'
-  hg help [-ecks] [TOPIC]
-  show help for a given topic or a help overview
-  options ([+] can be repeated):
-  (some details hidden, use --verbose to show complete help)
-
-  $ $PYTHON3 $HGBIN help -k notopic
-  abort: no matches
-  (try 'hg help' for a list of topics)
-  [255]
-
-Prove the repo is valid using the Python 2 `hg`:
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  2 files, 2 changesets, 2 total revisions
-  $ hg log
-  changeset:   1:e1e9167203d4
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     message
-  
-  changeset:   0:71c96e924262
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     commit performed in Python 3
-  
-
-  $ $PYTHON3 $HGBIN log -G
-  @  changeset:   1:e1e9167203d4
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     message
-  |
-  o  changeset:   0:71c96e924262
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     commit performed in Python 3
-  
-  $ $PYTHON3 $HGBIN log -Tjson
-  [
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "message",
-    "node": "e1e9167203d450ca2f558af628955b5f5afd4489",
-    "parents": ["71c96e924262969ff0d8d3d695b0f75412ccc3d8"],
-    "phase": "draft",
-    "rev": 1,
-    "tags": ["tip"],
-    "user": "test"
-   },
-   {
-    "bookmarks": [],
-    "branch": "default",
-    "date": [0, 0],
-    "desc": "commit performed in Python 3",
-    "node": "71c96e924262969ff0d8d3d695b0f75412ccc3d8",
-    "parents": ["0000000000000000000000000000000000000000"],
-    "phase": "draft",
-    "rev": 0,
-    "tags": [],
-    "user": "test"
-   }
-  ]
-
-Show that update works now!
-
-  $ $PYTHON3 $HGBIN up 0
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ $PYTHON3 $HGBIN identify
-  71c96e924262
-
-branches and bookmarks also works!
-
-  $ $PYTHON3 $HGBIN branches
-  default                        1:e1e9167203d4
-  $ $PYTHON3 $HGBIN bookmark book
-  $ $PYTHON3 $HGBIN bookmarks
-   * book                      0:71c96e924262
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-backup.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,150 @@
+  $ cat << EOF >> $HGRCPATH
+  > [extensions]
+  > rebase=
+  > EOF
+
+==========================================
+Test history-editing-backup config option |
+==========================================
+Test with pre-obsmarker rebase:
+1) When the config option is not set:
+  $ hg init repo1
+  $ cd repo1
+  $ echo a>a
+  $ hg ci -qAma
+  $ echo b>b
+  $ hg ci -qAmb
+  $ echo c>c
+  $ hg ci -qAmc
+  $ hg up 0 -q
+  $ echo d>d
+  $ hg ci -qAmd
+  $ echo e>e
+  $ hg ci -qAme
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  4: e
+  |
+  o  3: d
+  |
+  | o  2: c
+  | |
+  | o  1: b
+  |/
+  o  0: a
+  
+  $ hg rebase -s 1 -d .
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/d2ae7f538514-c7ed7a78-rebase.hg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  4: c
+  |
+  o  3: b
+  |
+  @  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+
+2) When the config option is set:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo g>g
+  $ hg ci -Aqmg
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  6: g
+  |
+  o  5: f
+  |
+  | o  4: c
+  | |
+  | o  3: b
+  |/
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+  $ hg rebase -s 3 -d .
+  rebasing 3:05bff2a95b12 "b"
+  rebasing 4:1762bde4404d "c"
+
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  o  6: c
+  |
+  o  5: b
+  |
+  @  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+Test when rebased revisions are stripped during abort:
+======================================================
+
+  $ echo conflict > c
+  $ hg ci -Am "conflict with c"
+  adding c
+  created new head
+  $ hg log -GT "{rev}: {firstline(desc)}\n"
+  @  7: conflict with c
+  |
+  | o  6: c
+  | |
+  | o  5: b
+  |/
+  o  4: g
+  |
+  o  3: f
+  |
+  o  2: e
+  |
+  o  1: d
+  |
+  o  0: a
+  
+When history-editing-backup = True:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = True
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
+  rebase aborted
+
+When history-editing-backup = False:
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > history-editing-backup = False
+  > EOF
+  $ hg rebase -s 5 -d .
+  rebasing 5:1f8148a544ee "b"
+  rebasing 6:f8bc7d28e573 "c"
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
+
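The new test above exercises the `ui.history-editing-backup` option end to end: left at its default, rebase strips the original changesets into a strip-backup bundle; set to False, no bundle is written. A minimal sketch of the opt-out configuration, as used by the test:

  $ cat >> $HGRCPATH <<EOF
  > [ui]
  > history-editing-backup = False
  > EOF
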
--- a/tests/test-rebase-base-flag.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-base-flag.t	Mon Oct 22 14:46:06 2018 -0400
@@ -14,7 +14,7 @@
   > EOF
 
   $ rebasewithdag() {
-  >   N=`$PYTHON -c "print($N+1)"`
+  >   N=`"$PYTHON" -c "print($N+1)"`
   >   hg init repo$N && cd repo$N
   >   hg debugdrawdag
   >   hg rebase "$@" > _rebasetmp
--- a/tests/test-rebase-collapse.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-collapse.t	Mon Oct 22 14:46:06 2018 -0400
@@ -540,7 +540,7 @@
   adding manifests
   adding file changes
   added 4 changesets with 11 changes to 7 files (+1 heads)
-  new changesets f447d5abf5ea:338e84e2e558
+  new changesets f447d5abf5ea:338e84e2e558 (4 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up -q tip
   $ hg tglog
--- a/tests/test-rebase-conflicts.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-conflicts.t	Mon Oct 22 14:46:06 2018 -0400
@@ -155,7 +155,7 @@
   adding manifests
   adding file changes
   added 11 changesets with 8 changes to 3 files (+1 heads)
-  new changesets 24797d4f68de:2f2496ddf49d
+  new changesets 24797d4f68de:2f2496ddf49d (11 drafts)
   (run 'hg heads' to see heads)
   $ hg up default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-rebase-dest.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-dest.t	Mon Oct 22 14:46:06 2018 -0400
@@ -119,7 +119,7 @@
   > EOF
 
   $ rebasewithdag() {
-  >   N=`$PYTHON -c "print($N+1)"`
+  >   N=`"$PYTHON" -c "print($N+1)"`
   >   hg init repo$N && cd repo$N
   >   hg debugdrawdag
   >   hg rebase "$@" > _rebasetmp
--- a/tests/test-rebase-inmemory.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-inmemory.t	Mon Oct 22 14:46:06 2018 -0400
@@ -156,7 +156,92 @@
   |/
   o  0: b173517d0057 'a'
   
+
+Test reporting of path conflicts
+
+  $ hg rm a
+  $ mkdir a
+  $ touch a/a
+  $ hg ci -Am "a/a"
+  adding a/a
+  $ hg tglog
+  @  4: daf7dfc139cb 'a/a'
+  |
+  o  3: 844a7de3e617 'c'
+  |
+  | o  2: 09c044d2cb43 'd'
+  | |
+  | o  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+  $ hg rebase -r . -d 2
+  rebasing 4:daf7dfc139cb "a/a" (tip)
+  saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg
+
+  $ hg tglog
+  @  4: c6ad37a4f250 'a/a'
+  |
+  | o  3: 844a7de3e617 'c'
+  | |
+  o |  2: 09c044d2cb43 'd'
+  | |
+  o |  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+  $ echo foo > foo
+  $ hg ci -Aqm "added foo"
+  $ hg up '.^'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo bar > bar
+  $ hg ci -Aqm "added bar"
+  $ hg rm a/a
+  $ echo a > a
+  $ hg ci -Aqm "added a back!"
+  $ hg tglog
+  @  7: 855e9797387e 'added a back!'
+  |
+  o  6: d14530e5e3e6 'added bar'
+  |
+  | o  5: 9b94b9373deb 'added foo'
+  |/
+  o  4: c6ad37a4f250 'a/a'
+  |
+  | o  3: 844a7de3e617 'c'
+  | |
+  o |  2: 09c044d2cb43 'd'
+  | |
+  o |  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+  $ hg rebase -r . -d 5
+  rebasing 7:855e9797387e "added a back!" (tip)
+  saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/855e9797387e-81ee4c5d-rebase.hg
+
+  $ hg tglog
+  @  7: bb3f02be2688 'added a back!'
+  |
+  | o  6: d14530e5e3e6 'added bar'
+  | |
+  o |  5: 9b94b9373deb 'added foo'
+  |/
+  o  4: c6ad37a4f250 'a/a'
+  |
+  | o  3: 844a7de3e617 'c'
+  | |
+  o |  2: 09c044d2cb43 'd'
+  | |
+  o |  1: fc055c3b4d33 'b'
+  |/
+  o  0: b173517d0057 'a'
+  
+
+  $ cd ..
+
 Test dry-run rebasing
+
   $ hg init repo3
   $ cd repo3
   $ echo a>a
@@ -325,6 +410,25 @@
   hit a merge conflict
   [1]
 
+In-memory rebase that fails due to merge conflicts
+
+  $ hg rebase -s 2 -d 7
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  transaction abort!
+  rollback completed
+  hit merge conflicts; re-running rebase without in-memory merge
+  rebase aborted
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  rebasing 4:e860deea161a "e"
+  merging e
+  warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
 ==========================
 Test for --confirm option|
 ==========================
@@ -509,3 +613,31 @@
   o  0:cb9a9f314b8b test
      a
   
+#if execbit
+
+Test a metadata-only in-memory merge
+  $ cd $TESTTMP
+  $ hg init no_exception
+  $ cd no_exception
+# Produce the following graph:
+#   o  'add +x to foo.txt'
+#   | o  r1  (adds bar.txt, just for something to rebase to)
+#   |/
+#   o  r0   (adds foo.txt, no +x)
+  $ echo hi > foo.txt
+  $ hg ci -qAm r0
+  $ echo hi > bar.txt
+  $ hg ci -qAm r1
+  $ hg co -qr ".^"
+  $ chmod +x foo.txt
+  $ hg ci -qAm 'add +x to foo.txt'
+issue5960: this was raising an AttributeError exception
+  $ hg rebase -r . -d 1
+  rebasing 2:539b93e77479 "add +x to foo.txt" (tip)
+  saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob)
+  $ hg diff -c tip
+  diff --git a/foo.txt b/foo.txt
+  old mode 100644
+  new mode 100755
+
+#endif
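The conflict hunk above documents the in-memory rebase fallback: on a merge conflict the transaction is rolled back and the whole rebase is re-run with the on-disk merge code. A sketch of forcing a single in-memory run from the command line, assuming the `rebase.experimental.inmemory` option that this test file enables in its setup (the option name does not appear in these hunks):

  $ hg --config rebase.experimental.inmemory=yes rebase -s 2 -d 7
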
--- a/tests/test-rebase-named-branches.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-named-branches.t	Mon Oct 22 14:46:06 2018 -0400
@@ -16,7 +16,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-rebase-newancestor.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-newancestor.t	Mon Oct 22 14:46:06 2018 -0400
@@ -133,7 +133,8 @@
   note: rebase of 1:1d1a643d390e created no changes to commit
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  other [source] changed f-default which local [dest] deleted
+  file 'f-default' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   rebasing 6:9455ee510502 "dev: merge default"
   saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-43e9e04b-rebase.hg
@@ -162,7 +163,8 @@
   > EOF
   rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
   rebasing 4:4b019212aaf6 "dev: merge default"
-  other [source] changed f-default which local [dest] deleted
+  file 'f-default' was deleted in local [dest] but was modified in other [source].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
   rebasing 6:9455ee510502 "dev: merge default"
   saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-62d0b222-rebase.hg
--- a/tests/test-rebase-obsolete.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-obsolete.t	Mon Oct 22 14:46:06 2018 -0400
@@ -15,6 +15,7 @@
   > [extensions]
   > rebase=
   > drawdag=$TESTDIR/drawdag.py
+  > strip=
   > EOF
 
 Setup rebase canonical repo
@@ -26,7 +27,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -355,9 +356,9 @@
   $ hg id --debug -r tip
   4dc2197e807bae9817f09905b50ab288be2dbbcf tip
   $ hg debugobsolete
-  42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'rebase', 'user': 'test'}
-  5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'rebase', 'user': 'test'}
-  32af7686d403cf45b5d95f2d70cebea587ac806a 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'rebase', 'user': 'test'}
+  42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'fold-id': '6fb65cdc', 'fold-idx': '1', 'fold-size': '3', 'operation': 'rebase', 'user': 'test'}
+  5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'fold-id': '6fb65cdc', 'fold-idx': '2', 'fold-size': '3', 'operation': 'rebase', 'user': 'test'}
+  32af7686d403cf45b5d95f2d70cebea587ac806a 4dc2197e807bae9817f09905b50ab288be2dbbcf 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'fold-id': '6fb65cdc', 'fold-idx': '3', 'fold-size': '3', 'operation': 'rebase', 'user': 'test'}
 
   $ cd ..
 
@@ -574,7 +575,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up 3
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -1122,6 +1123,23 @@
   o  0:b173517d0057 a
   
   $ hg strip -r 8:
+  $ hg log -G -r 'a'::
+  *  7:1143e9adc121 f
+  |
+  | o  6:d60ebfa0f1cb e
+  | |
+  | o  5:027ad6c5830d d'
+  | |
+  x |  4:76be324c128b d (rewritten using replace as 5:027ad6c5830d)
+  |/
+  o  3:a82ac2b38757 c
+  |
+  | o  2:630d7c95eff7 x
+  | |
+  o |  1:488e1b7e7341 b
+  |/
+  o  0:b173517d0057 a
+  
 
 If the rebase set has an obsolete (d) with a successor (d') outside the rebase
 set and none in destination, we still get the divergence warning.
@@ -1493,6 +1511,26 @@
   
   $ cd ..
 
+Rebase merge where extinct node has successor that is not an ancestor of
+destination
+
+  $ hg init extinct-with-succ-not-in-dest
+  $ cd extinct-with-succ-not-in-dest
+
+  $ hg debugdrawdag <<EOF
+  > E C # replace: C -> E
+  > | |
+  > D B
+  > |/
+  > A
+  > EOF
+
+  $ hg rebase -d D -s B
+  rebasing 1:112478962961 "B" (B)
+  note: not rebasing 3:26805aba1e60 "C" (C) and its descendants as this would cause divergence
+
+  $ cd ..
+
   $ hg init p2-succ-in-dest-c
   $ cd p2-succ-in-dest-c
 
@@ -1788,3 +1826,312 @@
   |
   o  0:426bada5c675 A
   
+====================
+Test --stop option |
+====================
+  $ cd ..
+  $ hg init rbstop
+  $ cd rbstop
+  $ echo a>a
+  $ hg ci -Aqma
+  $ echo b>b
+  $ hg ci -Aqmb
+  $ echo c>c
+  $ hg ci -Aqmc
+  $ echo d>d
+  $ hg ci -Aqmd
+  $ hg up 0 -q
+  $ echo f>f
+  $ hg ci -Aqmf
+  $ echo D>d
+  $ hg ci -Aqm "conflict with d"
+  $ hg up 3 -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | x  2:177f92b77385 test
+  | |  c
+  | |
+  | x  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test it aborts if unstable csets are not allowed:
+===============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=False
+  > EOF
+
+  $ hg strip 6 --no-backup -q
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot remove original changesets with unrebased descendants
+  (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets)
+  [255]
+  $ hg rebase --abort
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  rebase aborted
+
+Test --stop when --keep is passed:
+==================================
+  $ hg rebase -s 1 -d 5 --keep
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  7:7fffad344617 test
+  |  c
+  |
+  o  6:b15528633407 test
+  |  b
+  |
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test --stop aborts when --collapse is passed:
+=============================================
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > evolution.allowunstable=True
+  > EOF
+
+  $ hg strip 6
+  saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+  $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d"
+  rebasing 1:d2ae7f538514 "b"
+  rebasing 2:177f92b77385 "c"
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  abort: cannot stop in --collapse session
+  [255]
+  $ hg rebase --abort
+  rebase aborted
+  $ hg diff
+  $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
+  o  5:00bfc9898aeb test
+  |  conflict with d
+  |
+  o  4:dafd40200f93 test
+  |  f
+  |
+  | @  3:055a42cdd887 test
+  | |  d
+  | |
+  | o  2:177f92b77385 test
+  | |  c
+  | |
+  | o  1:d2ae7f538514 test
+  |/   b
+  |
+  o  0:cb9a9f314b8b test
+     a
+  
+Test --stop raises errors with conflicting options:
+=================================================
+  $ hg rebase -s 3 -d 5
+  rebasing 3:055a42cdd887 "d"
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop --dry-run
+  abort: cannot specify both --dry-run and --stop
+  [255]
+
+  $ hg rebase -s 3 -d 5
+  abort: rebase in progress
+  (use 'hg rebase --continue' or 'hg rebase --abort')
+  [255]
+  $ hg rebase --stop --continue
+  abort: cannot use --stop with --continue
+  [255]
+
+Test --stop moves bookmarks of original revisions to new rebased nodes:
+======================================================================
+  $ cd ..
+  $ hg init repo
+  $ cd repo
+
+  $ echo a > a
+  $ hg ci -Am A
+  adding a
+
+  $ echo b > b
+  $ hg ci -Am B
+  adding b
+  $ hg book X
+  $ hg book Y
+
+  $ echo c > c
+  $ hg ci -Am C
+  adding c
+  $ hg book Z
+
+  $ echo d > d
+  $ hg ci -Am D
+  adding d
+
+  $ hg up 0 -q
+  $ echo e > e
+  $ hg ci -Am E
+  adding e
+  created new head
+
+  $ echo doubt > d
+  $ hg ci -Am "conflict with d"
+  adding d
+
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | o  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | o  2: 49cb3485fa0c 'C' bookmarks: Y
+  | |
+  | o  1: 6c81ed0049f8 'B' bookmarks: X
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
+  $ hg rebase -s 1 -d 5
+  rebasing 1:6c81ed0049f8 "B" (X)
+  rebasing 2:49cb3485fa0c "C" (Y)
+  rebasing 3:67a385d4e6f2 "D" (Z)
+  merging d
+  warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ hg rebase --stop
+  1 new orphan changesets
+  $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n"
+  o  7: 9c86c650b686 'C' bookmarks: Y
+  |
+  o  6: 9b87b54e5fd8 'B' bookmarks: X
+  |
+  @  5: 39adf30bc1be 'conflict with d' bookmarks:
+  |
+  o  4: 9c1e55f411b6 'E' bookmarks:
+  |
+  | *  3: 67a385d4e6f2 'D' bookmarks: Z
+  | |
+  | x  2: 49cb3485fa0c 'C' bookmarks:
+  | |
+  | x  1: 6c81ed0049f8 'B' bookmarks:
+  |/
+  o  0: 1994f17a630e 'A' bookmarks:
+  
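The new `--stop` tests pin down its contract: changesets rebased so far are kept, the not-yet-rebased originals are left behind as orphans, and bookmarks move onto the new nodes. As the aborting case shows, stopping mid-rebase requires unstable revisions to be allowed (or `--keep`); a sketch of the prerequisite configuration, with values taken from the test:

  $ cat >> $HGRCPATH <<EOF
  > [experimental]
  > evolution.allowunstable = True
  > EOF
  $ hg rebase --stop
  1 new orphan changesets
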
--- a/tests/test-rebase-parameters.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-parameters.t	Mon Oct 22 14:46:06 2018 -0400
@@ -17,7 +17,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -61,7 +61,7 @@
   [1]
 
   $ hg rebase --continue --abort
-  abort: cannot use both abort and continue
+  abort: cannot use --abort with --continue
   [255]
 
   $ hg rebase --continue --collapse
--- a/tests/test-rebase-partial.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-partial.t	Mon Oct 22 14:46:06 2018 -0400
@@ -15,7 +15,7 @@
   > EOF
 
   $ rebasewithdag() {
-  >   N=`$PYTHON -c "print($N+1)"`
+  >   N=`"$PYTHON" -c "print($N+1)"`
   >   hg init repo$N && cd repo$N
   >   hg debugdrawdag
   >   hg rebase "$@" > _rebasetmp
--- a/tests/test-rebase-scenario-global.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebase-scenario-global.t	Mon Oct 22 14:46:06 2018 -0400
@@ -18,7 +18,7 @@
   adding manifests
   adding file changes
   added 8 changesets with 7 changes to 7 files (+2 heads)
-  new changesets cd010b8cd998:02de42196ebe
+  new changesets cd010b8cd998:02de42196ebe (8 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -421,7 +421,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 9 changes to 9 files (+2 heads)
-  new changesets 9ae2ed22e576:479ddb54a924
+  new changesets 9ae2ed22e576:479ddb54a924 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg tglog
   o  8: 479ddb54a924 'I'
--- a/tests/test-rebuildstate.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rebuildstate.t	Mon Oct 22 14:46:06 2018 -0400
@@ -47,14 +47,14 @@
 
 state dump after
 
-  $ hg debugstate --nodates | sort
+  $ hg debugstate --no-dates | sort
   n   0         -1 unset               bar
   n   0         -1 unset               foo
 
   $ hg debugadddrop --normal-lookup file1 file2
   $ hg debugadddrop --drop bar
   $ hg debugadddrop --drop
-  $ hg debugstate --nodates
+  $ hg debugstate --no-dates
   n   0         -1 unset               file1
   n   0         -1 unset               file2
   n   0         -1 unset               foo
@@ -78,13 +78,13 @@
   ? baz
   C foo
   $ hg debugadddrop --normal-lookup baz
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   r   0          0 * bar (glob)
   n   0         -1 * baz (glob)
   n 644          0 * foo (glob)
   a   0         -1 * qux (glob)
   $ hg debugrebuilddirstate --minimal
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   r   0          0 * bar (glob)
   n 644          0 * foo (glob)
   a   0         -1 * qux (glob)
@@ -104,16 +104,16 @@
   R bar
   ? baz
   C foo
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   r   0          0 * bar (glob)
   n 644          0 * foo (glob)
   a   0         -1 * qux (glob)
   $ hg debugadddrop --drop foo
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   r   0          0 * bar (glob)
   a   0         -1 * qux (glob)
   $ hg debugrebuilddirstate --minimal
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   r   0          0 * bar (glob)
   n   0         -1 * foo (glob)
   a   0         -1 * qux (glob)
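This hunk follows the flag rename from `--nodates` to `--no-dates` on the dirstate debug commands. The new spelling, as exercised above:

  $ hg debugstate --no-dates
  $ hg debugdirstate --no-dates
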
--- a/tests/test-relink.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-relink.t	Mon Oct 22 14:46:06 2018 -0400
@@ -11,9 +11,12 @@
   > from __future__ import absolute_import, print_function
   > import os
   > import sys
-  > from mercurial import util
+  > from mercurial import (
+  >     pycompat,
+  >     util,
+  > )
   > path1, path2 = sys.argv[1:3]
-  > if util.samefile(path1, path2):
+  > if util.samefile(pycompat.fsencode(path1), pycompat.fsencode(path2)):
   >     print('%s == %s' % (path1, path2))
   > else:
   >     print('%s != %s' % (path1, path2))
@@ -49,7 +52,7 @@
 
 Test files are read in binary mode
 
-  $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\r\nb\n')"
+  $ "$PYTHON" -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\r\nb\n')"
   $ cd ..
 
 
@@ -68,7 +71,7 @@
   $ echo b >> b
   $ hg ci -m changeb
   created new head
-  $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\nb\r\n')"
+  $ "$PYTHON" -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\nb\r\n')"
 
 
 relink
@@ -98,9 +101,9 @@
 
 check hardlinks
 
-  $ $PYTHON arelinked.py repo/.hg/store/data/a.i clone/.hg/store/data/a.i
+  $ "$PYTHON" arelinked.py repo/.hg/store/data/a.i clone/.hg/store/data/a.i
   repo/.hg/store/data/a.i == clone/.hg/store/data/a.i
-  $ $PYTHON arelinked.py repo/.hg/store/data/b.i clone/.hg/store/data/b.i
+  $ "$PYTHON" arelinked.py repo/.hg/store/data/b.i clone/.hg/store/data/b.i
   repo/.hg/store/data/b.i != clone/.hg/store/data/b.i
 
 #endif
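The test-relink.t fix shows a Python 3 porting pattern repeated across this release: `sys.argv` yields unicode strings, while Mercurial's `util` APIs take bytes paths, so arguments go through `pycompat.fsencode` first. A standalone sketch in the same heredoc style the test uses (the script name is illustrative):

  $ cat > samefile.py <<EOF
  > from __future__ import absolute_import, print_function
  > import sys
  > from mercurial import pycompat, util
  > # fsencode: unicode argv -> bytes, as Mercurial's file APIs expect
  > p1, p2 = [pycompat.fsencode(p) for p in sys.argv[1:3]]
  > print(util.samefile(p1, p2))
  > EOF
  $ "$PYTHON" samefile.py a.txt a.txt
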
--- a/tests/test-remove.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-remove.t	Mon Oct 22 14:46:06 2018 -0400
@@ -520,6 +520,14 @@
   deleting [===========================================>] 1/1\r (no-eol) (esc)
                                                               \r (no-eol) (esc)
   removing a
+  $ hg remove a -nv --color debug
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  \r (no-eol) (esc)
+  deleting [===========================================>] 1/1\r (no-eol) (esc)
+                                                              \r (no-eol) (esc)
+  [ui.addremove.removed ui.status|removing a]
   $ hg diff
 
   $ cat >> .hg/hgrc <<EOF
--- a/tests/test-removeemptydirs.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-removeemptydirs.t	Mon Oct 22 14:46:06 2018 -0400
@@ -87,8 +87,8 @@
   $ hg co -qr 'desc(first_rebase_source)'
   $ cd $TESTTMP/hgrebase/somedir
   $ hg --config extensions.rebase= rebase -qr . -d 'desc(first_rebase_dest)'
-  current directory was removed
-  (consider changing to repo root: $TESTTMP/hgrebase)
+  current directory was removed (rmcwd !)
+  (consider changing to repo root: $TESTTMP/hgrebase) (rmcwd !)
   $ cd $TESTTMP/hgrebase/somedir
 (The current node is the rebased first_rebase_source on top of
 first_rebase_dest)
@@ -174,7 +174,9 @@
 
 Histedit doing 'pick, pick, fold':
 
-  $ hg histedit --commands /dev/stdin <<EOF
+#if rmcwd
+
+  $ hg histedit --commands - <<EOF
   > pick 6274c77c93c3 1 add bar
   > pick ff70a87b588f 0 add foo
   > fold 9992bb0ac0db 2 add baz
@@ -196,6 +198,25 @@
   1:5c806432464a add foo
   0:d17db4b0303a add bar
 
+#else
+
+  $ cd $TESTTMP/issue5826_withrm
+
+  $ hg histedit --commands - <<EOF
+  > pick 6274c77c93c3 1 add bar
+  > pick ff70a87b588f 0 add foo
+  > fold 9992bb0ac0db 2 add baz
+  > EOF
+  saved backup bundle to $TESTTMP/issue5826_withrm/.hg/strip-backup/5c806432464a-cd4c8d86-histedit.hg
+
+  $ hg log -T '{rev}:{node|short} {desc}\n'
+  1:b9eddaa97cbc add foo
+  ***
+  add baz
+  0:d17db4b0303a add bar
+
+#endif
+
 Now test that again with experimental.removeemptydirs=false:
   $ hg init issue5826_norm
   $ cd issue5826_norm
@@ -227,7 +248,7 @@
 
 Histedit doing 'pick, pick, fold':
 
-  $ hg histedit --commands /dev/stdin <<EOF
+  $ hg histedit --commands - <<EOF
   > pick 6274c77c93c3 1 add bar
   > pick ff70a87b588f 0 add foo
   > fold 9992bb0ac0db 2 add baz
@@ -275,6 +296,9 @@
   > y
   > a
   > EOF
+
+The split succeeds on no-rmcwd platforms, which alters the rest of the tests
+#if rmcwd
   $ cat ../split_commands | hg split
   current directory was removed
   (consider changing to repo root: $TESTTMP/hgsplit)
@@ -292,6 +316,7 @@
   
   abort: $ENOENT$
   [255]
+#endif
 
 Let's try that again without the rmdir
   $ cd $TESTTMP/hgsplit/somedir
--- a/tests/test-rename-merge2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rename-merge2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -3,7 +3,7 @@
   $ cd t
   $ cat <<EOF > merge
   > import sys, os
-  > f = open(sys.argv[1], "wb")
+  > f = open(sys.argv[1], "w")
   > f.write("merge %s %s %s" % (sys.argv[1], sys.argv[2], sys.argv[3]))
   > f.close()
   > EOF
@@ -47,7 +47,7 @@
   >     echo "--------------"
   >     echo "test L:$1 R:$2 W:$3 - $4"
   >     echo "--------------"
-  >     hg merge -y --debug --traceback --tool="$PYTHON ../merge"
+  >     hg merge -y --debug --traceback --tool="\"$PYTHON\" ../merge"
   > 
   >     echo "--------------"
   >     hg status -camC -X rev
@@ -692,7 +692,8 @@
   starting 4 threads for background file closing (?)
    a: prompt deleted/changed -> m (premerge)
   picked tool ':prompt' for a (binary False symlink False changedelete True)
-  other [merge rev] changed a which local [working copy] deleted
+  file 'a' was deleted in local [working copy] but was modified in other [merge rev].
+  What do you want to do?
   use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
@@ -737,7 +738,8 @@
   starting 4 threads for background file closing (?)
    a: prompt changed/deleted -> m (premerge)
   picked tool ':prompt' for a (binary False symlink False changedelete True)
-  local [working copy] changed a which other [merge rev] deleted
+  file 'a' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
    b: both created -> m (premerge)
   picked tool '* ../merge' for b (binary False symlink False changedelete False) (glob)
--- a/tests/test-rename.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rename.t	Mon Oct 22 14:46:06 2018 -0400
@@ -71,6 +71,7 @@
 
   $ hg rename --after d1/a dummy
   d1/a: not recording move - dummy does not exist
+  [1]
 
 move a single file to an existing directory
 
@@ -266,8 +267,9 @@
 
   $ hg rename d1/* d2
   d2/b: not overwriting - file already committed
-  (hg rename --force to replace the file by recording a rename)
+  ('hg rename --force' to replace the file by recording a rename)
   moving d1/d11/a1 to d2/d11/a1
+  [1]
   $ hg status -C
   A d2/a
     d1/a
@@ -338,6 +340,7 @@
   d1/b: not recording move - d2/d21/b does not exist
   d1/ba: not recording move - d2/d21/ba does not exist
   moving d1/d11/a1 to d2/d21/a1
+  [1]
   $ hg status -C
   A d2/d21/a
     d1/a
@@ -371,7 +374,8 @@
   $ echo "ca" > d1/ca
   $ hg rename d1/ba d1/ca
   d1/ca: not overwriting - file exists
-  (hg rename --after to record the rename)
+  ('hg rename --after' to record the rename)
+  [1]
   $ hg status -C
   ? d1/ca
   $ hg update -C
@@ -395,7 +399,8 @@
   $ ln -s ba d1/ca
   $ hg rename --traceback d1/ba d1/ca
   d1/ca: not overwriting - file exists
-  (hg rename --after to record the rename)
+  ('hg rename --after' to record the rename)
+  [1]
   $ hg status -C
   ? d1/ca
   $ hg update -C
@@ -421,6 +426,7 @@
   $ hg rename d1/* d2/* d3
   moving d1/d11/a1 to d3/d11/a1
   d3/b: not overwriting - d2/b collides with d1/b
+  [1]
   $ hg status -C
   A d3/a
     d1/a
--- a/tests/test-repair-strip.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-repair-strip.t	Mon Oct 22 14:46:06 2018 -0400
@@ -21,7 +21,7 @@
   >   hg verify
   >   echo % journal contents
   >   if [ -f .hg/store/journal ]; then
-  >       cat .hg/store/journal | $PYTHON $TESTTMP/dumpjournal.py
+  >       cat .hg/store/journal | "$PYTHON" $TESTTMP/dumpjournal.py
   >   else
   >       echo "(no journal)"
   >   fi
@@ -63,7 +63,7 @@
    (expected 1)
    b@?: 736c29771fba not in manifests
   warning: orphan data file 'data/c.i'
-  2 files, 2 changesets, 3 total revisions
+  checked 2 changesets with 3 changes to 2 files
   2 warnings encountered!
   2 integrity errors encountered!
   % journal contents
@@ -76,7 +76,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
   $ teststrip 0 2 r .hg/store/data/b.i
   % before update 0, strip 2
   changeset:   0:cb9a9f314b8b
@@ -90,7 +90,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 3 files
   % journal contents
   (no journal)
   $ teststrip 0 2 w .hg/store/00manifest.i
@@ -120,7 +120,7 @@
    b@?: rev 1 points to nonexistent changeset 2
    (expected 1)
    c@?: rev 0 points to nonexistent changeset 3
-  3 files, 2 changesets, 4 total revisions
+  checked 2 changesets with 4 changes to 3 files
   1 warnings encountered!
   7 integrity errors encountered!
   (first damaged changeset appears to be 3)
@@ -134,6 +134,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
 
   $ cd ..
--- a/tests/test-requires.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-requires.t	Mon Oct 22 14:46:06 2018 -0400
@@ -38,7 +38,7 @@
   >     for name, module in extensions.extensions(ui):
   >         if __name__ == module.__name__:
   >             # support specific feature locally
-  >             supported |= {'featuresetup-test'}
+  >             supported |= {b'featuresetup-test'}
   >             return
   > def uisetup(ui):
   >     localrepo.featuresetupfuncs.add(featuresetup)
--- a/tests/test-resolve.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-resolve.t	Mon Oct 22 14:46:06 2018 -0400
@@ -67,6 +67,9 @@
   $ hg resolve -l
   R file1
   U file2
+  $ hg resolve --re-merge filez file2
+  arguments do not match paths that need resolving
+  (try: hg resolve --re-merge path:filez path:file2)
   $ hg resolve -m filez file2
   arguments do not match paths that need resolving
   (try: hg resolve -m path:filez path:file2)
@@ -153,18 +156,18 @@
   $ hg resolve -l -Tjson
   [
    {
-    "path": "file1",
-    "status": "R"
+    "mergestatus": "R",
+    "path": "file1"
    },
    {
-    "path": "file2",
-    "status": "U"
+    "mergestatus": "U",
+    "path": "file2"
    }
   ]
 
-  $ hg resolve -l -T '{path} {status} {p1rev} {p2rev}\n'
-  file1 R 2 1
-  file2 U 2 1
+  $ hg resolve -l -T '{path} {mergestatus} {status} {p1rev} {p2rev}\n'
+  file1 R M 2 1
+  file2 U M 2 1
 
 resolve -m without paths should mark all resolved
 
@@ -373,4 +376,241 @@
 
   $ hg resolve -l
 
+resolve -m can be configured to look for remaining conflict markers
+  $ hg up -qC 2
+  $ hg merge -q --tool=internal:merge 1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging file2! (edit, then use 'hg resolve --mark')
+  [1]
+  $ hg resolve -l
+  U file1
+  U file2
+  $ echo 'remove markers' > file1
+  $ hg --config commands.resolve.mark-check=abort resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  abort: conflict markers detected
+  (use --all to mark anyway)
+  [255]
+  $ hg resolve -l
+  U file1
+  U file2
+Try with --all from the hint
+  $ hg --config commands.resolve.mark-check=abort resolve -m --all
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test option value 'warn'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  warning: the following files still have conflict markers:
+    file2
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the file is already marked as resolved, we don't warn about it
+  $ hg resolve --unmark file1
+  $ hg resolve -l
+  U file1
+  R file2
+  $ hg --config commands.resolve.mark-check=warn resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+If the user passes an invalid value, we treat it as 'none'.
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=nope resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Test explicitly setting the option to 'none'
+  $ hg resolve --unmark
+  $ hg resolve -l
+  U file1
+  U file2
+  $ hg --config commands.resolve.mark-check=none resolve -m
+  (no more unresolved files)
+  $ hg resolve -l
+  R file1
+  R file2
+Testing the --re-merge flag
+  $ hg resolve --unmark file1
+  $ hg resolve -l
+  U file1
+  R file2
+  $ hg resolve --mark --re-merge
+  abort: too many actions specified
+  [255]
+  $ hg resolve --re-merge --all
+  merging file1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  [1]
+Explicit re-merge
+  $ hg resolve --unmark file1
+  $ hg resolve --config commands.resolve.explicit-re-merge=1 --all
+  abort: no action specified
+  (use --mark, --unmark, --list or --re-merge)
+  [255]
+  $ hg resolve --config commands.resolve.explicit-re-merge=1 --re-merge --all
+  merging file1
+  warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+  [1]
+
   $ cd ..
+
+======================================================
+Test 'hg resolve' confirm config option functionality |
+======================================================
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > rebase=
+  > EOF
+
+  $ hg init repo2
+  $ cd repo2
+
+  $ echo boss > boss
+  $ hg ci -Am "add boss"
+  adding boss
+
+  $ for emp in emp1 emp2 emp3; do echo work > $emp; done;
+  $ hg ci -Aqm "added emp1 emp2 emp3"
+
+  $ hg up 0
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+
+  $ for emp in emp1 emp2 emp3; do echo nowork > $emp; done;
+  $ hg ci -Aqm "added lazy emp1 emp2 emp3"
+
+  $ hg log -GT "{rev} {node|short} {firstline(desc)}\n"
+  @  2 0acfd4a49af0 added lazy emp1 emp2 emp3
+  |
+  | o  1 f30f98a8181f added emp1 emp2 emp3
+  |/
+  o  0 88660038d466 add boss
+  
+  $ hg rebase -s 1 -d 2
+  rebasing 1:f30f98a8181f "added emp1 emp2 emp3"
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+
+Test when the commands.resolve.confirm config option is not set:
+===========================================================
+  $ hg resolve --all
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test when the config option is set:
+==============================
+  $ cat >> $HGRCPATH << EOF
+  > [ui]
+  > interactive = True
+  > [commands]
+  > resolve.confirm = True
+  > EOF
+
+  $ hg resolve
+  abort: no files or directories specified
+  (use --all to re-merge all unresolved files)
+  [255]
+  $ hg resolve --all << EOF
+  > n
+  > EOF
+  re-merge all unresolved files (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve --all << EOF
+  > y
+  > EOF
+  re-merge all unresolved files (yn)? y
+  merging emp1
+  merging emp2
+  merging emp3
+  warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
+  warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
+  [1]
+
+Test that commands.resolve.confirm respects the --mark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -m emp1
+  $ hg resolve -l
+  R emp1
+  U emp2
+  U emp3
+
+  $ hg resolve -m << EOF
+  > n
+  > EOF
+  mark all unresolved files as resolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+Test that commands.resolve.confirm respects the --unmark option (only when no pattern args are given):
+===============================================================================================
+
+  $ hg resolve -u emp1
+
+  $ hg resolve -l
+  U emp1
+  R emp2
+  R emp3
+
+  $ hg resolve -u << EOF
+  > n
+  > EOF
+  mark all resolved files as unresolved (yn)? n
+  abort: user quit
+  [255]
+
+  $ hg resolve -m << EOF
+  > y
+  > EOF
+  mark all unresolved files as resolved (yn)? y
+  (no more unresolved files)
+  continue: hg rebase --continue
+
+  $ hg resolve -l
+  R emp1
+  R emp2
+  R emp3
+
+  $ hg rebase --abort
+  rebase aborted
+  $ cd ..
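Taken together, the resolve hunks introduce three related knobs: `commands.resolve.mark-check` (warn or abort when files being marked resolved still contain conflict markers), `commands.resolve.confirm` (prompt before re-merging, marking, or unmarking all files), and `commands.resolve.explicit-re-merge` (make `--re-merge` an explicit action rather than the default). A sketch of a strict combination, with values as exercised above:

  $ cat >> $HGRCPATH <<EOF
  > [commands]
  > resolve.mark-check = abort
  > resolve.confirm = True
  > resolve.explicit-re-merge = True
  > EOF
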
--- a/tests/test-revert-interactive.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revert-interactive.t	Mon Oct 22 14:46:06 2018 -0400
@@ -51,11 +51,8 @@
   > n
   > n
   > EOF
-  reverting f
-  reverting folder1/g
+  remove added file folder1/i (Yn)? y
   removing folder1/i
-  reverting folder2/h
-  remove added file folder1/i (Yn)? y
   diff --git a/f b/f
   2 hunks, 2 lines changed
   examine changes to 'f'? [Ynesfdaq?] y
@@ -115,6 +112,8 @@
   2 hunks, 2 lines changed
   examine changes to 'folder2/h'? [Ynesfdaq?] n
   
+  reverting f
+  reverting folder1/g
   $ cat f
   1
   2
@@ -140,8 +139,6 @@
 Test that --interactive lift the need for --all
 
   $ echo q | hg revert -i -r 2
-  reverting folder1/g
-  reverting folder2/h
   diff --git a/folder1/g b/folder1/g
   1 hunks, 1 lines changed
   examine changes to 'folder1/g'? [Ynesfdaq?] q
@@ -197,10 +194,6 @@
   > n
   > n
   > EOF
-  reverting f
-  reverting folder1/g
-  removing folder1/i
-  reverting folder2/h
   remove added file folder1/i (Yn)? n
   diff --git a/f b/f
   2 hunks, 2 lines changed
@@ -250,6 +243,8 @@
   2 hunks, 2 lines changed
   examine changes to 'folder2/h'? [Ynesfdaq?] n
   
+  reverting f
+  reverting folder1/g
   $ cat f
   1
   2
@@ -354,7 +349,6 @@
   > y
   > e
   > EOF
-  reverting k
   diff --git a/k b/k
   1 hunks, 2 lines changed
   examine changes to 'k'? [Ynesfdaq?] y
@@ -365,6 +359,7 @@
   +2
   discard this change to 'k'? [Ynesfdaq?] e
   
+  reverting k
   $ cat k
   42
 
@@ -378,15 +373,14 @@
   $ hg revert -i <<EOF
   > n
   > EOF
-  forgetting newfile
   forget added file newfile (Yn)? n
   $ hg status
   A newfile
   $ hg revert -i <<EOF
   > y
   > EOF
+  forget added file newfile (Yn)? y
   forgetting newfile
-  forget added file newfile (Yn)? y
   $ hg status
   ? newfile
 
@@ -406,7 +400,6 @@
   > y
   > y
   > EOF
-  reverting a
   diff --git a/a b/a
   1 hunks, 1 lines changed
   examine changes to 'a'? [Ynesfdaq?] y
@@ -417,6 +410,7 @@
   \ No newline at end of file
   apply this change to 'a'? [Ynesfdaq?] y
   
+  reverting a
   $ cat a
   0
 
--- a/tests/test-revert.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revert.t	Mon Oct 22 14:46:06 2018 -0400
@@ -129,9 +129,9 @@
 ----------------------------------
 
   $ hg revert --all -r0
-  adding a
+  forgetting z
   removing d
-  forgetting z
+  adding a
 
 revert explicitly to parent (--rev)
 -----------------------------------
@@ -283,8 +283,8 @@
   $ echo foo > newdir/newfile
   $ hg add newdir/newfile
   $ hg revert b newdir
+  forgetting newdir/newfile
   reverting b/b
-  forgetting newdir/newfile
   $ echo foobar > b/b
   $ hg revert .
   reverting b/b
@@ -368,9 +368,9 @@
   $ hg update '.^'
   1 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg revert -rtip -a
+  removing ignored
   adding allyour
   adding base
-  removing ignored
   $ hg status -C
   A allyour
     ignored
@@ -495,7 +495,7 @@
 
 check list of planned files
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py filelist 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py filelist 2
   content1_content1_content1-tracked
   content1_content1_content1-untracked
   content1_content1_content3-tracked
@@ -532,6 +532,7 @@
 
   $ cat << EOF >> dircontent.py
   > # generate a simple text view of the directory for easy comparison
+  > from __future__ import print_function
   > import os
   > files = os.listdir('.')
   > files.sort()
@@ -539,7 +540,7 @@
   >     if os.path.isdir(filename):
   >         continue
   >     content = open(filename).read()
-  >     print '%-6s %s' % (content.strip(), filename)
+  >     print('%-6s %s' % (content.strip(), filename))
   > EOF
 
 Generate appropriate repo state
@@ -550,7 +551,7 @@
 
 Generate base changeset
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 1
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 1
   $ hg addremove --similarity 0
   adding content1_content1_content1-tracked
   adding content1_content1_content1-untracked
@@ -597,7 +598,7 @@
 
 (create a simple text version of the content)
 
-  $ $PYTHON ../dircontent.py > ../content-base.txt
+  $ "$PYTHON" ../dircontent.py > ../content-base.txt
   $ cat ../content-base.txt
   content1 content1_content1_content1-tracked
   content1 content1_content1_content1-untracked
@@ -622,7 +623,7 @@
 
 Create parent changeset
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 2
   $ hg addremove --similarity 0
   removing content1_missing_content1-tracked
   removing content1_missing_content1-untracked
@@ -661,7 +662,7 @@
 
 (create a simple text version of the content)
 
-  $ $PYTHON ../dircontent.py > ../content-parent.txt
+  $ "$PYTHON" ../dircontent.py > ../content-parent.txt
   $ cat ../content-parent.txt
   content1 content1_content1_content1-tracked
   content1 content1_content1_content1-untracked
@@ -686,7 +687,7 @@
 
 Setup working directory
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 wc
   $ hg addremove --similarity 0
   adding content1_missing_content1-tracked
   adding content1_missing_content1-untracked
@@ -754,7 +755,7 @@
 
 (create a simple text version of the content)
 
-  $ $PYTHON ../dircontent.py > ../content-wc.txt
+  $ "$PYTHON" ../dircontent.py > ../content-wc.txt
   $ cat ../content-wc.txt
   content1 content1_content1_content1-tracked
   content1 content1_content1_content1-untracked
@@ -790,35 +791,35 @@
 check revert output
 
   $ hg revert --all
-  undeleting content1_content1_content1-untracked
-  reverting content1_content1_content3-tracked
-  undeleting content1_content1_content3-untracked
-  reverting content1_content1_missing-tracked
-  undeleting content1_content1_missing-untracked
-  reverting content1_content2_content1-tracked
-  undeleting content1_content2_content1-untracked
-  undeleting content1_content2_content2-untracked
-  reverting content1_content2_content3-tracked
-  undeleting content1_content2_content3-untracked
-  reverting content1_content2_missing-tracked
-  undeleting content1_content2_missing-untracked
   forgetting content1_missing_content1-tracked
   forgetting content1_missing_content3-tracked
   forgetting content1_missing_missing-tracked
-  undeleting missing_content2_content2-untracked
-  reverting missing_content2_content3-tracked
-  undeleting missing_content2_content3-untracked
-  reverting missing_content2_missing-tracked
-  undeleting missing_content2_missing-untracked
   forgetting missing_missing_content3-tracked
   forgetting missing_missing_missing-tracked
+  reverting content1_content1_content3-tracked
+  reverting content1_content1_missing-tracked
+  reverting content1_content2_content1-tracked
+  reverting content1_content2_content3-tracked
+  reverting content1_content2_missing-tracked
+  reverting missing_content2_content3-tracked
+  reverting missing_content2_missing-tracked
+  undeleting content1_content1_content1-untracked
+  undeleting content1_content1_content3-untracked
+  undeleting content1_content1_missing-untracked
+  undeleting content1_content2_content1-untracked
+  undeleting content1_content2_content2-untracked
+  undeleting content1_content2_content3-untracked
+  undeleting content1_content2_missing-untracked
+  undeleting missing_content2_content2-untracked
+  undeleting missing_content2_content3-untracked
+  undeleting missing_content2_missing-untracked
 
 Compare resulting directory with revert target.
 
 The diff is filtered to include change only. The only difference should be
 additional `.orig` backup file when applicable.
 
-  $ $PYTHON ../dircontent.py > ../content-parent-all.txt
+  $ "$PYTHON" ../dircontent.py > ../content-parent-all.txt
   $ cd ..
   $ diff -U 0 -- content-parent.txt content-parent-all.txt | grep _
   +content3 content1_content1_content3-tracked.orig
@@ -847,35 +848,35 @@
 check revert output
 
   $ hg revert --all --rev 'desc(base)'
-  undeleting content1_content1_content1-untracked
-  reverting content1_content1_content3-tracked
-  undeleting content1_content1_content3-untracked
-  reverting content1_content1_missing-tracked
-  undeleting content1_content1_missing-untracked
-  undeleting content1_content2_content1-untracked
-  reverting content1_content2_content2-tracked
-  undeleting content1_content2_content2-untracked
-  reverting content1_content2_content3-tracked
-  undeleting content1_content2_content3-untracked
-  reverting content1_content2_missing-tracked
-  undeleting content1_content2_missing-untracked
-  adding content1_missing_content1-untracked
-  reverting content1_missing_content3-tracked
-  adding content1_missing_content3-untracked
-  reverting content1_missing_missing-tracked
-  adding content1_missing_missing-untracked
+  forgetting missing_missing_content3-tracked
+  forgetting missing_missing_missing-tracked
   removing missing_content2_content2-tracked
   removing missing_content2_content3-tracked
   removing missing_content2_missing-tracked
-  forgetting missing_missing_content3-tracked
-  forgetting missing_missing_missing-tracked
+  reverting content1_content1_content3-tracked
+  reverting content1_content1_missing-tracked
+  reverting content1_content2_content2-tracked
+  reverting content1_content2_content3-tracked
+  reverting content1_content2_missing-tracked
+  reverting content1_missing_content3-tracked
+  reverting content1_missing_missing-tracked
+  adding content1_missing_content1-untracked
+  adding content1_missing_content3-untracked
+  adding content1_missing_missing-untracked
+  undeleting content1_content1_content1-untracked
+  undeleting content1_content1_content3-untracked
+  undeleting content1_content1_missing-untracked
+  undeleting content1_content2_content1-untracked
+  undeleting content1_content2_content2-untracked
+  undeleting content1_content2_content3-untracked
+  undeleting content1_content2_missing-untracked
 
 Compare resulting directory with revert target.
 
 The diff is filtered to include changes only. The only difference should be
 an additional `.orig` backup file when applicable.
 
-  $ $PYTHON ../dircontent.py > ../content-base-all.txt
+  $ "$PYTHON" ../dircontent.py > ../content-base-all.txt
   $ cd ..
   $ diff -U 0 -- content-base.txt content-base-all.txt | grep _
   +content3 content1_content1_content3-tracked.orig
@@ -902,7 +903,7 @@
 revert all files individually and check the output
 (output is expected to be different than in the --all case)
 
-  $ for file in `$PYTHON $TESTDIR/generate-working-copy-states.py filelist 2`; do
+  $ for file in `"$PYTHON" $TESTDIR/generate-working-copy-states.py filelist 2`; do
   >   echo '### revert for:' $file;
   >   hg revert $file;
   >   echo
@@ -979,7 +980,7 @@
 check resulting directory against the --all run
 (There should be no difference)
 
-  $ $PYTHON ../dircontent.py > ../content-parent-explicit.txt
+  $ "$PYTHON" ../dircontent.py > ../content-parent-explicit.txt
   $ cd ..
   $ diff -U 0 -- content-parent-all.txt content-parent-explicit.txt | grep _
   [1]
@@ -995,7 +996,7 @@
 revert all files individually and check the output
 (output is expected to be different than in the --all case)
 
-  $ for file in `$PYTHON $TESTDIR/generate-working-copy-states.py filelist 2`; do
+  $ for file in `"$PYTHON" $TESTDIR/generate-working-copy-states.py filelist 2`; do
   >   echo '### revert for:' $file;
   >   hg revert $file --rev 'desc(base)';
   >   echo
@@ -1072,7 +1073,7 @@
 check resulting directory against the --all run
 (There should be no difference)
 
-  $ $PYTHON ../dircontent.py > ../content-base-explicit.txt
+  $ "$PYTHON" ../dircontent.py > ../content-base-explicit.txt
   $ cd ..
   $ diff -U 0 -- content-base-all.txt content-base-explicit.txt | grep _
   [1]
@@ -1120,8 +1121,8 @@
   M A
   A B
   $ hg revert --rev 1 --all
+  removing B
   reverting A
-  removing B
   $ hg status --rev 1
 
 From the other parents
@@ -1140,8 +1141,8 @@
   M A
   A B
   $ hg revert --rev 1 --all
+  removing B
   reverting A
-  removing B
   $ hg status --rev 1
 
   $ cd ..
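
The reordered expectations above reflect revert output that is now grouped by
action and sorted by filename within each group. A minimal sketch of that
ordering (hypothetical helper, not the actual revert code):

  def showactions(groups):
      # groups: dict mapping an action name to the affected filenames
      # (hypothetical helper showing the grouped, sorted output order)
      for action in ('forgetting', 'removing', 'reverting',
                     'adding', 'undeleting'):
          for name in sorted(groups.get(action, ())):
              print('%s %s' % (action, name))
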
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revisions.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,45 @@
+  $ hg init repo
+  $ cd repo
+
+  $ echo 0 > a
+  $ hg ci -qAm 0
+  $ for i in 5 8 14 43 167; do
+  >   hg up -q 0
+  >   echo $i > a
+  >   hg ci -qm $i
+  > done
+  $ cat <<EOF >> .hg/hgrc
+  > [alias]
+  > l = log -T '{rev}:{shortest(node,1)}\n'
+  > EOF
+
+  $ hg l
+  5:00f
+  4:7ba5d
+  3:7ba57
+  2:72
+  1:9
+  0:b
+  $ cat <<EOF >> .hg/hgrc
+  > [experimental]
+  > revisions.disambiguatewithin=not 4
+  > EOF
+  $ hg l
+  5:00
+  4:7ba5d
+  3:7b
+  2:72
+  1:9
+  0:b
+9 was unambiguous and still is
+  $ hg l -r 9
+  1:9
+7 was ambiguous and still is
+  $ hg l -r 7
+  abort: 00changelog.i@7: ambiguous identifier!
+  [255]
+7b is no longer ambiguous
+  $ hg l -r 7b
+  3:7b
+
+  $ cd ..
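
The test above exercises shortest-prefix disambiguation: a prefix is usable
once no other candidate node shares it, and
`experimental.revisions.disambiguatewithin` shrinks the candidate set. A
self-contained sketch of the core idea (hypothetical function, not
Mercurial's implementation):

  def shortestprefix(node, candidates):
      # return the shortest prefix of `node` that no other candidate
      # shares; `node` and `candidates` are hex node strings
      others = [c for c in candidates if c != node]
      for size in range(1, len(node) + 1):
          prefix = node[:size]
          if not any(c.startswith(prefix) for c in others):
              return prefix
      return node

With rev 4 excluded from the candidates, nothing else starts with `7b`,
which is why `7b` becomes unambiguous above.
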
--- a/tests/test-revlog-ancestry.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revlog-ancestry.py	Mon Oct 22 14:46:06 2018 -0400
@@ -22,10 +22,10 @@
     commit(name, time)
 
 def update(rev):
-    merge.update(repo, rev, False, True)
+    merge.update(repo, rev, branchmerge=False, force=True)
 
 def merge_(rev):
-    merge.update(repo, rev, True, False)
+    merge.update(repo, rev, branchmerge=True, force=False)
 
 if __name__ == '__main__':
     addcommit(b"A", 0)
--- a/tests/test-revlog-ancestry.py.out	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revlog-ancestry.py.out	Mon Oct 22 14:46:06 2018 -0400
@@ -1,15 +1,15 @@
 Ancestors of 5
 4 2 0 
 Ancestors of 6 and 5
-3 4 2 1 0 
+4 3 2 1 0 
 Ancestors of 5 and 4
 4 2 0 
 Ancestors of 7, stop at 6
 6 
 Ancestors of 7, including revs
-7 6 5 3 4 2 1 0 
+7 6 5 4 3 2 1 0 
 Ancestors of 7, 5 and 3, including revs
-7 5 3 6 4 2 1 0 
+7 6 5 4 3 2 1 0 
 
 Descendants of 5
 7 8 
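
The updated expectations reflect ancestors now being yielded in strictly
descending revision order, independent of the order of the starting
revisions. A heap-based sketch of such a traversal (assuming a plain
`parents` mapping; this is not the revlog code itself):

  import heapq

  def ancestors(parents, revs):
      # yield revs and their ancestors in descending revision order;
      # `parents` maps rev -> tuple of parent revs, -1 meaning null
      visit = [-r for r in set(revs)]  # negate: heapq is a min-heap
      heapq.heapify(visit)
      seen = set(revs)
      while visit:
          r = -heapq.heappop(visit)
          yield r
          for p in parents.get(r, ()):
              if p != -1 and p not in seen:
                  seen.add(p)
                  heapq.heappush(visit, -p)
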
--- a/tests/test-revlog-raw.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revlog-raw.py	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,7 @@
 
 # The test wants to control whether to use delta explicitly, based on
 # "storedeltachains".
-revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self.storedeltachains
+revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
 
 def abort(msg):
     print('abort: %s' % msg)
@@ -78,7 +78,7 @@
     else:
         flags = revlog.REVIDX_DEFAULT_FLAGS
     # Change storedeltachains temporarily, to override revlog's delta decision
-    rlog.storedeltachains = isdelta
+    rlog._storedeltachains = isdelta
     try:
         rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
         return nextrev
@@ -86,7 +86,7 @@
         abort('rev %d: failed to append: %s' % (nextrev, ex))
     finally:
         # Restore storedeltachains. It is always True, see revlog.__init__
-        rlog.storedeltachains = True
+        rlog._storedeltachains = True
 
 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
     '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
--- a/tests/test-revlog.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revlog.t	Mon Oct 22 14:46:06 2018 -0400
@@ -34,14 +34,20 @@
 
   $ hg init
 
-  >>> open("a.i", "wb").write(
+  >>> import codecs
+  >>> open("a.i", "wb").write(codecs.decode(codecs.decode(
   ... b"""eJxjYGZgZIAAYQYGxhgom+k/FMx8YKx9ZUaKSOyqo4cnuKb8mbqHV5cBCVTMWb1Cwqkhe4Gsg9AD
-  ... Joa3dYtcYYYBAQ8Qr4OqZAYRICPTSr5WKd/42rV36d+8/VmrNpv7NP1jQAXrQE4BqQUARngwVA=="""
-  ... .decode("base64").decode("zlib"))
+  ... Joa3dYtcYYYBAQ8Qr4OqZAYRICPTSr5WKd/42rV36d+8/VmrNpv7NP1jQAXrQE4BqQUARngwVA==""",
+  ... "base64"), "zlib")) and None
 
-  $ hg debugindex a.i
+  $ hg debugrevlogindex a.i
      rev linkrev nodeid       p1           p2
        0       2 99e0332bd498 000000000000 000000000000
        1       3 6674f57a23d8 99e0332bd498 000000000000
-  $ hg debugdata a.i 1 2>&1 | egrep 'Error:.*decoded'
-  (mercurial\.\w+\.mpatch\.)?mpatchError: patch cannot be decoded (re)
+
+  >>> from mercurial import revlog, vfs
+  >>> tvfs = vfs.vfs(b'.')
+  >>> tvfs.options = {b'revlogv1': True}
+  >>> rl = revlog.revlog(tvfs, b'a.i')
+  >>> rl.revision(1)
+  mpatchError(*'patch cannot be decoded'*) (glob)
--- a/tests/test-revset.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revset.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1793,6 +1793,16 @@
 
 Test hexadecimal revision
   $ log 'id(2)'
+  $ log 'id(5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x5)'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x5'
+  2
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
+  $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
+  abort: 00changelog.i@: ambiguous identifier!
+  [255]
   $ log 'id(23268)'
   4
   $ log 'id(2785f51eece)'
@@ -2916,16 +2926,20 @@
         (symbol '0'))))
   * analyzed:
   (func
-    (symbol 'first')
+    (symbol 'revset')
     (func
-      (symbol 'rev')
-      (symbol '0')))
+      (symbol 'first')
+      (func
+        (symbol 'rev')
+        (symbol '0'))))
   * optimized:
   (func
-    (symbol 'first')
+    (symbol 'revset')
     (func
-      (symbol 'rev')
-      (symbol '0')))
+      (symbol 'first')
+      (func
+        (symbol 'rev')
+        (symbol '0'))))
   * set:
   <baseset+ [0]>
   0
--- a/tests/test-revset2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-revset2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -346,7 +346,7 @@
 test ',' in `_list`
   $ log '0,1'
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
   $ try '0,1,2'
   (list
@@ -354,7 +354,7 @@
     (symbol '1')
     (symbol '2'))
   hg: parse error: can't use a list in this context
-  (see hg help "revsets.x or y")
+  (see 'hg help "revsets.x or y"')
   [255]
 
 test that chained `or` operations make balanced addsets
@@ -413,14 +413,14 @@
 test that chained `or` operations never eat up stack (issue4624)
 (uses `0:1` instead of `0` to avoid future optimization of trivial revisions)
 
-  $ hg log -T '{rev}\n' -r `$PYTHON -c "print '+'.join(['0:1'] * 500)"`
+  $ hg log -T '{rev}\n' -r `"$PYTHON" -c "print('+'.join(['0:1'] * 500))"`
   0
   1
 
 test that repeated `-r` options never eat up stack (issue4565)
 (uses `-r 0::1` to avoid possible optimization at old-style parser)
 
-  $ hg log -T '{rev}\n' `$PYTHON -c "for i in range(500): print '-r 0::1 ',"`
+  $ hg log -T '{rev}\n' `"$PYTHON" -c "for i in range(500): print('-r 0::1 ')"`
   0
   1
 
@@ -1527,8 +1527,8 @@
   $ hg init problematicencoding
   $ cd problematicencoding
 
-  $ $PYTHON > setup.sh <<EOF
-  > print u'''
+  $ "$PYTHON" > setup.sh <<EOF
+  > print(u'''
   > echo a > text
   > hg add text
   > hg --encoding utf-8 commit -u '\u30A2' -m none
@@ -1538,13 +1538,13 @@
   > hg --encoding utf-8 commit -u none -m '\u30A2'
   > echo d > text
   > hg --encoding utf-8 commit -u none -m '\u30C2'
-  > '''.encode('utf-8')
+  > '''.encode('utf-8'))
   > EOF
   $ sh < setup.sh
 
 test in problematic encoding
-  $ $PYTHON > test.sh <<EOF
-  > print u'''
+  $ "$PYTHON" > test.sh <<EOF
+  > print(u'''
   > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30A2)'
   > echo ====
   > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30C2)'
@@ -1556,7 +1556,7 @@
   > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30A2)'
   > echo ====
   > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30C2)'
-  > '''.encode('cp932')
+  > '''.encode('cp932'))
   > EOF
   $ sh < test.sh
   0
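
The stack-depth tests above (issue4624, issue4565) pass because chained `or`
operands are folded into a balanced tree rather than a left-leaning chain,
keeping recursion depth logarithmic in the number of operands. A sketch of
the balancing step (hypothetical tree representation; assumes at least one
term):

  def balance(terms):
      # combine N 'or' operands into a tree of depth O(log N)
      if len(terms) == 1:
          return terms[0]
      mid = len(terms) // 2
      return ('or', balance(terms[:mid]), balance(terms[mid:]))
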
--- a/tests/test-rollback.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-rollback.t	Mon Oct 22 14:46:06 2018 -0400
@@ -9,7 +9,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   $ hg parents
   changeset:   0:1f0dee641bb7
   tag:         tip
@@ -28,7 +28,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 0 changesets, 0 total revisions
+  checked 0 changesets with 0 changes to 0 files
   $ hg parents
   $ hg status
   A a
@@ -197,7 +197,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
 
 rollback disabled by config
   $ cat >> $HGRCPATH <<EOF
@@ -436,7 +436,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ cd ..
 
@@ -461,6 +461,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
 
   $ cd ..
--- a/tests/test-run-tests.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-run-tests.t	Mon Oct 22 14:46:06 2018 -0400
@@ -6,7 +6,8 @@
 
 Smoke test with install
 ============
-  $ $PYTHON $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
+  $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
+  running 0 tests using 0 parallel processes 
   
   # Ran 0 tests, 0 skipped, 0 failed.
 
@@ -14,15 +15,16 @@
 =============
   $ rt()
   > {
-  >     $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@"
+  >     "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
   > }
 
 error paths
 
 #if symlink
   $ ln -s `which true` hg
-  $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
+  $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
   warning: --with-hg should specify an hg script
+  running 0 tests using 0 parallel processes 
   
   # Ran 0 tests, 0 skipped, 0 failed.
   $ rm hg
@@ -30,7 +32,7 @@
 
 #if execbit
   $ touch hg
-  $ $PYTHON $TESTDIR/run-tests.py --with-hg=./hg
+  $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
   usage: run-tests.py [options] [tests]
   run-tests.py: error: --with-hg must specify an executable hg script
   [2]
@@ -55,6 +57,7 @@
 
   $ touch test-empty.t
   $ rt
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
   $ rm test-empty.t
@@ -88,6 +91,7 @@
   > EOF
 
   $ rt
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -102,6 +106,7 @@
   >   | fo (re)
   > EOF
   $ rt test-failure.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -139,6 +144,7 @@
   >   value: * (glob)
   > EOF
   $ rt test-failure-globs.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure-globs.t
   +++ $TESTTMP/test-failure-globs.t.err
@@ -167,6 +173,7 @@
 
 #if no-windows pygments
   $ rt test-failure.t --color always
+  running 1 tests using 1 parallel processes 
   
   \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
   \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
@@ -186,6 +193,7 @@
   [1]
 
   $ rt test-failure.t 2> tmp.log
+  running 1 tests using 1 parallel processes 
   [1]
   $ cat tmp.log
   
@@ -234,6 +242,7 @@
   >   missing (?)
   > EOF
   $ rt test-failure.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -292,6 +301,7 @@
   >>> fh.write(u'  l\u03b5\u03b5t\n'.encode('utf-8')) and None
 
   $ rt
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -323,6 +333,7 @@
 test --outputdir
   $ mkdir output
   $ rt --outputdir output
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/output/test-failure.t.err
@@ -359,6 +370,7 @@
 
 test --xunit support
   $ rt --xunit=xunit.xml
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -481,6 +493,7 @@
 ====================
 
   $ rt --retest
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -504,6 +517,7 @@
   $ mkdir output
   $ mv test-failure.t.err output
   $ rt --retest --outputdir output
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/output/test-failure.t.err
@@ -528,17 +542,20 @@
 successful
 
   $ rt test-success.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
 success w/ keyword
   $ rt -k xyzzy
+  running 2 tests using 1 parallel processes 
   .
   # Ran 2 tests, 1 skipped, 0 failed.
 
 failed
 
   $ rt test-failure.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -559,6 +576,7 @@
 
 failure w/ keyword
   $ rt -k rataxes
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -584,6 +602,7 @@
   >   $ echo 'abort: child process failed to start blah'
   > EOF
   $ rt test-serve-fail.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-serve-fail.t
   +++ $TESTTMP/test-serve-fail.t.err
@@ -614,6 +633,7 @@
   >   $ cat hg.pid >> \$DAEMON_PIDS
   > EOF
   $ rt test-serve-inuse.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
   $ rm test-serve-inuse.t
@@ -623,6 +643,7 @@
 ======================
 
   $ rt --debug 2>&1 | grep -v pwd
+  running 2 tests using 1 parallel processes 
   + echo *SALT* 0 0 (glob)
   *SALT* 0 0 (glob)
   + echo babar
@@ -661,6 +682,7 @@
   $ cp test-failure.t test-failure-copy.t
 
   $ rt --jobs 2 test-failure*.t -n
+  running 2 tests using 2 parallel processes 
   !!
   Failed test-failure*.t: output changed (glob)
   Failed test-failure*.t: output changed (glob)
@@ -670,6 +692,7 @@
 
 failures in parallel with --first should only print one failure
   $ rt --jobs 2 --first test-failure*.t
+  running 2 tests using 2 parallel processes 
   
   --- $TESTTMP/test-failure*.t (glob)
   +++ $TESTTMP/test-failure*.t.err (glob)
@@ -701,6 +724,7 @@
 Refuse the fix
 
   $ echo 'n' | rt -i
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -734,6 +758,7 @@
 Interactive with custom view
 
   $ echo 'n' | rt -i --view echo
+  running 2 tests using 1 parallel processes 
   $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
   Accept this change? [n]* (glob)
   ERROR: test-failure.t output changed
@@ -746,6 +771,7 @@
 View the fix
 
   $ echo 'y' | rt --view echo
+  running 2 tests using 1 parallel processes 
   $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
   
   ERROR: test-failure.t output changed
@@ -766,6 +792,7 @@
   >   saved backup bundle to \$TESTTMP/*.hg (glob)
   > EOF
   $ echo 'y' | rt -i 2>&1
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -815,6 +842,7 @@
   > EOF
 
   $ rt -i test-race.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/test-race.t
   +++ $TESTTMP/test-race.t.err
@@ -848,9 +876,10 @@
   > y
   > y
   > EOF
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.a.err
+  +++ $TESTTMP/test-cases.t#a.err
   @@ -1,6 +1,7 @@
    #testcases a b
    #if a
@@ -861,7 +890,7 @@
      $ echo 2
   Accept this change? [n] .
   --- $TESTTMP/test-cases.t
-  +++ $TESTTMP/test-cases.t.b.err
+  +++ $TESTTMP/test-cases.t#b.err
   @@ -5,4 +5,5 @@
    #endif
    #if b
@@ -893,9 +922,45 @@
   >   B (b !)
   > EOF
   $ rt test-cases.t
+  running 2 tests using 1 parallel processes 
   ..
   # Ran 2 tests, 0 skipped, 0 failed.
 
+When using multiple dimensions of "#testcases" in .t files
+
+  $ cat > test-cases.t <<'EOF'
+  > #testcases a b
+  > #testcases c d
+  > #if a d
+  >   $ echo $TESTCASE
+  >   a#d
+  > #endif
+  > #if b c
+  >   $ echo yes
+  >   no
+  > #endif
+  > EOF
+  $ rt test-cases.t
+  running 4 tests using 1 parallel processes 
+  ..
+  --- $TESTTMP/test-cases.t
+  +++ $TESTTMP/test-cases.t#b#c.err
+  @@ -6,5 +6,5 @@
+   #endif
+   #if b c
+     $ echo yes
+  -  no
+  +  yes
+   #endif
+  
+  ERROR: test-cases.t#b#c output changed
+  !.
+  Failed test-cases.t#b#c: output changed
+  # Ran 4 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+  $ rm test-cases.t#b#c.err
   $ rm test-cases.t
 
 (reinstall)
@@ -905,6 +970,7 @@
 ===============
 
   $ rt --nodiff
+  running 2 tests using 1 parallel processes 
   !.
   Failed test-failure.t: output changed
   # Ran 2 tests, 0 skipped, 1 failed.
@@ -913,6 +979,7 @@
 
 test --tmpdir support
   $ rt --tmpdir=$TESTTMP/keep test-success.t
+  running 1 tests using 1 parallel processes 
   
   Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
   Keeping threadtmp dir: $TESTTMP/keep/child1 
@@ -929,6 +996,7 @@
   > echo '#require slow' > test-slow-timeout.t
   > cat test-timeout.t >> test-slow-timeout.t
   $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
+  running 2 tests using 1 parallel processes 
   st
   Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
   Failed test-timeout.t: timed out
@@ -937,6 +1005,7 @@
   [1]
   $ rt --timeout=1 --slowtimeout=3 \
   > test-timeout.t test-slow-timeout.t --allow-slow-tests
+  running 2 tests using 1 parallel processes 
   .t
   Failed test-timeout.t: timed out
   # Ran 2 tests, 0 skipped, 1 failed.
@@ -948,6 +1017,7 @@
 ==================
 
   $ rt test-success.t --time
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
   # Producing time report
@@ -958,6 +1028,7 @@
 ====================================
 
   $ rt test-success.t --time --jobs 2
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
   # Producing time report
@@ -978,6 +1049,7 @@
   > #endif
   > EOF
   $ rt --nodiff
+  running 4 tests using 1 parallel processes 
   !.s.
   Skipped test-skip.t: missing feature: nail clipper
   Failed test-failure.t: output changed
@@ -987,6 +1059,7 @@
 
   $ rm test-noskip.t
   $ rt --keyword xyzzy
+  running 3 tests using 1 parallel processes 
   .s
   Skipped test-skip.t: missing feature: nail clipper
   # Ran 2 tests, 2 skipped, 0 failed.
@@ -994,6 +1067,7 @@
 Skips with xml
   $ rt --keyword xyzzy \
   >  --xunit=xunit.xml
+  running 3 tests using 1 parallel processes 
   .s
   Skipped test-skip.t: missing feature: nail clipper
   # Ran 2 tests, 2 skipped, 0 failed.
@@ -1011,6 +1085,7 @@
   $ echo test-failure.t > blacklist
   $ rt --blacklist=blacklist --json\
   >   test-failure.t test-bogus.t
+  running 2 tests using 1 parallel processes 
   ss
   Skipped test-bogus.t: Doesn't exist
   Skipped test-failure.t: blacklisted
@@ -1029,6 +1104,7 @@
   $ echo test-failure.t > whitelist
   $ rt --blacklist=blacklist --whitelist=whitelist --json\
   >   test-failure.t test-bogus.t
+  running 2 tests using 1 parallel processes 
   s
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -1052,10 +1128,12 @@
 be executed.
   $ echo test-success.t >> onlytest
   $ rt --test-list=onlytest
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
   $ echo test-bogus.t >> anothertest
   $ rt --test-list=onlytest --test-list=anothertest
+  running 2 tests using 1 parallel processes 
   s.
   Skipped test-bogus.t: Doesn't exist
   # Ran 1 tests, 1 skipped, 0 failed.
@@ -1065,6 +1143,7 @@
 ==================
 
   $ rt --json
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -1120,6 +1199,7 @@
   $ rm -r output
   $ mkdir output
   $ rt --json --outputdir output
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/output/test-failure.t.err
@@ -1181,6 +1261,7 @@
 
   $ cp test-failure.t backup
   $ echo y | rt --json -i
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
@@ -1235,6 +1316,7 @@
   > EOF
 
   $ rt test-glob-backslash.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1249,7 +1331,8 @@
 Add support for external test formatter
 =======================================
 
-  $ CUSTOM_TEST_RESULT=basic_test_result $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
+  $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
+  running 2 tests using 1 parallel processes 
   
   # Ran 2 tests, 0 skipped, 0 failed.
   ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
@@ -1272,6 +1355,7 @@
   >   foo
   > EOF
   $ rt test-hghave.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1300,6 +1384,7 @@
   >   # check-code - a style and portability checker for Mercurial
   > EOF
   $ rt test-runtestdir.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1317,6 +1402,7 @@
   >   hello world
   > EOF
   $ rt test-testdir-path.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1329,10 +1415,12 @@
   >   pass
   > EOF
   $ rt test-very-slow-test.t
+  running 1 tests using 1 parallel processes 
   s
   Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
   # Ran 0 tests, 1 skipped, 0 failed.
   $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1343,6 +1431,7 @@
   >   pass
   > EOF
   $ rt nonlocal/test-is-not-here.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
 
@@ -1360,6 +1449,7 @@
   $ cp tmp/test-uno.t test-solo.t
 
   $ rt tmp/ test-solo.t tmpp
+  running 5 tests using 1 parallel processes 
   .....
   # Ran 5 tests, 0 skipped, 0 failed.
   $ rm -rf tmp tmpp
@@ -1383,6 +1473,7 @@
 
   $ cd ..
   $ rt tmp/test-*.t
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/tmp/test-folder-fail.t
   +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
@@ -1413,6 +1504,7 @@
   > EOF
   $ hg ci -m 'bad'
   $ rt --known-good-rev=0 test-bisect.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/bisect/test-bisect.t
   +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
@@ -1444,6 +1536,7 @@
   $ hg commit -Am dependent test-bisect-dependent.t
 
   $ rt --known-good-rev=0 test-bisect-dependent.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
   +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
@@ -1466,6 +1559,7 @@
   [2]
 
   $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
   +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
@@ -1501,6 +1595,7 @@
   > EOF
   > done
   $ rt -j 2
+  running 5 tests using 2 parallel processes 
   ....
   # Ran 5 tests, 0 skipped, 0 failed.
   skipped: unknown feature: notarealhghavefeature
@@ -1538,9 +1633,10 @@
   >   [1]
   > EOF
   $ rt
+  running 3 tests using 1 parallel processes 
   .
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1561,9 +1657,10 @@
 --restart works
 
   $ rt --restart
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1584,11 +1681,12 @@
 --restart works with outputdir
 
   $ mkdir output
-  $ mv test-cases-abc.t.B.err output
+  $ mv test-cases-abc.t#B.err output
   $ rt --restart --outputdir output
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1623,15 +1721,17 @@
   > #endif
   > EOF
   $ rt test-cases-ab.t
+  running 2 tests using 1 parallel processes 
   ..
   # Ran 2 tests, 0 skipped, 0 failed.
 
 Support running a specific test case
 
   $ rt "test-cases-abc.t#B"
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1652,9 +1752,10 @@
 Support running multiple test cases in the same file
 
   $ rt test-cases-abc.t#B test-cases-abc.t#C
+  running 2 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1675,9 +1776,10 @@
 Support ignoring invalid test cases
 
   $ rt test-cases-abc.t#B test-cases-abc.t#D
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-abc.t
-  +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err
+  +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
   @@ -7,7 +7,7 @@
      $ V=C
    #endif
@@ -1709,9 +1811,10 @@
     simple
 
   $ rt test-cases-advanced-cases.t
+  running 3 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1721,7 +1824,7 @@
   ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
   !
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1737,9 +1840,10 @@
   [1]
 
   $ rt "test-cases-advanced-cases.t#case-with-dashes"
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1754,9 +1858,10 @@
   [1]
 
   $ rt "test-cases-advanced-cases.t#casewith_-.chars"
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
-  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err
+  +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
   @@ -1,3 +1,3 @@
    #testcases simple case-with-dashes casewith_-.chars
      $ echo $TESTCASE
@@ -1795,6 +1900,7 @@
   > EOF
 
   $ rt test-substitution.t
+  running 1 tests using 1 parallel processes 
   
   --- $TESTTMP/anothertests/cases/test-substitution.t
   +++ $TESTTMP/anothertests/cases/test-substitution.t.err
@@ -1819,5 +1925,6 @@
   > EOF
 
   $ rt --extra-config-opt extensions.purge= test-config-opt.t
+  running 1 tests using 1 parallel processes 
   .
   # Ran 1 tests, 0 skipped, 0 failed.
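
With multiple `#testcases` lines, each combination across the dimensions runs
as its own test, and the combined case name joins the parts with `#` -- hence
names like `test-cases.t#b#c` and the matching `.err` files above. A sketch
of the combination (assuming each dimension is a list of case names):

  import itertools

  def casenames(dimensions):
      # dimensions: e.g. [['a', 'b'], ['c', 'd']] -> a#c a#d b#c b#d
      for combo in itertools.product(*dimensions):
          yield '#'.join(combo)
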
--- a/tests/test-schemes.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-schemes.t	Mon Oct 22 14:46:06 2018 -0400
@@ -42,6 +42,7 @@
   searching for changes
   all remote heads known locally
   no changes found
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
   [1]
 
 check that paths are expanded
--- a/tests/test-serve.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-serve.t	Mon Oct 22 14:46:06 2018 -0400
@@ -79,7 +79,7 @@
   listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
-  $ $PYTHON $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
+  $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
 
 With out of bounds accesses
 
--- a/tests/test-setdiscovery.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-setdiscovery.t	Mon Oct 22 14:46:06 2018 -0400
@@ -504,9 +504,9 @@
 #if false
 generate new bundles:
   $ hg init r1
-  $ for i in `$PYTHON $TESTDIR/seq.py 101`; do hg -R r1 up -qr null && hg -R r1 branch -q b$i && hg -R r1 ci -qmb$i; done
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 101`; do hg -R r1 up -qr null && hg -R r1 branch -q b$i && hg -R r1 ci -qmb$i; done
   $ hg clone -q r1 r2
-  $ for i in `$PYTHON $TESTDIR/seq.py 10`; do hg -R r1 up -qr null && hg -R r1 branch -q c$i && hg -R r1 ci -qmc$i; done
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 10`; do hg -R r1 up -qr null && hg -R r1 branch -q c$i && hg -R r1 ci -qmc$i; done
   $ hg -R r2 branch -q r2change && hg -R r2 ci -qmr2change
   $ hg -R r1 bundle -qa $TESTDIR/bundles/issue4438-r1.hg
   $ hg -R r2 bundle -qa $TESTDIR/bundles/issue4438-r2.hg
--- a/tests/test-share.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-share.t	Mon Oct 22 14:46:06 2018 -0400
@@ -32,6 +32,7 @@
   [1]
   $ ls -1 ../repo1/.hg/cache
   branch2-served
+  manifestfulltextcache (reporevlogstore !)
   rbc-names-v1
   rbc-revs-v1
   tags2-visible
@@ -297,15 +298,15 @@
 
 test behavior when sharing a shared repo
 
-  $ hg share -B repo3 repo5
+  $ hg share -B repo3 missingdir/repo5
   updating working directory
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cd repo5
+  $ cd missingdir/repo5
   $ hg book
      bm1                       3:b87954705719
      bm3                       4:62f4ded848e4
      bm4                       5:92793bfc8cad
-  $ cd ..
+  $ cd ../..
 
 test what happens when an active bookmark is deleted
 
@@ -398,8 +399,8 @@
   ../../orig/.hg (no-eol)
   $ grep shared thisdir/*/.hg/requires
   thisdir/abs/.hg/requires:shared
+  thisdir/rel/.hg/requires:relshared
   thisdir/rel/.hg/requires:shared
-  thisdir/rel/.hg/requires:relshared
 
 test that relative shared paths aren't relative to $PWD
 
@@ -438,6 +439,29 @@
 
   $ rm -r thatdir
 
+Demonstrate buggy behavior around requirements validation
+See comment in localrepo.py:makelocalrepository() for more.
+
+  $ hg init sharenewrequires
+  $ hg share sharenewrequires shareoldrequires
+  updating working directory
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cat >> sharenewrequires/.hg/requires << EOF
+  > missing-requirement
+  > EOF
+
+We cannot open the repo with the unknown requirement
+
+  $ hg -R sharenewrequires status
+  abort: repository requires features unknown to this Mercurial: missing-requirement!
+  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  [255]
+
+BUG: we don't get the same error when opening the shared repo pointing to it
+
+  $ hg -R shareoldrequires status
+
 Explicitly kill daemons to let the test exit on Windows
 
   $ killdaemons.py
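
The abort above is the requirements gate: every entry in `.hg/requires` must
name a feature this Mercurial understands, otherwise the repository refuses
to open -- the check the shared-repo case is shown bypassing. A reduced
sketch of the rule (the supported set here is a hypothetical subset):

  SUPPORTED = {'revlogv1', 'store', 'fncache', 'dotencode',
               'shared', 'relshared'}  # hypothetical subset

  def checkrequirements(lines):
      # refuse to open a repo whose requires file lists unknown features
      missing = {l.strip() for l in lines if l.strip()} - SUPPORTED
      if missing:
          raise RuntimeError(
              'repository requires features unknown to this Mercurial: %s'
              % ', '.join(sorted(missing)))
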
--- a/tests/test-shelve.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-shelve.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,3 +1,5 @@
+#testcases stripbased phasebased
+
   $ cat <<EOF >> $HGRCPATH
   > [extensions]
   > mq =
@@ -9,6 +11,15 @@
   > maxbackups = 2
   > EOF
 
+#if phasebased
+
+  $ cat <<EOF >> $HGRCPATH
+  > [format]
+  > internal-phase = yes
+  > EOF
+
+#endif
+
   $ hg init repo
   $ cd repo
   $ mkdir a b
@@ -102,6 +113,7 @@
   $ ls .hg/shelve-backup
   default.hg
   default.patch
+  default.shelve
 
 checks to make sure we don't create a directory or
 hidden file while choosing a new shelve name
@@ -206,8 +218,10 @@
   $ ls .hg/shelve-backup/
   default-1.hg
   default-1.patch
+  default-1.shelve
   default.hg
   default.patch
+  default.shelve
 
 local edits should not prevent a shelved change from applying
 
@@ -250,10 +264,13 @@
   $ ls .hg/shelve-backup/
   default-01.hg
   default-01.patch
+  default-01.shelve
   default-1.hg
   default-1.patch
+  default-1.shelve
   default.hg
   default.patch
+  default.shelve
 
   $ hg unshelve
   abort: no shelved changes to apply!
@@ -314,8 +331,10 @@
   $ ls .hg/shelve-backup/
   default-01.hg
   default-01.patch
+  default-01.shelve
   wibble.hg
   wibble.patch
+  wibble.shelve
 
 cause unshelving to result in a merge with 'a' conflicting
 
@@ -361,12 +380,24 @@
 
 ensure that we have a merge with unresolved conflicts
 
+#if phasebased
+  $ hg heads -q --template '{rev}\n'
+  8
+  5
+  $ hg parents -q --template '{rev}\n'
+  8
+  5
+#endif
+
+#if stripbased
   $ hg heads -q --template '{rev}\n'
   5
   4
   $ hg parents -q --template '{rev}\n'
   4
   5
+#endif
+
   $ hg status
   M a/a
   M b.rename/b
@@ -379,11 +410,11 @@
   +++ b/a/a
   @@ -1,2 +1,6 @@
    a
-  +<<<<<<< shelve:       562f7831e574 - shelve: pending changes temporary commit
+  +<<<<<<< shelve:       2377350b6337 - shelve: pending changes temporary commit
    c
   +=======
   +a
-  +>>>>>>> working-copy: 32c69314e062 - shelve: changes to: [mq]: second.patch
+  +>>>>>>> working-copy: a68ec3400638 - shelve: changes to: [mq]: second.patch
   diff --git a/b/b b/b.rename/b
   rename from b/b
   rename to b.rename/b
@@ -409,10 +440,11 @@
   $ hg unshelve -a
   unshelve of 'default' aborted
   $ hg heads -q
-  3:2e69b451d1ea
+  [37]:2e69b451d1ea (re)
   $ hg parents
-  changeset:   3:2e69b451d1ea
+  changeset:   [37]:2e69b451d1ea (re)
   tag:         tip
+  parent:      3:509104101065 (?)
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     second
@@ -465,14 +497,15 @@
 ensure the repo is as we hope
 
   $ hg parents
-  changeset:   3:2e69b451d1ea
+  changeset:   [37]:2e69b451d1ea (re)
   tag:         tip
+  parent:      3:509104101065 (?)
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     second
   
   $ hg heads -q
-  3:2e69b451d1ea
+  [37]:2e69b451d1ea (re)
 
   $ hg status -C
   A b.rename/b
@@ -499,6 +532,15 @@
   M a/a
   $ hg revert a/a
 
+#else
+
+Dummy shelve op, to keep rev numbers aligned
+
+  $ echo foo > a/a
+  $ hg shelve -q -n dummy a/a
+  $ hg unshelve -q dummy
+  $ hg revert a/a
+
 #endif
 
 #if symlink
@@ -512,6 +554,15 @@
   M a/a
   $ hg revert a/a
 
+#else
+
+Dummy shelve op, to keep rev numbers aligned
+
+  $ echo bar > a/a
+  $ hg shelve -q -n dummy a/a
+  $ hg unshelve -q dummy
+  $ hg revert a/a
+
 #endif
 
 set up another conflict between a commit and a shelved change
@@ -532,7 +583,7 @@
   rebasing shelved changes
   merging a/a
   $ hg parents -q
-  4:33f7f61e6c5e
+  (4|13):33f7f61e6c5e (re)
   $ hg shelve -l
   default         (*)* changes to: second (glob)
   $ hg status
@@ -555,7 +606,7 @@
   merging a/a
   note: unshelved changes already existed in the working copy
   $ hg parents -q
-  4:33f7f61e6c5e
+  (4|13):33f7f61e6c5e (re)
   $ hg shelve -l
   $ hg status
   A foo/foo
@@ -592,16 +643,16 @@
 
   $ hg bookmark test
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg shelve
   shelved as test
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg unshelve
   unshelving change 'test'
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
 
 shelve should still work even if mq is disabled
 
@@ -611,11 +662,11 @@
   $ hg --config extensions.mq=! shelve --list
   test            (*)* changes to: create conflict (glob)
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg --config extensions.mq=! unshelve
   unshelving change 'test'
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
 
 shelve should leave dirstate clean (issue4055)
 
@@ -635,10 +686,11 @@
   $ hg shelve
   shelved as default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
   $ hg rebase -d 6c103be8f4e4 --config extensions.rebase=
-  rebasing 2:323bfa07f744 "xyz" (tip)
+  rebasing 2:323bfa07f744 "xyz"( \(tip\))? (re)
   merging x
-  saved backup bundle to $TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-78114325-rebase.hg
+  saved backup bundle to \$TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-(78114325|7ae538ef)-rebase.hg (re)
   $ hg unshelve
   unshelving change 'default'
   rebasing shelved changes
@@ -764,13 +816,13 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
 #if repobundlerepo
-  $ hg log -G --template '{rev}  {desc|firstline}  {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()'
-  o  4  changes to: commit stuff  shelve@localhost
+  $ hg log -G --template '{rev}  {desc|firstline}  {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()' --hidden
+  o  [48]  changes to: commit stuff  shelve@localhost (re)
   |
   ~
 #endif
   $ hg log -G --template '{rev}  {desc|firstline}  {author}'
-  @  3  commit stuff  test
+  @  [37]  commit stuff  test (re)
   |
   | o  2  c  test
   |/
@@ -786,6 +838,22 @@
   warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
   [1]
+
+#if phasebased
+  $ hg log -G --template '{rev}  {desc|firstline}  {author}  {date|isodate}'
+  @  9  pending changes temporary commit  shelve@localhost  2004-01-10 13:37 +0000
+  |
+  | @  8  changes to: commit stuff  shelve@localhost  1970-01-01 00:00 +0000
+  |/
+  o  7  commit stuff  test  1970-01-01 00:00 +0000
+  |
+  | o  2  c  test  1970-01-01 00:00 +0000
+  |/
+  o  0  a  test  1970-01-01 00:00 +0000
+  
+#endif
+
+#if stripbased
   $ hg log -G --template '{rev}  {desc|firstline}  {author}  {date|isodate}'
   @  5  changes to: commit stuff  shelve@localhost  1970-01-01 00:00 +0000
   |
@@ -797,15 +865,17 @@
   |/
   o  0  a  test  1970-01-01 00:00 +0000
   
+#endif
+
   $ hg st
   M f
   ? f.orig
   $ cat f
-  <<<<<<< shelve:       5f6b880e719b - shelve: pending changes temporary commit
+  <<<<<<< shelve:       d44eae5c3d33 - shelve: pending changes temporary commit
   g
   =======
   f
-  >>>>>>> working-copy: 81152db69da7 - shelve: changes to: commit stuff
+  >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
   $ cat f.orig
   g
   $ hg unshelve --abort -t false
@@ -847,7 +917,7 @@
   g
   =======
   f
-  >>>>>>> working-copy: 81152db69da7 - shelve: changes to: commit stuff
+  >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
   $ cat f.orig
   g
   $ hg unshelve --abort
@@ -872,7 +942,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (activating bookmark test)
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg unshelve
   unshelving change 'default'
   rebasing shelved changes
@@ -881,7 +951,7 @@
   unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
   [1]
   $ hg bookmark
-     test                      4:33f7f61e6c5e
+     test                      (4|13):33f7f61e6c5e (re)
 
 Test that resolving all conflicts in one direction (so that the rebase
 is a no-op), works (issue4398)
@@ -895,13 +965,13 @@
   note: unshelved changes already existed in the working copy
   unshelve of 'default' complete
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg diff
   $ hg status
   ? a/a.orig
   ? foo/foo
   $ hg summary
-  parent: 4:33f7f61e6c5e tip
+  parent: (4|13):33f7f61e6c5e tip (re)
    create conflict
   branch: default
   bookmarks: *test
@@ -980,14 +1050,14 @@
   M a/a
   ? foo/foo
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ hg unshelve
   unshelving change 'test'
   temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   merging a/a
   $ hg bookmark
-   * test                      4:33f7f61e6c5e
+   \* test                      (4|13):33f7f61e6c5e (re)
   $ cat a/a
   a
   a
@@ -1109,7 +1179,7 @@
   shelved as default
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg debugbundle .hg/shelved/*.hg
-  45993d65fe9dc3c6d8764b9c3b07fa831ee7d92d
+  330882a04d2ce8487636b1fb292e5beea77fa1e3
   $ cd ..
 
 with general delta
@@ -1132,7 +1202,7 @@
   $ hg debugbundle .hg/shelved/*.hg
   Stream params: {Compression: BZ}
   changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
-      45993d65fe9dc3c6d8764b9c3b07fa831ee7d92d
+      330882a04d2ce8487636b1fb292e5beea77fa1e3
   $ cd ..
 
 Test visibility of in-memory changes inside transaction to external hook
@@ -1179,24 +1249,24 @@
 
   $ sh $TESTTMP/checkvisibility.sh before-unshelving
   ==== before-unshelving:
-  VISIBLE 5:703117a2acfb
-  ACTUAL  5:703117a2acfb
+  VISIBLE (5|19):703117a2acfb (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
   $ hg unshelve --keep default
   temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   ==== preupdate:
-  VISIBLE 6:66b86db80ee4
-  ACTUAL  5:703117a2acfb
+  VISIBLE (6|20):54c00d20fb3f (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
   ==== preupdate:
-  VISIBLE 8:92fdbb7b4de7
-  ACTUAL  5:703117a2acfb
+  VISIBLE (8|21):8efe6f7537dc (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
   ==== preupdate:
-  VISIBLE 6:66b86db80ee4
-  ACTUAL  5:703117a2acfb
+  VISIBLE (6|20):54c00d20fb3f (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
   $ cat >> .hg/hgrc <<EOF
@@ -1206,8 +1276,8 @@
 
   $ sh $TESTTMP/checkvisibility.sh after-unshelving
   ==== after-unshelving:
-  VISIBLE 5:703117a2acfb
-  ACTUAL  5:703117a2acfb
+  VISIBLE (5|19):703117a2acfb (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
 == test visibility to external update hook
@@ -1223,25 +1293,25 @@
 
   $ sh $TESTTMP/checkvisibility.sh before-unshelving
   ==== before-unshelving:
-  VISIBLE 5:703117a2acfb
-  ACTUAL  5:703117a2acfb
+  VISIBLE (5|19):703117a2acfb (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
   $ hg unshelve --keep default
   temporarily committing pending changes (restore with 'hg unshelve --abort')
   rebasing shelved changes
   ==== update:
-  VISIBLE 6:66b86db80ee4
-  VISIBLE 7:206bf5d4f922
-  ACTUAL  5:703117a2acfb
+  VISIBLE (6|20):54c00d20fb3f (re)
+  VISIBLE 1?7:492ed9d705e5 (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
   ==== update:
-  VISIBLE 6:66b86db80ee4
-  ACTUAL  5:703117a2acfb
+  VISIBLE (6|20):54c00d20fb3f (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
   ==== update:
-  VISIBLE 5:703117a2acfb
-  ACTUAL  5:703117a2acfb
+  VISIBLE (5|19):703117a2acfb (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
   $ cat >> .hg/hgrc <<EOF
@@ -1251,8 +1321,8 @@
 
   $ sh $TESTTMP/checkvisibility.sh after-unshelving
   ==== after-unshelving:
-  VISIBLE 5:703117a2acfb
-  ACTUAL  5:703117a2acfb
+  VISIBLE (5|19):703117a2acfb (re)
+  ACTUAL  (5|19):703117a2acfb (re)
   ====
 
   $ cd ..
@@ -1303,31 +1373,31 @@
   > EOF
 
   $ hg bookmarks -R repo
-     test                      4:33f7f61e6c5e
+     test                      (4|13):33f7f61e6c5e (re)
   $ hg share -B repo share
   updating working directory
   6 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd share
 
   $ hg bookmarks
-     test                      4:33f7f61e6c5e
+     test                      (4|13):33f7f61e6c5e (re)
   $ hg bookmarks foo
   $ hg bookmarks
-   * foo                       5:703117a2acfb
-     test                      4:33f7f61e6c5e
+   \* foo                       (5|19):703117a2acfb (re)
+     test                      (4|13):33f7f61e6c5e (re)
   $ echo x >> x
   $ hg shelve
   shelved as foo
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg bookmarks
-   * foo                       5:703117a2acfb
-     test                      4:33f7f61e6c5e
+   \* foo                       (5|19):703117a2acfb (re)
+     test                      (4|13):33f7f61e6c5e (re)
 
   $ hg unshelve
   unshelving change 'foo'
   $ hg bookmarks
-   * foo                       5:703117a2acfb
-     test                      4:33f7f61e6c5e
+   \* foo                       (5|19):703117a2acfb (re)
+     test                      (4|13):33f7f61e6c5e (re)
 
   $ cd ..
 
@@ -1772,8 +1842,8 @@
   > ashelve
   > 8b058dae057a5a78f393f4535d9e363dd5efac9d
   > 8b058dae057a5a78f393f4535d9e363dd5efac9d
-  > 8b058dae057a5a78f393f4535d9e363dd5efac9d 003d2d94241cc7aff0c3a148e966d6a4a377f3a7
-  > 003d2d94241cc7aff0c3a148e966d6a4a377f3a7
+  > 8b058dae057a5a78f393f4535d9e363dd5efac9d f543b27db2cdb41737e2e0008dc524c471da1446
+  > f543b27db2cdb41737e2e0008dc524c471da1446
   > 
   > nokeep
   > :no-active-bookmark
@@ -1785,5 +1855,50 @@
 mercurial does not crash
   $ hg unshelve --continue
   unshelve of 'ashelve' complete
+
+#if phasebased
+
+Unshelve with some metadata file missing
+----------------------------------------
+
+  $ hg shelve
+  shelved as default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo 3 > a
+
+Test with the `.shelve` missing, but the changeset still in the repo (non-natural case)
+
+  $ rm .hg/shelved/default.shelve
+  $ hg unshelve
+  unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ hg unshelve --abort
+  unshelve of 'default' aborted
+
+Unshelve without .shelve metadata (can happen when upgrading a repository with old shelves)
+
+  $ cat .hg/shelved/default.shelve
+  node=82e0cb9893247d12667017593ce1e5655860f1ac
+  $ hg strip --hidden --rev 82e0cb989324 --no-backup
+  $ rm .hg/shelved/default.shelve
+  $ hg unshelve
+  unshelving change 'default'
+  temporarily committing pending changes (restore with 'hg unshelve --abort')
+  rebasing shelved changes
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+  [1]
+  $ cat .hg/shelved/default.shelve
+  node=82e0cb9893247d12667017593ce1e5655860f1ac
+  $ hg unshelve --abort
+  unshelve of 'default' aborted
+
+#endif
+
   $ cd ..
-
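
The phase-based variant keeps per-shelf metadata in a small `<name>.shelve`
file of `key=value` lines (the `node=...` contents shown above), and the
tests show unshelve recovering and regenerating it when the file is missing.
Parsing such a file is simple (a sketch, not the extension's parser):

  def readshelvemeta(text):
      # parse simple 'key=value' metadata lines (a sketch)
      meta = {}
      for line in text.splitlines():
          if '=' in line:
              key, value = line.split('=', 1)
              meta[key] = value
      return meta

  # e.g. readshelvemeta('node=82e0cb98...') -> {'node': '82e0cb98...'}
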
--- a/tests/test-show-work.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-show-work.t	Mon Oct 22 14:46:06 2018 -0400
@@ -57,7 +57,6 @@
   $ hg show work
   @  128c commit 2
   o  181c commit 1
-  |
   ~
 
 Multiple DAG heads will be shown
@@ -72,7 +71,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
 Even when wdir is something else
@@ -84,7 +82,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
 Draft child shows public head (multiple heads)
@@ -131,7 +128,6 @@
   | o  128c commit 2
   |/
   o  181c commit 1
-  |
   ~
 
   $ cd ..
@@ -162,7 +158,6 @@
   | o  128c (@) commit 2
   |/
   o  181c commit 1
-  |
   ~
 
   $ cd ..
@@ -185,7 +180,6 @@
   @  3758 Added tag 0.2 for changeset 6379c25b76f1
   o  6379 (0.2) commit 3
   o  a2ad Added tag 0.1 for changeset 6a75536ea0b1
-  |
   ~
 
   $ cd ..
@@ -246,7 +240,6 @@
   $ hg show work --color=debug
   @  [log.changeset changeset.draft changeset.unstable instability.orphan|32f3] [log.description|commit 3]
   x  [log.changeset changeset.draft changeset.obsolete|6a75] [log.description|commit 2]
-  |
   ~
 
   $ cd ..
--- a/tests/test-simple-update.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-simple-update.t	Mon Oct 22 14:46:06 2018 -0400
@@ -10,7 +10,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ hg clone . ../branch
   updating to branch default
@@ -39,7 +39,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
 
   $ hg co
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -79,7 +79,7 @@
   > [worker]
   > numcpus = 4
   > EOF
-  $ for i in `$PYTHON $TESTDIR/seq.py 1 100`; do
+  $ for i in `"$PYTHON" $TESTDIR/seq.py 1 100`; do
   >   echo $i > $i
   > done
   $ hg ci -qAm 'add 100 files'
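
With `[worker] numcpus = 4`, the 100-file update above may be spread across
worker processes, each taking a slice of the file list. One plausible
partition scheme (illustrative only; round-robin keeps the per-worker mix
similar):

  def partition(lst, nslices):
      # deal items round-robin so each worker gets a similar mix
      for i in range(nslices):
          yield lst[i::nslices]

  # e.g. list(partition(list(range(10)), 4))
  # -> [[0, 4, 8], [1, 5, 9], [2, 6], [3, 7]]
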
--- a/tests/test-single-head.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-single-head.t	Mon Oct 22 14:46:06 2018 -0400
@@ -200,5 +200,4 @@
 
   $ hg strip --config extensions.strip= --rev 'desc("c_dH0")'
   saved backup bundle to $TESTTMP/client/.hg/strip-backup/fe47ea669cea-a41bf5a9-backup.hg
-  warning: ignoring unknown working parent 49003e504178!
 
--- a/tests/test-sparse-clone.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-sparse-clone.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2,7 +2,7 @@
 
   $ cat >> $HGRCPATH << EOF
   > [ui]
-  > ssh = $PYTHON "$RUNTESTDIR/dummyssh"
+  > ssh = "$PYTHON" "$RUNTESTDIR/dummyssh"
   > username = nobody <no.reply@fb.com>
   > [extensions]
   > sparse=
--- a/tests/test-sparse-merges.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-sparse-merges.t	Mon Oct 22 14:46:06 2018 -0400
@@ -113,8 +113,76 @@
 
   $ hg merge
   temporarily included 1 file(s) in the sparse checkout for merging
-  local [working copy] changed d which other [merge rev] deleted
+  file 'd' was deleted in other [merge rev] but was modified in local [working copy].
+  What do you want to do?
   use (c)hanged version, (d)elete, or leave (u)nresolved? u
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
   use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
   [1]
+
+  $ cd ..
+
+Testing merging of a file which is renamed+modified on one side and modified on
+the other
+
+  $ hg init mvtest
+  $ cd mvtest
+  $ echo "syntax: glob" >> .hgignore
+  $ echo "*.orig" >> .hgignore
+  $ hg ci -Aqm "added .hgignore"
+  $ for ch in a d; do echo foo > $ch; hg ci -Aqm "added "$ch; done;
+  $ cat >> .hg/hgrc <<EOF
+  > [alias]
+  > glog = log -GT "{rev}:{node|short} {desc}"
+  > [extensions]
+  > sparse =
+  > EOF
+
+  $ hg glog
+  @  2:f29feff37cfc added d
+  |
+  o  1:617125d27d6b added a
+  |
+  o  0:53f3774ed939 added .hgignore
+  
+  $ echo babar >> a
+  $ hg ci -m "added babar to a"
+
+  $ hg up '.^'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg mv a amove
+  $ hg ci -m "moved a to amove"
+  created new head
+
+  $ hg up 3
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg glog
+  o  4:5d1e85955f6d moved a to amove
+  |
+  | @  3:a06e41a6c16c added babar to a
+  |/
+  o  2:f29feff37cfc added d
+  |
+  o  1:617125d27d6b added a
+  |
+  o  0:53f3774ed939 added .hgignore
+  
+  $ hg debugsparse --exclude "a"
+  $ ls
+  d
+
+  $ hg merge
+  temporarily included 1 file(s) in the sparse checkout for merging
+  merging a and amove to amove
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ hg up -C 4
+  cleaned up 1 temporarily added file(s) from the sparse checkout
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ hg merge
+  merging amove and a to amove
+  abort: cannot add 'a' - it is outside the sparse checkout
+  (include file with `hg debugsparse --include <pattern>` or use `hg add -s <file>` to include file directory while adding)
+  [255]
--- a/tests/test-sparse-profiles.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-sparse-profiles.t	Mon Oct 22 14:46:06 2018 -0400
@@ -119,7 +119,7 @@
 Verify conflicting merge pulls in the conflicting changes
 
   $ hg merge 1
-  temporarily included 1 file(s) in the sparse checkout for merging
+  temporarily included 2 file(s) in the sparse checkout for merging
   merging backend.sparse
   merging data.py
   warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
@@ -184,7 +184,7 @@
 
   $ hg rebase -d 2
   rebasing 1:a2b1de640a62 "edit profile"
-  temporarily included 1 file(s) in the sparse checkout for merging
+  temporarily included 2 file(s) in the sparse checkout for merging
   merging backend.sparse
   merging data.py
   warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-sparse-revlog.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,134 @@
+====================================
+Test delta choice with sparse revlog
+====================================
+
+Sparse-revlog usually shows the most gain on manifests. However, it is simpler
+to generate an appropriate file, so we test with a single file instead. The
+goal is to observe intermediate snapshots being created.
+
+We need a large enough file. Part of the content needs to be replaced
+repeatedly while some of it changes rarely.
+
+  $ bundlepath="$TESTDIR/artifacts/cache/big-file-churn.hg"
+
+  $ expectedhash=`cat "$bundlepath".md5`
+  $ if [ ! -f "$bundlepath" ]; then
+  >     echo 'skipped: missing artifact, run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
+  >     exit 80
+  > fi
+  $ currenthash=`f -M "$bundlepath" | cut -d = -f 2`
+  $ if [ "$currenthash" != "$expectedhash" ]; then
+  >     echo 'skipped: outdated artifact, md5 "'"$currenthash"'" expected "'"$expectedhash"'" run "'"$TESTDIR"'/artifacts/scripts/generate-churning-bundle.py"'
+  >     exit 80
+  > fi
+
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > sparse-revlog = yes
+  > maxchainlen = 15
+  > [storage]
+  > revlog.optimize-delta-parent-choice = yes
+  > EOF
+  $ hg init sparse-repo
+  $ cd sparse-repo
+  $ hg unbundle $bundlepath
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5001 changesets with 5001 changes to 1 files (+89 heads)
+  new changesets 9706f5af64f4:d9032adc8114 (5001 drafts)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  updated to "d9032adc8114: commit #5000"
+  89 other heads for branch "default"
+
+  $ hg log --stat -r 0:3
+  changeset:   0:9706f5af64f4
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     initial commit
+  
+   SPARSE-REVLOG-TEST-FILE |  10500 ++++++++++++++++++++++++++++++++++++++++++++++
+   1 files changed, 10500 insertions(+), 0 deletions(-)
+  
+  changeset:   1:724907deaa5e
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     commit #1
+  
+   SPARSE-REVLOG-TEST-FILE |  1068 +++++++++++++++++++++++-----------------------
+   1 files changed, 534 insertions(+), 534 deletions(-)
+  
+  changeset:   2:62c41bce3e5d
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     commit #2
+  
+   SPARSE-REVLOG-TEST-FILE |  1068 +++++++++++++++++++++++-----------------------
+   1 files changed, 534 insertions(+), 534 deletions(-)
+  
+  changeset:   3:348a9cbd6959
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     commit #3
+  
+   SPARSE-REVLOG-TEST-FILE |  1068 +++++++++++++++++++++++-----------------------
+   1 files changed, 534 insertions(+), 534 deletions(-)
+  
+
+  $ f -s .hg/store/data/*.d
+  .hg/store/data/_s_p_a_r_s_e-_r_e_v_l_o_g-_t_e_s_t-_f_i_l_e.d: size=63002924
+  $ hg debugrevlog *
+  format : 1
+  flags  : generaldelta
+  
+  revisions     :     5001
+      merges    :      625 (12.50%)
+      normal    :     4376 (87.50%)
+  revisions     :     5001
+      empty     :        0 ( 0.00%)
+                     text  :        0 (100.00%)
+                     delta :        0 (100.00%)
+      snapshot  :      374 ( 7.48%)
+        lvl-0   :              4 ( 0.08%)
+        lvl-1   :             23 ( 0.46%)
+        lvl-2   :             63 ( 1.26%)
+        lvl-3   :            118 ( 2.36%)
+        lvl-4   :            166 ( 3.32%)
+      deltas    :     4627 (92.52%)
+  revision size : 63002924
+      snapshot  :  9888099 (15.69%)
+        lvl-0   :         804262 ( 1.28%)
+        lvl-1   :        1561380 ( 2.48%)
+        lvl-2   :        2096696 ( 3.33%)
+        lvl-3   :        2749539 ( 4.36%)
+        lvl-4   :        2676222 ( 4.25%)
+      deltas    : 53114825 (84.31%)
+  
+  chunks        :     5001
+      0x78 (x)  :     5001 (100.00%)
+  chunks size   : 63002924
+      0x78 (x)  : 63002924 (100.00%)
+  
+  avg chain length  :        9
+  max chain length  :       15
+  max chain reach   : 28907121
+  compression ratio :       27
+  
+  uncompressed data size (min/max/avg) : 346468 / 346472 / 346471
+  full revision size (min/max/avg)     : 201008 / 201141 / 201065
+  inter-snapshot size (min/max/avg)    : 11601 / 157413 / 24550
+      level-1   (min/max/avg)          : 13061 / 157413 / 67886
+      level-2   (min/max/avg)          : 11674 / 85631 / 33280
+      level-3   (min/max/avg)          : 11602 / 42957 / 23301
+      level-4   (min/max/avg)          : 11601 / 21475 / 16121
+  delta size (min/max/avg)             : 10649 / 105465 / 11479
+  
+  deltas against prev  : 3966 (85.71%)
+      where prev = p1  : 3922     (98.89%)
+      where prev = p2  :    0     ( 0.00%)
+      other            :   44     ( 1.11%)
+  deltas against p1    :  611 (13.21%)
+  deltas against p2    :   50 ( 1.08%)
+  deltas against other :    0 ( 0.00%)
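
The header of this test describes the churn pattern the artifact must have;
the bundle itself is not checked in (hence the tests/artifacts/cache entry
added to .hgignore in this same series). A hedged sketch for refreshing the
cache, assuming the generator script named by the skip messages takes no
arguments and writes the bundle at the path the test reads:

  $ python3 "$TESTDIR"/artifacts/scripts/generate-churning-bundle.py
  $ f -M "$TESTDIR"/artifacts/cache/big-file-churn.hg | cut -d = -f 2

The second command mirrors the test's own md5 check (f is the tests/f
helper), so its output can be compared against the checked-in .md5 file.
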
--- a/tests/test-sparse.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-sparse.t	Mon Oct 22 14:46:06 2018 -0400
@@ -189,7 +189,7 @@
 
   $ hg rebase -d 1 -r 2 --config extensions.rebase=
   rebasing 2:b91df4f39e75 "edit hide" (tip)
-  temporarily included 1 file(s) in the sparse checkout for merging
+  temporarily included 2 file(s) in the sparse checkout for merging
   merging hide
   warning: conflicts while merging hide! (edit, then use 'hg resolve --mark')
   unresolved conflicts (see hg resolve, then hg rebase --continue)
@@ -224,7 +224,7 @@
 
   $ hg up -q 1
   $ hg merge -r 2
-  temporarily included 1 file(s) in the sparse checkout for merging
+  temporarily included 2 file(s) in the sparse checkout for merging
   merging hide
   warning: conflicts while merging hide! (edit, then use 'hg resolve --mark')
   0 files updated, 0 files merged, 0 files removed, 1 files unresolved
@@ -294,7 +294,7 @@
   $ touch dir1/notshown
   $ hg commit -A dir1/notshown -m "notshown"
   $ hg debugsparse --include 'dir1/dir2'
-  $ $PYTHON $TESTDIR/list-tree.py . | egrep -v '\.[\/]\.hg'
+  $ "$PYTHON" $TESTDIR/list-tree.py . | egrep -v '\.[\/]\.hg'
   ./
   ./dir1/
   ./dir1/dir2/
@@ -302,7 +302,7 @@
   ./hide.orig
   $ hg debugsparse --delete 'dir1/dir2'
   $ hg debugsparse --include 'glob:dir1/dir2'
-  $ $PYTHON $TESTDIR/list-tree.py . | egrep -v '\.[\/]\.hg'
+  $ "$PYTHON" $TESTDIR/list-tree.py . | egrep -v '\.[\/]\.hg'
   ./
   ./dir1/
   ./dir1/dir2/
@@ -385,10 +385,10 @@
   $ cp ../dirstateallexcluded .hg/dirstate
   $ touch includedadded
   $ hg add includedadded
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   a   0         -1 unset               includedadded
   $ hg debugrebuilddirstate --minimal
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   n   0         -1 unset               included
   a   0         -1 * includedadded (glob)
 
@@ -410,13 +410,13 @@
   included
 We have files in the dirstate that are included and excluded. Some are in the
 manifest and some are not.
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   n 644          0 * excluded (glob)
   a   0         -1 * excludednomanifest (glob)
   n 644          0 * included (glob)
   a   0         -1 * includedadded (glob)
   $ hg debugrebuilddirstate --minimal
-  $ hg debugdirstate --nodates
+  $ hg debugdirstate --no-dates
   n 644          0 * included (glob)
   a   0         -1 * includedadded (glob)
 
--- a/tests/test-split.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-split.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,7 +1,7 @@
 #testcases obsstore-on obsstore-off
 
   $ cat > $TESTTMP/editor.py <<EOF
-  > #!$PYTHON
+  > #!"$PYTHON"
   > import os
   > import sys
   > path = os.path.join(os.environ['TESTTMP'], 'messages')
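
A recurring fix in this section, here and in the $PYTHON hunks of
test-sparse.t and test-ssh-bundle1.t, is quoting "$PYTHON", presumably so the
tests survive an interpreter path that contains spaces. A minimal
illustration with a hypothetical path:

  $ PYTHON='/opt/my python/bin/python3'
  $ $PYTHON -c 'print(1)'     # word-splits: tries to run /opt/my with argument python/bin/python3
  $ "$PYTHON" -c 'print(1)'   # quoted: the whole path is passed as one word
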
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-sqlitestore.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,110 @@
+#require sqlite
+
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > sqlitestore =
+  > EOF
+
+New repo should not use SQLite by default
+
+  $ hg init empty-no-sqlite
+  $ cat empty-no-sqlite/.hg/requires
+  dotencode
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+storage.new-repo-backend=sqlite is recognized
+
+  $ hg --config storage.new-repo-backend=sqlite init empty-sqlite
+  $ cat empty-sqlite/.hg/requires
+  dotencode
+  exp-sqlite-001
+  exp-sqlite-comp-001=zstd (zstd !)
+  exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$ (no-zstd !)
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+  $ cat >> $HGRCPATH << EOF
+  > [storage]
+  > new-repo-backend = sqlite
+  > EOF
+
+Can force compression to zlib
+
+  $ hg --config storage.sqlite.compression=zlib init empty-zlib
+  $ cat empty-zlib/.hg/requires
+  dotencode
+  exp-sqlite-001
+  exp-sqlite-comp-001=$BUNDLE2_COMPRESSIONS$
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+Can force compression to none
+
+  $ hg --config storage.sqlite.compression=none init empty-none
+  $ cat empty-none/.hg/requires
+  dotencode
+  exp-sqlite-001
+  exp-sqlite-comp-001=none
+  fncache
+  generaldelta
+  revlogv1
+  store
+
+Can make a local commit
+
+  $ hg init local-commit
+  $ cd local-commit
+  $ echo 0 > foo
+  $ hg commit -A -m initial
+  adding foo
+
+That results in rows being inserted into various tables
+
+  $ sqlite3 .hg/store/db.sqlite << EOF
+  > SELECT * FROM filepath;
+  > EOF
+  1|foo
+
+  $ sqlite3 .hg/store/db.sqlite << EOF
+  > SELECT * FROM fileindex;
+  > EOF
+  1|1|0|-1|-1|0|0|1||6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe (esc)
+
+  $ sqlite3 .hg/store/db.sqlite << EOF
+  > SELECT * FROM delta;
+  > EOF
+  1|1|	\xd2\xaf\x8d\xd2"\x01\xdd\x8dH\xe5\xdc\xfc\xae\xd2\x81\xff\x94"\xc7|0 (esc)
+  
+
+Tracking multiple files works
+
+  $ echo 1 > bar
+  $ hg commit -A -m 'add bar'
+  adding bar
+
+  $ sqlite3 .hg/store/db.sqlite << EOF
+  > SELECT * FROM filedata ORDER BY id ASC;
+  > EOF
+  1|1|foo|0|6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe|-1|-1|0|0|1| (esc)
+  2|2|bar|0|\xb8\xe0/d3s\x80!\xa0e\xf9Au\xc7\xcd#\xdb_\x05\xbe|-1|-1|1|0|2| (esc)
+
+Tracking multiple revisions of a file works
+
+  $ echo a >> foo
+  $ hg commit -m 'modify foo'
+
+  $ sqlite3 .hg/store/db.sqlite << EOF
+  > SELECT * FROM filedata ORDER BY id ASC;
+  > EOF
+  1|1|foo|0|6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe|-1|-1|0|0|1| (esc)
+  2|2|bar|0|\xb8\xe0/d3s\x80!\xa0e\xf9Au\xc7\xcd#\xdb_\x05\xbe|-1|-1|1|0|2| (esc)
+  3|1|foo|1|\xdd\xb3V\xcd\xde1p@\xf7\x8e\x90\xb8*\x8b,\xe9\x0e\xd6j+|0|-1|2|0|3|1 (esc)
+
+  $ cd ..
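
The SELECT statements above read the experimental store directly. A small
sketch for exploring it further, using only the names this test queries
(filepath, fileindex, delta, filedata) and sqlite3's standard .schema
dot-command:

  $ sqlite3 .hg/store/db.sqlite '.schema filedata'
  $ sqlite3 .hg/store/db.sqlite 'SELECT COUNT(*) FROM delta;'

The layout is explicitly experimental (exp-sqlite-001 in the requires file),
so neither the schema nor the requirements should be treated as stable.
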
--- a/tests/test-ssh-bundle1.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ssh-bundle1.t	Mon Oct 22 14:46:06 2018 -0400
@@ -48,7 +48,7 @@
   > [hooks]
   > changegroup = sh -c "printenv.py changegroup-in-remote 0 ../dummylog"
   > EOF
-  $ cd ..
+  $ cd $TESTTMP
 
 repo not found error
 
@@ -59,10 +59,12 @@
 
 non-existent absolute path
 
+#if no-msys
   $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
   remote: abort: repository /$TESTTMP/nonexistent not found!
   abort: no suitable response from remote hg!
   [255]
+#endif
 
 clone remote via stream
 
@@ -82,10 +84,10 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 2 files
   $ hg branches
   default                        0:1160648e36ce
-  $ cd ..
+  $ cd $TESTTMP
 
 clone bookmarks via stream
 
@@ -101,7 +103,7 @@
   $ cd stream2
   $ hg book
      mybook                    0:1160648e36ce
-  $ cd ..
+  $ cd $TESTTMP
   $ rm -rf local-stream stream2
 
 #endif
@@ -126,7 +128,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 2 files
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
   > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
@@ -208,7 +210,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  $ cd ../remote
+  $ cd $TESTTMP/remote
 
 check remote tip
 
@@ -225,7 +227,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 3 total revisions
+  checked 4 changesets with 3 changes to 2 files
   $ hg cat -r tip foo
   bleah
   $ echo z > z
@@ -234,7 +236,7 @@
 
 test pushkeys and bookmarks
 
-  $ cd ../local
+  $ cd $TESTTMP/local
   $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
   bookmarks	
   namespaces	
@@ -341,7 +343,7 @@
   abort: password in URL not supported!
   [255]
 
-  $ cd ..
+  $ cd $TESTTMP
 
 hide outer repo
   $ hg init
@@ -393,7 +395,7 @@
   abort: no suitable response from remote hg!
   [255]
 
-  $ SSH_ORIGINAL_COMMAND="'hg' serve -R 'a'repo' --stdio" $PYTHON "$TESTDIR/../contrib/hg-ssh"
+  $ SSH_ORIGINAL_COMMAND="'hg' serve -R 'a'repo' --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
   Illegal command "'hg' serve -R 'a'repo' --stdio": No closing quotation
   [255]
 
@@ -431,7 +433,7 @@
   updating 6c0482d977a3 to public failed!
   [1]
 
-  $ cd ..
+  $ cd $TESTTMP
 
 stderr from remote commands should be printed before stdout from local code (issue4336)
 
@@ -477,12 +479,12 @@
   $ hg pull --debug ssh://user@dummy/remote
   pulling from ssh://user@dummy/remote
   running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   sending hello command
   sending between command
-  remote: 413 (sshv1 !)
-  protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427 (sshv1 !)
+  protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
   sending protocaps command
   preparing listkeys for "bookmarks"
@@ -498,11 +500,11 @@
   received listkey for "phases": 15 bytes
   checking for updated bookmarks
 
-  $ cd ..
+  $ cd $TESTTMP
 
   $ cat dummylog
   Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
-  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio
+  Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio (no-msys !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
--- a/tests/test-ssh-clone-r.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ssh-clone-r.t	Mon Oct 22 14:46:06 2018 -0400
@@ -19,7 +19,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  new changesets bfaf4b5cbf01:916f1afdef90
+  new changesets bfaf4b5cbf01:916f1afdef90 (9 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg up tip
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -45,7 +45,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -57,7 +57,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -69,7 +69,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -81,7 +81,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 4 changesets, 4 total revisions
+  checked 4 changesets with 4 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -93,7 +93,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -105,7 +105,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -117,7 +117,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 5 total revisions
+  checked 4 changesets with 5 changes to 2 files
   adding changesets
   adding manifests
   adding file changes
@@ -129,7 +129,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 5 changesets, 6 total revisions
+  checked 5 changesets with 6 changes to 3 files
   adding changesets
   adding manifests
   adding file changes
@@ -141,7 +141,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   $ cd test-8
   $ hg pull ../test-7
   pulling from ../test-7
@@ -157,7 +157,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
   $ cd ..
   $ cd test-1
   $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" -r 4 ssh://user@dummy/remote
@@ -174,7 +174,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 1 files
   $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
   pulling from ssh://user@dummy/remote
   searching for changes
@@ -200,7 +200,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 5 changesets, 3 total revisions
+  checked 5 changesets with 3 changes to 1 files
   $ hg pull -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote
   pulling from ssh://user@dummy/remote
   searching for changes
@@ -215,6 +215,6 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  4 files, 9 changesets, 7 total revisions
+  checked 9 changesets with 7 changes to 4 files
 
   $ cd ..
--- a/tests/test-ssh-proto-unbundle.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ssh-proto-unbundle.t	Mon Oct 22 14:46:06 2018 -0400
@@ -56,9 +56,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -100,17 +100,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -235,9 +235,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -285,17 +285,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -361,9 +361,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -412,17 +412,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -489,9 +489,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -539,17 +539,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -615,9 +615,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -666,17 +666,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -743,9 +743,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -796,17 +796,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -875,9 +875,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -925,17 +925,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1001,9 +1001,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1054,17 +1054,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1133,9 +1133,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1186,17 +1186,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1271,9 +1271,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1322,17 +1322,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1400,9 +1400,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1451,17 +1451,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1531,9 +1531,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1584,17 +1584,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1672,9 +1672,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1729,17 +1729,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1812,9 +1812,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1858,17 +1858,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
@@ -1942,9 +1942,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1992,17 +1992,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending unbundle command
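
All of the 413/427 and 412/426 flips in this file and the surrounding ssh
tests are one mechanical change: the expanded capability string is 14 bytes
longer than before, and the handshake frames it by length. In sshv1 the
announced number covers the capabilities line including its trailing newline
(427); in sshv2 the payload is read without the newline (426), which is then
consumed by the separate read(1). The protocol handle likewise moves from
exp-ssh-v2-0001 to exp-ssh-v2-0003. A quick way to sanity-check such a frame,
sketched with a stand-in string:

  $ caps='capabilities: batch branchmap lookup'
  $ printf '%s\n' "$caps" | wc -c    # sshv1-style length, newline included
  37
  $ printf '%s' "$caps" | wc -c      # sshv2-style payload length
  36
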
--- a/tests/test-ssh-proto.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ssh-proto.t	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,7 @@
 
   $ cat >> $HGRCPATH << EOF
   > [ui]
-  > ssh = $PYTHON "$TESTDIR/dummyssh"
+  > ssh = "$PYTHON" "$TESTDIR/dummyssh"
   > [devel]
   > debug.peer-request = true
   > [extensions]
@@ -64,8 +64,8 @@
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -86,9 +86,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 `hg debugserve --sshstdio` works
 
@@ -96,8 +96,8 @@
   $ hg debugserve --sshstdio << EOF
   > hello
   > EOF
-  413
-  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  427
+  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 I/O logging works
 
@@ -105,24 +105,24 @@
   > hello
   > EOF
   o> write(4) -> 4:
-  o>     413\n
-  o> write(413) -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
-  413
-  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     427\n
+  o> write(427) -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  427
+  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> flush() -> None
 
   $ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
   > hello
   > EOF
-  413
-  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  427
+  capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
   $ cat $TESTTMP/io
   o> write(4) -> 4:
-  o>     413\n
-  o> write(413) -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> write(427) -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> flush() -> None
 
   $ cd ..
@@ -147,9 +147,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -185,8 +185,8 @@
   remote: banner: line 7
   remote: banner: line 8
   remote: banner: line 9
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -243,9 +243,9 @@
   o> readline() -> 15:
   o>     banner: line 9\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -295,13 +295,13 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
+  o>     427\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
 
@@ -314,8 +314,8 @@
   sending hello command
   sending between command
   remote: 0
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -363,9 +363,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -388,8 +388,8 @@
   remote: 0
   remote: 0
   remote: 0
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -445,9 +445,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -492,9 +492,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -537,9 +537,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -607,9 +607,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Incomplete dictionary send
 
@@ -689,9 +689,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -723,9 +723,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -766,9 +766,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -795,9 +795,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(105) -> 105:
   i>     between\n
   i>     pairs 81\n
@@ -836,9 +836,9 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -885,9 +885,9 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
 
@@ -912,7 +912,7 @@
   o> readline() -> 41:
   o>     68986213bd4485ea51533535e3fc9e78007a711f\n
   o> readline() -> 4:
-  o>     413\n
+  o>     427\n
 
 Send an upgrade request to a server that doesn't support that command
 
@@ -941,9 +941,9 @@
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -954,14 +954,14 @@
   $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
   running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
   running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
   devel-peer-request: hello+between
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
   remote: 0
-  remote: 413
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -984,7 +984,7 @@
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >     upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  >     upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   >     hello\n
   >     between\n
   >     pairs 81\n
@@ -995,30 +995,30 @@
   > EOF
   using raw connection to peer
   i> write(153) -> 153:
-  i>     upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  i>     upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 44:
-  o>     upgraded this-is-some-token exp-ssh-v2-0001\n
+  o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     412\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     426\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
   $ cd ..
 
   $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
   running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
   running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
   devel-peer-request: hello+between
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  protocol upgraded to exp-ssh-v2-0001
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  protocol upgraded to exp-ssh-v2-0003
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
   sending protocaps command
@@ -1031,20 +1031,20 @@
   $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
   running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
   running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob)
   devel-peer-request: hello+between
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  protocol upgraded to exp-ssh-v2-0001
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  protocol upgraded to exp-ssh-v2-0003
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
   sending protocaps command
   Main capabilities:
     batch
     branchmap
-    $USUAL_BUNDLE2_CAPS_SERVER$
+    $USUAL_BUNDLE2_CAPS$
     changegroupsubset
     getbundle
     known
@@ -1078,6 +1078,8 @@
       http
       https
     rev-branch-cache
+    stream
+      v2
 
 Command after upgrade to version 2 is processed
 
@@ -1085,7 +1087,7 @@
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >      upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  >      upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   >      hello\n
   >      between\n
   >      pairs 81\n
@@ -1100,29 +1102,29 @@
   > EOF
   using raw connection to peer
   i> write(153) -> 153:
-  i>     upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  i>     upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 44:
-  o>     upgraded this-is-some-token exp-ssh-v2-0001\n
+  o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     412\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     426\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     397\n
-  o> readline() -> 397:
-  o>     capabilities: branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     411\n
+  o> readline() -> 411:
+  o>     capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
 
 Multiple upgrades are not allowed
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >     upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  >     upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   >     hello\n
   >     between\n
   >     pairs 81\n
@@ -1138,17 +1140,17 @@
   > EOF
   using raw connection to peer
   i> write(153) -> 153:
-  i>     upgrade this-is-some-token proto=exp-ssh-v2-0001\n
+  i>     upgrade this-is-some-token proto=exp-ssh-v2-0003\n
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   o> readline() -> 44:
-  o>     upgraded this-is-some-token exp-ssh-v2-0001\n
+  o>     upgraded this-is-some-token exp-ssh-v2-0003\n
   o> readline() -> 4:
-  o>     412\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     426\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(45) -> 45:
   i>     upgrade another-token proto=irrelevant\n
   i>     hello\n
@@ -1218,9 +1220,9 @@
   i> write(6) -> 6:
   i>     hello\n
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   i> write(98) -> 98:
   i>     between\n
   i>     pairs 81\n
@@ -1234,14 +1236,14 @@
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >     upgrade token proto=exp-ssh-v2-0001\n
+  >     upgrade token proto=exp-ssh-v2-0003\n
   >     invalid\n
   > readline
   > readavailable
   > EOF
   using raw connection to peer
   i> write(44) -> 44:
-  i>     upgrade token proto=exp-ssh-v2-0001\n
+  i>     upgrade token proto=exp-ssh-v2-0003\n
   i>     invalid\n
   o> readline() -> 1:
   o>     \n
@@ -1251,7 +1253,7 @@
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >     upgrade token proto=exp-ssh-v2-0001\n
+  >     upgrade token proto=exp-ssh-v2-0003\n
   >     hello\n
   >     invalid\n
   > readline
@@ -1259,7 +1261,7 @@
   > EOF
   using raw connection to peer
   i> write(50) -> 50:
-  i>     upgrade token proto=exp-ssh-v2-0001\n
+  i>     upgrade token proto=exp-ssh-v2-0003\n
   i>     hello\n
   i>     invalid\n
   o> readline() -> 1:
@@ -1270,7 +1272,7 @@
 
   $ hg debugwireproto --localssh --peer raw << EOF
   > raw
-  >     upgrade token proto=exp-ssh-v2-0001\n
+  >     upgrade token proto=exp-ssh-v2-0003\n
   >     hello\n
   >     between\n
   >     invalid\n
@@ -1279,7 +1281,7 @@
   > EOF
   using raw connection to peer
   i> write(58) -> 58:
-  i>     upgrade token proto=exp-ssh-v2-0001\n
+  i>     upgrade token proto=exp-ssh-v2-0003\n
   i>     hello\n
   i>     between\n
   i>     invalid\n
@@ -1337,9 +1339,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1357,22 +1359,26 @@
   o>     bookmarks\t\n
   o>     namespaces\t\n
   o>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1388,7 +1394,11 @@
   o>     bookmarks\t\n
   o>     namespaces\t\n
   o>     phases\t
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
 
   $ cd ..
 
@@ -1417,9 +1427,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1438,17 +1448,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1478,9 +1488,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1495,22 +1505,24 @@
   o> bufferedreadline() -> 3:
   o>     46\n
   o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1523,7 +1535,9 @@
   o> bufferedreadline() -> 3:
   o>     46\n
   o> bufferedread(46) -> 46: bookA\t68986213bd4485ea51533535e3fc9e78007a711f
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f'
+  }
 
 With multiple bookmarks set
 
@@ -1541,9 +1555,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1560,22 +1574,25 @@
   o> bufferedread(93) -> 93:
   o>     bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n
   o>     bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
+    b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1590,7 +1607,10 @@
   o> bufferedread(93) -> 93:
   o>     bookA\t68986213bd4485ea51533535e3fc9e78007a711f\n
   o>     bookB\t1880f3755e2e52e3199e0ee5638128b08642f34d
-  response: {b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f', b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'}
+  response: {
+    b'bookA': b'68986213bd4485ea51533535e3fc9e78007a711f',
+    b'bookB': b'1880f3755e2e52e3199e0ee5638128b08642f34d'
+  }
 
 Test pushkey for bookmarks
 
@@ -1610,9 +1630,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1641,17 +1661,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -1702,9 +1722,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1719,22 +1739,24 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1747,7 +1769,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
 
 Create some commits
 
@@ -1781,9 +1805,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1801,22 +1825,26 @@
   o>     20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1', b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1',
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1832,7 +1860,11 @@
   o>     20b8a89289d80036e6c4e87c2083e3bea1586637\t1\n
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1', b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'20b8a89289d80036e6c4e87c2083e3bea1586637': b'1',
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
 
 Single draft head
 
@@ -1850,9 +1882,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1869,22 +1901,25 @@
   o> bufferedread(58) -> 58:
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1899,7 +1934,10 @@
   o> bufferedread(58) -> 58:
   o>     c4750011d906c18ea2f0527419cbc1a544435150\t1\n
   o>     publishing\tTrue
-  response: {b'c4750011d906c18ea2f0527419cbc1a544435150': b'1', b'publishing': b'True'}
+  response: {
+    b'c4750011d906c18ea2f0527419cbc1a544435150': b'1',
+    b'publishing': b'True'
+  }
 
 All public heads
 
@@ -1917,9 +1955,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -1934,22 +1972,24 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
   
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending listkeys command
@@ -1962,7 +2002,9 @@
   o> bufferedreadline() -> 3:
   o>     15\n
   o> bufferedread(15) -> 15: publishing\tTrue
-  response: {b'publishing': b'True'}
+  response: {
+    b'publishing': b'True'
+  }
 
 Setting public phase via pushkey
 
@@ -1984,9 +2026,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -2016,17 +2058,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending pushkey command
@@ -2091,9 +2133,9 @@
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 4:
-  o>     413\n
-  o> readline() -> 413:
-  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+  o>     427\n
+  o> readline() -> 427:
+  o>     capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
   o> readline() -> 2:
   o>     1\n
   o> readline() -> 1:
@@ -2122,17 +2164,17 @@
   testing ssh2
   creating ssh peer from handshake results
   i> write(171) -> 171:
-  i>     upgrade * proto=exp-ssh-v2-0001\n (glob)
+  i>     upgrade * proto=exp-ssh-v2-0003\n (glob)
   i>     hello\n
   i>     between\n
   i>     pairs 81\n
   i>     0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   i> flush() -> None
   o> readline() -> 62:
-  o>     upgraded * exp-ssh-v2-0001\n (glob)
+  o>     upgraded * exp-ssh-v2-0003\n (glob)
   o> readline() -> 4:
-  o>     412\n
-  o> read(412) -> 412: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  o>     426\n
+  o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   o> read(1) -> 1:
   o>     \n
   sending batch with 3 sub-commands
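
The hunks above all exercise the same exchange: the client sends "hello" and
a "between" probe (prefixed, when requesting version 2, by an "upgrade
<token> proto=exp-ssh-v2-0003" line), and the server replies with a decimal
byte count on its own line followed by the "capabilities:" payload. A
minimal sketch of parsing that reply, assuming a binary file object for the
server's stdout; readhello() is an invented name and this is illustrative
only, not Mercurial's actual client code:

    def readhello(stdout):
        # A v2 server confirms the upgrade first:
        # b"upgraded <token> exp-ssh-v2-0003\n"
        line = stdout.readline()
        if line.startswith(b'upgraded '):
            line = stdout.readline()
        # v1 advertises the payload length including the trailing newline,
        # v2 without it, hence the 427/426 pairs in the traces above.
        caps = stdout.read(int(line))
        if not caps.startswith(b'capabilities: '):
            raise ValueError('unexpected hello response')
        return set(caps[len(b'capabilities: '):].split())
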
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-repoerror.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,68 @@
+#require unix-permissions no-root
+
+initial setup
+
+  $ cat << EOF >> $HGRCPATH
+  > [ui]
+  > ssh="$PYTHON" "$TESTDIR/dummyssh"
+  > EOF
+
+repository itself is non-readable
+---------------------------------
+
+  $ hg init no-read
+  $ hg id ssh://user@dummy/no-read
+  000000000000
+  $ chmod a-rx no-read
+
+  $ hg id ssh://user@dummy/no-read
+  remote: abort: Permission denied: *$TESTTMP/no-read/.hg* (glob)
+  abort: no suitable response from remote hg!
+  [255]
+
+special case files are visible, but unreadable
+----------------------------------------------
+
+This is "similar" to the test above, but the directory is "traversable". This
+seems like an unexpected case in real life, but we test it anyway.
+
+  $ hg init other
+  $ hg id ssh://user@dummy/other
+  000000000000
+  $ for item in `find other | sort -r` ; do
+  >     chmod a-r $item
+  > done
+
+  $ hg id ssh://user@dummy/other
+  remote: abort: Permission denied: $TESTTMP/other/.hg/requires
+  abort: no suitable response from remote hg!
+  [255]
+
+directory leading to the repository is not accessible
+-----------------------------------------------------
+
+  $ mkdir deep
+  $ hg init deep/nested
+
+  $ hg id ssh://user@dummy/deep/nested
+  000000000000
+
+  $ chmod a-rx deep
+
+  $ hg id ssh://user@dummy/deep/nested
+  remote: abort: Permission denied: *$TESTTMP/deep/nested/.hg* (glob)
+  abort: no suitable response from remote hg!
+  [255]
+
+repository has wrong requirement
+--------------------------------
+
+  $ hg init repo-future
+  $ hg id ssh://user@dummy/repo-future
+  000000000000
+  $ echo flying-car >> repo-future/.hg/requires
+  $ hg id ssh://user@dummy/repo-future
+  remote: abort: repository requires features unknown to this Mercurial: flying-car!
+  remote: (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
+  abort: no suitable response from remote hg!
+  [255]
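
Each scenario in this new test fails the same way: the server process aborts
before completing the handshake, so the client never receives a length line.
A hedged sketch of that client-side check (checkhello() is a hypothetical
helper, not hg's code; it only illustrates why every case above ends with
the same generic message):

    def checkhello(stdout):
        first = stdout.readline()
        # On an early remote abort, readline() returns b"" (EOF) or garbage
        # rather than a decimal length such as b"427\n".
        if not first.strip().isdigit():
            raise IOError('no suitable response from remote hg!')
        return int(first)
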
--- a/tests/test-ssh.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-ssh.t	Mon Oct 22 14:46:06 2018 -0400
@@ -38,7 +38,7 @@
   > [hooks]
   > changegroup = sh -c "printenv.py changegroup-in-remote 0 ../dummylog"
   > EOF
-  $ cd ..
+  $ cd $TESTTMP
 
 repo not found error
 
@@ -60,10 +60,8 @@
 
   $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/remote local-stream
   streaming all changes
-  4 files to transfer, 602 bytes of data
-  transferred 602 bytes in * seconds (*) (glob)
-  searching for changes
-  no changes found
+  8 files to transfer, 827 bytes of data
+  transferred 827 bytes in * seconds (*) (glob)
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local-stream
@@ -72,26 +70,24 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 2 files
   $ hg branches
   default                        0:1160648e36ce
-  $ cd ..
+  $ cd $TESTTMP
 
 clone bookmarks via stream
 
   $ hg -R local-stream book mybook
   $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" --stream ssh://user@dummy/local-stream stream2
   streaming all changes
-  4 files to transfer, 602 bytes of data
-  transferred 602 bytes in * seconds (*) (glob)
-  searching for changes
-  no changes found
+  9 files to transfer, 870 bytes of data
+  transferred 870 bytes in * seconds (*) (glob)
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd stream2
   $ hg book
      mybook                    0:1160648e36ce
-  $ cd ..
+  $ cd $TESTTMP
   $ rm -rf local-stream stream2
 
 #endif
@@ -116,7 +112,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 3 changesets, 2 total revisions
+  checked 3 changesets with 2 changes to 2 files
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
   > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
@@ -198,7 +194,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  $ cd ../remote
+  $ cd $TESTTMP/remote
 
 check remote tip
 
@@ -215,7 +211,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 4 changesets, 3 total revisions
+  checked 4 changesets with 3 changes to 2 files
   $ hg cat -r tip foo
   bleah
   $ echo z > z
@@ -224,7 +220,7 @@
 
 test pushkeys and bookmarks
 
-  $ cd ../local
+  $ cd $TESTTMP/local
   $ hg debugpushkey --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
   bookmarks	
   namespaces	
@@ -284,7 +280,7 @@
 
   $ cat <<EOF >> ../remote/.hg/hgrc
   > [hooks]
-  > changegroup.stdout = $PYTHON $TESTTMP/badhook
+  > changegroup.stdout = "$PYTHON" $TESTTMP/badhook
   > changegroup.pystdout = python:$TESTTMP/badpyhook.py:hook
   > EOF
   $ echo r > r
@@ -363,7 +359,7 @@
   abort: password in URL not supported!
   [255]
 
-  $ cd ..
+  $ cd $TESTTMP
 
 hide outer repo
   $ hg init
@@ -428,7 +424,7 @@
   abort: no suitable response from remote hg!
   [255]
 
-  $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" $PYTHON "$TESTDIR/../contrib/hg-ssh"
+  $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" "$PYTHON" "$TESTDIR/../contrib/hg-ssh"
   Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
   [255]
 
@@ -464,7 +460,7 @@
   abort: push failed on remote
   [255]
 
-  $ cd ..
+  $ cd $TESTTMP
 
 stderr from remote commands should be printed before stdout from local code (issue4336)
 
@@ -512,14 +508,14 @@
   $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
   pulling from ssh://user@dummy/remote
   running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
-  sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
+  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
   devel-peer-request: hello+between
   devel-peer-request:   pairs: 81 bytes
   sending hello command
   sending between command
-  remote: 413 (sshv1 !)
-  protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
-  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  remote: 427 (sshv1 !)
+  protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
+  remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   remote: 1 (sshv1 !)
   devel-peer-request: protocaps
   devel-peer-request:   caps: * bytes (glob)
@@ -553,7 +549,7 @@
   bundle2-input-bundle: 2 parts total
   checking for updated bookmarks
 
-  $ cd ..
+  $ cd $TESTTMP
 
   $ cat dummylog
   Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
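
A recurring fix in the test-ssh.t hunks above is quoting "$PYTHON" so the
harness keeps working when the interpreter path contains spaces. The same
precaution when assembling such commands from Python looks roughly like
this (a general illustration with an invented path, not hg code):

    import shlex

    python = '/opt/my python/bin/python3'   # hypothetical path with a space
    cmd = '%s %s' % (shlex.quote(python), shlex.quote('script.py'))
    print(cmd)   # '/opt/my python/bin/python3' script.py
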
--- a/tests/test-static-http.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-static-http.t	Mon Oct 22 14:46:06 2018 -0400
@@ -9,7 +9,7 @@
 This server doesn't do range requests, so it's basically only good for
 one pull
 
-  $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
+  $ "$PYTHON" "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
   > --logfile server.log
   $ cat dumb.pid >> $DAEMON_PIDS
   $ hg init remote
@@ -43,7 +43,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 1 changesets, 2 total revisions
+  checked 1 changesets with 2 changes to 2 files
   $ cat bar
   foo
   $ cd ../remote
@@ -130,7 +130,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 1 changesets, 3 total revisions
+  checked 1 changesets with 3 changes to 3 files
   checking subrepo links
   $ cat a
   a
@@ -151,7 +151,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 0 changesets, 0 total revisions
+  checked 0 changesets with 0 changes to 0 files
   $ hg paths
   default = static-http://localhost:$HGPORT/remotempty
 
--- a/tests/test-status-color.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-status-color.t	Mon Oct 22 14:46:06 2018 -0400
@@ -168,10 +168,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg ci -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ hg log --color=debug
   [log.changeset changeset.draft|changeset:   0:389aef86a55e]
   [log.tag|tag:         tip]
@@ -240,8 +240,8 @@
 
 #if tic
 
-  $ mkdir "$TESTTMP/terminfo"
-  $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
+  $ tic -o "$TESTTMP/terminfo" "$TESTDIR/hgterm.ti"
+  $ ln -s "$TESTTMP/terminfo" "$TESTTMP/terminfo.cdb"
   $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
@@ -296,10 +296,10 @@
   $ touch modified removed deleted ignored
   $ echo "^ignored$" > .hgignore
   $ hg commit -A -m 'initial checkin'
-  adding .hgignore
-  adding deleted
-  adding modified
-  adding removed
+  \x1b[0;32madding .hgignore\x1b[0m (esc)
+  \x1b[0;32madding deleted\x1b[0m (esc)
+  \x1b[0;32madding modified\x1b[0m (esc)
+  \x1b[0;32madding removed\x1b[0m (esc)
   $ touch added unknown ignored
   $ hg add added
   $ echo "test" >> modified
@@ -393,6 +393,7 @@
 
   $ hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
 color coding of error message without curses
@@ -400,6 +401,7 @@
   $ echo 'raise ImportError' > curses.py
   $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null
   hg: unknown command 'unknowncommand'
+  (use 'hg help' for a list of commands)
   [255]
 
   $ cd ..
--- a/tests/test-status-inprocess.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-status-inprocess.py	Mon Oct 22 14:46:06 2018 -0400
@@ -22,7 +22,7 @@
 u = uimod.ui.load()
 
 print('% creating repo')
-repo = localrepo.localrepository(u, b'.', create=True)
+repo = localrepo.instance(u, b'.', create=True)
 
 f = open('test.py', 'w')
 try:
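
The one-line change above tracks a refactoring in this release: repositories
are now obtained through the localrepo.instance() factory, which appears
intended to let the factory choose the concrete repository class, rather
than by instantiating localrepository directly. In the test's terms the new
call is simply:

    from mercurial import localrepo, ui as uimod

    u = uimod.ui.load()
    repo = localrepo.instance(u, b'.', create=True)
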
--- a/tests/test-status-rev.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-status-rev.t	Mon Oct 22 14:46:06 2018 -0400
@@ -5,7 +5,7 @@
 
 First commit
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 1
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 1
   $ hg addremove --similarity 0
   adding content1_content1_content1-tracked
   adding content1_content1_content1-untracked
@@ -31,7 +31,7 @@
 
 Second commit
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 2
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 2
   $ hg addremove --similarity 0
   removing content1_missing_content1-tracked
   removing content1_missing_content1-untracked
@@ -49,7 +49,7 @@
 
 Working copy
 
-  $ $PYTHON $TESTDIR/generate-working-copy-states.py state 2 wc
+  $ "$PYTHON" $TESTDIR/generate-working-copy-states.py state 2 wc
   $ hg addremove --similarity 0
   adding content1_missing_content1-tracked
   adding content1_missing_content1-untracked
--- a/tests/test-status.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-status.t	Mon Oct 22 14:46:06 2018 -0400
@@ -239,8 +239,8 @@
     "status": "A"
    },
    {
-    "copy": "modified",
     "path": "copied",
+    "source": "modified",
     "status": "A"
    },
    {
@@ -272,8 +272,17 @@
   $ hg status -A -Tpickle > pickle
   >>> from __future__ import print_function
   >>> import pickle
-  >>> print(sorted((x['status'], x['path']) for x in pickle.load(open("pickle"))))
-  [('!', 'deleted'), ('?', 'pickle'), ('?', 'unknown'), ('A', 'added'), ('A', 'copied'), ('C', '.hgignore'), ('C', 'modified'), ('I', 'ignored'), ('R', 'removed')]
+  >>> data = sorted((x[b'status'].decode(), x[b'path'].decode()) for x in pickle.load(open("pickle", r"rb")))
+  >>> for s, p in data: print("%s %s" % (s, p))
+  ! deleted
+  ? pickle
+  ? unknown
+  A added
+  A copied
+  C .hgignore
+  C modified
+  I ignored
+  R removed
   $ rm pickle
 
   $ echo "^ignoreddir$" > .hgignore
@@ -282,7 +291,7 @@
 
 Test templater support:
 
-  $ hg status -AT "[{status}]\t{if(copy, '{copy} -> ')}{path}\n"
+  $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n"
   [M]	.hgignore
   [A]	added
   [A]	modified -> copied
@@ -528,7 +537,10 @@
 
   $ hg status --config ui.formatdebug=True --rev 1 1
   status = [
-      {*'path': '1/2/3/4/5/b.txt'*}, (glob)
+      {
+          'path': '1/2/3/4/5/b.txt',
+          'status': 'R'
+      },
   ]
 
 #if windows
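
The doctest update above is the Python 3 migration in miniature: records in
the -Tpickle output are keyed and valued with bytes, so consumers must open
the file in binary mode and decode explicitly. The same read as a
standalone script, assuming a file produced by "hg status -A -Tpickle":

    import pickle

    with open('pickle', 'rb') as fh:   # binary mode is required
        entries = pickle.load(fh)
    # Each record is a dict with byte keys: b'status', b'path', and
    # b'source' for copies.
    for e in sorted(entries, key=lambda e: (e[b'status'], e[b'path'])):
        print('%s %s' % (e[b'status'].decode(), e[b'path'].decode()))
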
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-storage.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,125 @@
+# This test verifies the conformance of various classes to various
+# storage interfaces.
+from __future__ import absolute_import
+
+import silenttestrunner
+
+from mercurial import (
+    error,
+    filelog,
+    revlog,
+    transaction,
+    ui as uimod,
+    vfs as vfsmod,
+)
+
+from mercurial.testing import (
+    storage as storagetesting,
+)
+
+try:
+    from hgext import (
+        sqlitestore,
+    )
+except ImportError:
+    sqlitestore = None
+
+try:
+    from mercurial import zstd
+    zstd.__version__
+except ImportError:
+    zstd = None
+
+STATE = {
+    'lastindex': 0,
+    'ui': uimod.ui(),
+    'vfs': vfsmod.vfs(b'.', realpath=True),
+}
+
+def makefilefn(self):
+    """Factory for filelog instances."""
+    fl = filelog.filelog(STATE['vfs'], b'filelog-%d' % STATE['lastindex'])
+    STATE['lastindex'] += 1
+    return fl
+
+def maketransaction(self):
+    vfsmap = {b'plain': STATE['vfs'], b'store': STATE['vfs']}
+
+    return transaction.transaction(STATE['ui'].warn, STATE['vfs'], vfsmap,
+                                   b'journal', b'undo')
+
+def addrawrevision(self, fl, tr, node, p1, p2, linkrev, rawtext=None,
+                   delta=None, censored=False, ellipsis=False, extstored=False):
+    flags = 0
+
+    if censored:
+        flags |= revlog.REVIDX_ISCENSORED
+    if ellipsis:
+        flags |= revlog.REVIDX_ELLIPSIS
+    if extstored:
+        flags |= revlog.REVIDX_EXTSTORED
+
+    if rawtext is not None:
+        fl._revlog.addrawrevision(rawtext, tr, linkrev, p1, p2, node, flags)
+    elif delta is not None:
+        fl._revlog.addrawrevision(rawtext, tr, linkrev, p1, p2, node, flags,
+                                  cachedelta=delta)
+    else:
+        raise error.Abort(b'must supply rawtext or delta arguments')
+
+    # We may insert bad data. Clear caches to prevent e.g. cache hits from
+    # bypassing hash verification.
+    fl._revlog.clearcaches()
+
+# Assigning module-level attributes that inherit from unittest.TestCase
+# is all that is needed to register tests.
+filelogindextests = storagetesting.makeifileindextests(makefilefn,
+                                                       maketransaction,
+                                                       addrawrevision)
+filelogdatatests = storagetesting.makeifiledatatests(makefilefn,
+                                                     maketransaction,
+                                                     addrawrevision)
+filelogmutationtests = storagetesting.makeifilemutationtests(makefilefn,
+                                                             maketransaction,
+                                                             addrawrevision)
+
+def makesqlitefile(self):
+    path = STATE['vfs'].join(b'db-%d.db' % STATE['lastindex'])
+    STATE['lastindex'] += 1
+
+    db = sqlitestore.makedb(path)
+
+    compression = b'zstd' if zstd else b'zlib'
+
+    return sqlitestore.sqlitefilestore(db, b'dummy-path', compression)
+
+def addrawrevisionsqlite(self, fl, tr, node, p1, p2, linkrev, rawtext=None,
+                         delta=None, censored=False, ellipsis=False,
+                         extstored=False):
+    flags = 0
+
+    if censored:
+        flags |= sqlitestore.FLAG_CENSORED
+
+    if ellipsis or extstored:
+        raise error.Abort(b'ellipsis and extstored flags are not '
+                          b'supported')
+
+    if rawtext is not None:
+        fl._addrawrevision(node, rawtext, tr, linkrev, p1, p2, flags=flags)
+    elif delta is not None:
+        fl._addrawrevision(node, rawtext, tr, linkrev, p1, p2,
+                           storedelta=delta, flags=flags)
+    else:
+        raise error.Abort(b'must supply rawtext or delta arguments')
+
+if sqlitestore is not None:
+    sqlitefileindextests = storagetesting.makeifileindextests(
+        makesqlitefile, maketransaction, addrawrevisionsqlite)
+    sqlitefiledatatests = storagetesting.makeifiledatatests(
+        makesqlitefile, maketransaction, addrawrevisionsqlite)
+    sqlitefilemutationtests = storagetesting.makeifilemutationtests(
+        makesqlitefile, maketransaction, addrawrevisionsqlite)
+
+if __name__ == '__main__':
+    silenttestrunner.main(__name__)
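
test-storage.py relies on the unittest behaviour called out in its comment:
any TestCase subclass bound to a module-level name is collected by the
runner, so the storagetesting factories only need to return classes. A
self-contained illustration of the pattern (maketests and the test names
are invented for this example):

    import unittest

    def maketests(value):
        # Build a TestCase class parameterized by `value`.
        class Tests(unittest.TestCase):
            def testequal(self):
                self.assertEqual(value, value)
        return Tests

    IntTests = maketests(42)      # module-level assignment registers these
    BytesTests = maketests(b'hg')

    if __name__ == '__main__':
        unittest.main()
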
--- a/tests/test-stream-bundle-v2.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-stream-bundle-v2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -88,6 +88,7 @@
   transferred 1.65 KB in \d\.\d seconds \(.*/sec\) (re)
   bundle2-input-part: total payload size 1840
   bundle2-input-bundle: 0 parts total
+  updating the branch cache
   finished applying clone bundle
   query 1; heads
   sending batch command
@@ -116,6 +117,7 @@
    E: remote created -> g
   getting E
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ hg clone --stream http://localhost:$HGPORT streamv2-clone-explicit --debug
   using http://localhost:$HGPORT/
@@ -142,6 +144,7 @@
   transferred 1.65 KB in *.* seconds (*/sec) (glob)
   bundle2-input-part: total payload size 1840
   bundle2-input-bundle: 0 parts total
+  updating the branch cache
   finished applying clone bundle
   query 1; heads
   sending batch command
@@ -170,3 +173,4 @@
    E: remote created -> g
   getting E
   5 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
--- a/tests/test-strict.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-strict.t	Mon Oct 22 14:46:06 2018 -0400
@@ -15,29 +15,7 @@
 
   $ hg an a
   hg: unknown command 'an'
-  Mercurial Distributed SCM
-  
-  basic commands:
-  
-   add           add the specified files on the next commit
-   annotate      show changeset information by line for each file
-   clone         make a copy of an existing repository
-   commit        commit the specified files or all outstanding changes
-   diff          diff repository (or selected files)
-   export        dump the header and diffs for one or more changesets
-   forget        forget the specified files on the next commit
-   init          create a new repository in the given directory
-   log           show revision history of entire repository or files
-   merge         merge another revision into working directory
-   pull          pull changes from the specified source
-   push          push changes to the specified destination
-   remove        remove the specified files on the next commit
-   serve         start stand-alone webserver
-   status        show changed files in the working directory
-   summary       summarize working directory state
-   update        update working directory (or switch revisions)
-  
-  (use 'hg help' for the full list of commands or 'hg -v' for details)
+  (use 'hg help' for a list of commands)
   [255]
   $ hg annotate a
   0: a
--- a/tests/test-strip-cross.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-strip-cross.t	Mon Oct 22 14:46:06 2018 -0400
@@ -103,7 +103,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 4 changesets, 15 total revisions
+  checked 4 changesets with 15 changes to 7 files
   
   % Trying to strip revision 1
   saved backup bundle to $TESTTMP/1/.hg/strip-backup/*-backup.hg (glob)
@@ -112,7 +112,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 4 changesets, 14 total revisions
+  checked 4 changesets with 14 changes to 7 files
   
   % Trying to strip revision 2
   saved backup bundle to $TESTTMP/2/.hg/strip-backup/*-backup.hg (glob)
@@ -121,7 +121,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 4 changesets, 14 total revisions
+  checked 4 changesets with 14 changes to 7 files
   
   % Trying to strip revision 3
   saved backup bundle to $TESTTMP/3/.hg/strip-backup/*-backup.hg (glob)
@@ -130,7 +130,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 4 changesets, 19 total revisions
+  checked 4 changesets with 19 changes to 7 files
   
   % Trying to strip revision 4
   saved backup bundle to $TESTTMP/4/.hg/strip-backup/*-backup.hg (glob)
@@ -139,5 +139,5 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  7 files, 4 changesets, 19 total revisions
+  checked 4 changesets with 19 changes to 7 files
   
--- a/tests/test-strip.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-strip.t	Mon Oct 22 14:46:06 2018 -0400
@@ -220,7 +220,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 1 files (+1 heads)
-  new changesets 264128213d29
+  new changesets 264128213d29 (1 drafts)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ rm .hg/strip-backup/*
   $ hg log --graph
@@ -715,14 +715,14 @@
   
   options ([+] can be repeated):
   
-   -r --rev REV [+]        strip specified revision (optional, can specify
-                           revisions without this option)
-   -f --force              force removal of changesets, discard uncommitted
-                           changes (no backup)
-      --no-backup          do not save backup bundle
-   -k --keep               do not modify working directory during strip
-   -B --bookmark VALUE [+] remove revs only reachable from given bookmark
-      --mq                 operate on patch repository
+   -r --rev REV [+]           strip specified revision (optional, can specify
+                              revisions without this option)
+   -f --force                 force removal of changesets, discard uncommitted
+                              changes (no backup)
+      --no-backup             do not save backup bundle
+   -k --keep                  do not modify working directory during strip
+   -B --bookmark BOOKMARK [+] remove revs only reachable from given bookmark
+      --mq                    operate on patch repository
   
   (use 'hg strip -h' to show more help)
   [255]
@@ -1123,7 +1123,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 1 changes to 1 files
-  new changesets 35358f982181:4cf5e92caec2
+  new changesets 35358f982181:4cf5e92caec2 (2 drafts)
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ hg strip -k -r 35358f982181
@@ -1246,7 +1246,7 @@
   >                            node(b'D'): [node(b'D2')],
   >                            node(b'G'): [node(b'G2')]}
   >                 scmutil.cleanupnodes(repo, mapping, b'replace')
-  >                 scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2'),
+  >                 scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2-obsolete()'),
   >                                      b'replace')
   > EOF
   $ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py
--- a/tests/test-subrepo-deep-nested-change.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-subrepo-deep-nested-change.t	Mon Oct 22 14:46:06 2018 -0400
@@ -203,21 +203,8 @@
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 Largefiles is NOT enabled in the clone if the source repo doesn't require it
-  $ cat cloned/.hg/hgrc
-  # example repository config (see 'hg help config' for more info)
-  [paths]
-  default = $TESTTMP/main
-  
-  # path aliases to other clones of this repo in URLs or filesystem paths
-  # (see 'hg help config.paths' for more info)
-  #
-  # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
-  # my-fork         = ssh://jdoe@example.net/hg/jdoes-fork
-  # my-clone        = /home/jdoe/jdoes-clone
-  
-  [ui]
-  # name and email (local to this repository, optional), e.g.
-  # username = Jane Doe <jdoe@example.com>
+  $ grep largefiles cloned/.hg/hgrc
+  [1]
 
 Checking cloned repo ids
 
@@ -790,29 +777,13 @@
   $ rm -rf ../archive_lf
 
 The local repo enables largefiles if a largefiles repo is cloned
+
   $ hg showconfig extensions
-  abort: repository requires features unknown to this Mercurial: largefiles!
-  (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
-  [255]
+  extensions.largefiles=
+
   $ hg --config extensions.largefiles= clone -qU . ../lfclone
-  $ cat ../lfclone/.hg/hgrc
-  # example repository config (see 'hg help config' for more info)
-  [paths]
-  default = $TESTTMP/cloned
-  
-  # path aliases to other clones of this repo in URLs or filesystem paths
-  # (see 'hg help config.paths' for more info)
-  #
-  # default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
-  # my-fork         = ssh://jdoe@example.net/hg/jdoes-fork
-  # my-clone        = /home/jdoe/jdoes-clone
-  
-  [ui]
-  # name and email (local to this repository, optional), e.g.
-  # username = Jane Doe <jdoe@example.com>
-  
-  [extensions]
-  largefiles=
+  $ grep largefiles ../lfclone/.hg/requires
+  largefiles
 
 Find an exact match to a standin (should archive nothing)
   $ hg --config extensions.largefiles= archive -S -I 'sub/sub2/.hglf/large.bin' ../archive_lf
--- a/tests/test-subrepo-missing.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-subrepo-missing.t	Mon Oct 22 14:46:06 2018 -0400
@@ -114,7 +114,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   checking subrepo links
   subrepo 'subrepo' is hidden in revision a66de08943b6
   subrepo 'subrepo' is hidden in revision 674d05939c1e
@@ -128,7 +128,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 5 changesets, 5 total revisions
+  checked 5 changesets with 5 changes to 2 files
   checking subrepo links
   0: repository $TESTTMP/repo/subrepo not found
   1: repository $TESTTMP/repo/subrepo not found
--- a/tests/test-subrepo-svn.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-subrepo-svn.t	Mon Oct 22 14:46:06 2018 -0400
@@ -2,9 +2,9 @@
 
   $ SVNREPOPATH=`pwd`/svn-repo
 #if windows
-  $ SVNREPOURL=file:///`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #else
-  $ SVNREPOURL=file://`$PYTHON -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
 #endif
 
   $ filter_svn_output () {
@@ -247,7 +247,7 @@
 
 verify subrepo is contained within the repo directory
 
-  $ $PYTHON -c "import os.path; print os.path.exists('s')"
+  $ "$PYTHON" -c "from __future__ import print_function; import os.path; print(os.path.exists('s'))"
   True
 
 update to nullrev (must delete the subrepo)
--- a/tests/test-subrepo.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-subrepo.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1066,19 +1066,18 @@
   $ hg cat sub/repo/foo -Tjson | sed 's|\\\\|/|g'
   [
    {
-    "abspath": "foo",
     "data": "test\ntest\n",
-    "path": "sub/repo/foo"
+    "path": "foo"
    }
   ]
 
  non-exact match:
 
-  $ hg cat -T '{path}\n' 'glob:**'
+  $ hg cat -T '{path|relpath}\n' 'glob:**'
   .hgsub
   .hgsubstate
   sub/repo/foo
-  $ hg cat -T '{path}\n' 're:^sub'
+  $ hg cat -T '{path|relpath}\n' 're:^sub'
   sub/repo/foo
 
  missing subrepos in working directory:
--- a/tests/test-symlink-os-yes-fs-no.py.out	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-symlink-os-yes-fs-no.py.out	Mon Oct 22 14:46:06 2018 -0400
@@ -2,11 +2,11 @@
 adding manifests
 adding file changes
 added 1 changesets with 4 changes to 4 files
-new changesets d326ae2d01ee
+new changesets d326ae2d01ee (1 drafts)
 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
 adding changesets
 adding manifests
 adding file changes
 added 1 changesets with 4 changes to 4 files
-new changesets d326ae2d01ee
+new changesets d326ae2d01ee (1 drafts)
 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-tag.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-tag.t	Mon Oct 22 14:46:06 2018 -0400
@@ -230,7 +230,7 @@
 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
 doesn't end with EOL
 
-  $ $PYTHON << EOF
+  $ "$PYTHON" << EOF
   > f = open('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
   > f = open('.hg/localtags', 'w'); f.write(last); f.close()
   > EOF
@@ -242,7 +242,7 @@
   c2899151f4e76890c602a2597a650a72666681bf localnewline
   
 
-  $ $PYTHON << EOF
+  $ "$PYTHON" << EOF
   > f = open('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
   > f = open('.hgtags', 'w'); f.write(last); f.close()
   > EOF
--- a/tests/test-template-functions.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-template-functions.t	Mon Oct 22 14:46:06 2018 -0400
@@ -696,6 +696,12 @@
   > '{pad(label(red, "red"), 5, label(cyan, "-"))}\n'
   \x1b[0;31mred\x1b[0m\x1b[0;36m-\x1b[0m\x1b[0;36m-\x1b[0m (esc)
 
+pad() with truncate has to strip color codes, though
+
+  $ hg debugtemplate --color=always \
+  > '{pad(label(red, "scarlet"), 5, truncate=true)}\n'
+  scarl
+
 label should be no-op if color is disabled:
 
   $ hg log --color=never -l 1 --template '{label(red, "text\n")}'
@@ -798,6 +804,8 @@
   e777603221
   bcc7ff960b
   f7769ec2ab
+  $ hg log --template '{shortest(node, 1)}\n' -r null
+  00
   $ hg log --template '{node|shortest}\n' -l1
   e777
 
@@ -892,6 +900,11 @@
   $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
   4:107
 
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n'
+  4:x10
+  $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden
+  4:x10
+
  node 'c562' should be unique if the other 'c562' nodes are hidden
  (but we don't try the slow path to filter out hidden nodes for now)
 
@@ -904,6 +917,55 @@
 
   $ cd ..
 
+Test prefixhexnode when the first character of the hash is 0.
+  $ hg init hashcollision2
+  $ cd hashcollision2
+  $ cat <<EOF >> .hg/hgrc
+  > [experimental]
+  > evolution.createmarkers=True
+  > EOF
+  $ echo 0 > a
+  $ hg ci -qAm 0
+  $ echo 21 > a
+  $ hg ci -qm 21
+  $ hg up -q null
+  $ hg log -r0: -T '{rev}:{node}\n'
+  0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a
+  1:0cf177ba2b1dc3862a00fb81715fec90950201be
+
+ we need the 'x' prefix to ensure we aren't colliding with rev 0. Without
+ disambiguatewithin we would identify the collision with nullid, so we need
+ to set that as well.
+  $ hg --config experimental.revisions.disambiguatewithin='descendants(0)' \
+  >    --config experimental.revisions.prefixhexnode=yes \
+  >    log -r 1 -T '{rev}:{shortest(node, 0)}\n'
+  1:x0
+
+  $ hg debugobsolete 0cf177ba2b1dc3862a00fb81715fec90950201be
+  obsoleted 1 changesets
+  $ hg up -q 0
+  $ echo 61 > a
+  $ hg ci -m 61
+  $ hg log -r0: -T '{rev}:{node}\n'
+  0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a
+  2:01384dde84b3a511ae0835f35ac40bd806c99bb8
+
+ we still have the 'x' prefix because '0' is still the shortest prefix, since
+ rev1's '0c' is hidden.
+  $ hg --config experimental.revisions.disambiguatewithin=0:-1-0 \
+  >    --config experimental.revisions.prefixhexnode=yes \
+  >    log -r 0:-1-0 -T '{rev}:{shortest(node, 0)}\n'
+  2:x0
+
+ we don't have the 'x' prefix on 2 because '01' is not a synonym for rev1.
+  $ hg --config experimental.revisions.disambiguatewithin=0:-1-0 \
+  >    --config experimental.revisions.prefixhexnode=yes \
+  >    log -r 0:-1-0 -T '{rev}:{shortest(node, 0)}\n' --hidden
+  1:0c
+  2:01
+
+  $ cd ..
+
 Test pad function
 
   $ cd r
@@ -923,6 +985,15 @@
   1------------------- {node|short}
   0------------------- test
 
+  $ hg log --template '{pad(author, 5, "-", False, True)}\n'
+  test-
+  {node
+  test-
+  $ hg log --template '{pad(author, 5, "-", True, True)}\n'
+  -test
+  hort}
+  -test
+
 Test template string in pad function
 
   $ hg log -r 0 -T '{pad("\{{rev}}", 10)} {author|user}\n'
@@ -1193,6 +1264,12 @@
   
   0
   
+
+  $ hg log -l1 -T "{files('aa') % '{file}\n'}"
+  aa
+  $ hg log -l1 -T "{files('aa') % '{path}\n'}"
+  aa
+
   $ hg rm a
   $ hg log -r "wdir()" -T "{rev}\n{join(files('*'), '\n')}\n"
   2147483647
@@ -1382,7 +1459,7 @@
 
   $ hg init nonascii
   $ cd nonascii
-  $ $PYTHON <<EOF
+  $ "$PYTHON" <<EOF
   > open('latin1', 'wb').write(b'\xe9')
   > open('utf-8', 'wb').write(b'\xc3\xa9')
   > EOF
--- a/tests/test-template-keywords.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-template-keywords.t	Mon Oct 22 14:46:06 2018 -0400
@@ -52,11 +52,34 @@
   $ hg log -r 'wdir()' -T '{rev}:{node}\n'
   2147483647:ffffffffffffffffffffffffffffffffffffffff
 
-Some keywords are invalid for working-directory revision, but they should
-never cause crash:
+  $ hg log -r 'wdir()' -Tjson --debug
+  [
+   {
+    "added": [],
+    "bookmarks": [],
+    "branch": "default",
+    "date": [0, 0],
+    "desc": "",
+    "extra": {"branch": "default"},
+    "manifest": "ffffffffffffffffffffffffffffffffffffffff",
+    "modified": [],
+    "node": "ffffffffffffffffffffffffffffffffffffffff",
+    "parents": ["95c24699272ef57d062b8bccc32c878bf841784a"],
+    "phase": "draft",
+    "removed": [],
+    "rev": 2147483647,
+    "tags": [],
+    "user": "test"
+   }
+  ]
 
   $ hg log -r 'wdir()' -T '{manifest}\n'
-  
+  2147483647:ffffffffffff
+
+Changectx-derived keywords are disabled within {manifest} as {node} changes:
+
+  $ hg log -r0 -T 'outer:{p1node} {manifest % "inner:{p1node}"}\n'
+  outer:0000000000000000000000000000000000000000 inner:
 
 Check that {phase} works correctly on parents:
 
@@ -91,7 +114,7 @@
   $ for key in author branch branches date desc file_adds file_dels file_mods \
   >         file_copies file_copies_switch files \
   >         manifest node parents rev tags diffstat extras \
-  >         p1rev p2rev p1node p2node; do
+  >         p1rev p2rev p1node p2node user; do
   >     for mode in '' --verbose --debug; do
   >         hg log $mode --template "$key$mode: {$key}\n"
   >     done
@@ -702,6 +725,33 @@
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
   p2node--debug: 0000000000000000000000000000000000000000
+  user: test
+  user: User Name <user@hostname>
+  user: person
+  user: person
+  user: person
+  user: person
+  user: other@place
+  user: A. N. Other <other@place>
+  user: User Name <user@hostname>
+  user--verbose: test
+  user--verbose: User Name <user@hostname>
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: person
+  user--verbose: other@place
+  user--verbose: A. N. Other <other@place>
+  user--verbose: User Name <user@hostname>
+  user--debug: test
+  user--debug: User Name <user@hostname>
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: person
+  user--debug: other@place
+  user--debug: A. N. Other <other@place>
+  user--debug: User Name <user@hostname>
 
 Add a dummy commit to make up for the instability of the above:
 
@@ -718,6 +768,64 @@
   $ hg rm a
   $ hg ci -m "Modify, add, remove, rename"
 
+Test files list:
+
+  $ hg log -l1 -T '{join(file_mods, " ")}\n'
+  third
+  $ hg log -l1 -T '{file_mods % "{file}\n"}'
+  third
+  $ hg log -l1 -T '{file_mods % "{path}\n"}'
+  third
+
+  $ hg log -l1 -T '{join(files, " ")}\n'
+  a b fifth fourth third
+  $ hg log -l1 -T '{files % "{file}\n"}'
+  a
+  b
+  fifth
+  fourth
+  third
+  $ hg log -l1 -T '{files % "{path}\n"}'
+  a
+  b
+  fifth
+  fourth
+  third
+
+Test file copies dict:
+
+  $ hg log -r8 -T '{join(file_copies, " ")}\n'
+  fourth (second)
+  $ hg log -r8 -T '{file_copies % "{name} <- {source}\n"}'
+  fourth <- second
+  $ hg log -r8 -T '{file_copies % "{path} <- {source}\n"}'
+  fourth <- second
+
+  $ hg log -r8 -T '{join(file_copies_switch, " ")}\n'
+  
+  $ hg log -r8 -C -T '{join(file_copies_switch, " ")}\n'
+  fourth (second)
+  $ hg log -r8 -C -T '{file_copies_switch % "{name} <- {source}\n"}'
+  fourth <- second
+  $ hg log -r8 -C -T '{file_copies_switch % "{path} <- {source}\n"}'
+  fourth <- second
+
+Test file attributes:
+
+  $ hg log -l1 -T '{files % "{status} {pad(size, 3, left=True)} {path}\n"}'
+  R     a
+  A   0 b
+  A   7 fifth
+  R     fourth
+  M  13 third
+
+Test file status including clean ones:
+
+  $ hg log -r9 -T '{files("**") % "{status} {path}\n"}'
+  A a
+  C fourth
+  C third
+
 Test index keyword:
 
   $ hg log -l 2 -T '{index + 10}{files % " {index}:{file}"}\n'
--- a/tests/test-template-map.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-template-map.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1756,5 +1756,5 @@
 
   $ hg -R latesttag log -r tip --style=style1989
   M|test
-  11,test
+  11,
   branch: test
--- a/tests/test-tools.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-tools.t	Mon Oct 22 14:46:06 2018 -0400
@@ -51,10 +51,10 @@
 #endif
 
 #if no-windows
-  $ $PYTHON $TESTDIR/seq.py 10 > bar
+  $ "$PYTHON" $TESTDIR/seq.py 10 > bar
 #else
 Convert CRLF -> LF for consistency
-  $ $PYTHON $TESTDIR/seq.py 10 | sed "s/$//" > bar
+  $ "$PYTHON" $TESTDIR/seq.py 10 | sed "s/$//" > bar
 #endif
 
 #if unix-permissions symlink
--- a/tests/test-transplant.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-transplant.t	Mon Oct 22 14:46:06 2018 -0400
@@ -758,7 +758,7 @@
   $ cd twin2
   $ echo '[patch]' >> .hg/hgrc
   $ echo 'eol = crlf' >> .hg/hgrc
-  $ $PYTHON -c "open('b', 'wb').write(b'b\r\nb\r\n')"
+  $ "$PYTHON" -c "open('b', 'wb').write(b'b\r\nb\r\n')"
   $ hg ci -Am addb
   adding b
   $ hg transplant -s ../twin1 tip
--- a/tests/test-treemanifest.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-treemanifest.t	Mon Oct 22 14:46:06 2018 -0400
@@ -1,6 +1,6 @@
   $ cat << EOF >> $HGRCPATH
   > [ui]
-  > ssh=$PYTHON "$TESTDIR/dummyssh"
+  > ssh="$PYTHON" "$TESTDIR/dummyssh"
   > EOF
 
 Set up repo
@@ -341,7 +341,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  new changesets 51cfd7b1e13b
+  new changesets 51cfd7b1e13b (1 drafts)
   (run 'hg update' to get a working copy)
   $ hg --config extensions.strip= strip tip
   saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
@@ -410,7 +410,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  10 files, 11 changesets, 15 total revisions
+  checked 11 changesets with 15 changes to 10 files
 
 Create deeper repo with tree manifests.
 
@@ -578,7 +578,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 #if repofncache
 Dirlogs are included in fncache
@@ -636,7 +636,7 @@
    b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
    b/foo/apple/bees/flower.py@0: in changeset but not in manifest
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
   6 warnings encountered! (reporevlogstore !)
   9 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -661,7 +661,7 @@
    (expected None)
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
   2 warnings encountered!
   8 integrity errors encountered!
   (first damaged changeset appears to be 2)
@@ -718,7 +718,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
   $ cd ..
 
 #if reporevlogstore
@@ -766,7 +766,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Local clone with encodedstore
   $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
@@ -776,7 +776,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Local clone with fncachestore
   $ hg clone -U deeprepo local-clone-fncachestore
@@ -786,55 +786,49 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Stream clone with basicstore
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT1 stream-clone-basicstore
   streaming all changes
-  18 files to transfer, * of data (glob)
+  21 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  searching for changes
-  no changes found
   $ hg -R stream-clone-basicstore verify
   checking changesets
   checking manifests
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Stream clone with encodedstore
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT2 stream-clone-encodedstore
   streaming all changes
-  18 files to transfer, * of data (glob)
+  21 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  searching for changes
-  no changes found
   $ hg -R stream-clone-encodedstore verify
   checking changesets
   checking manifests
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Stream clone with fncachestore
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT stream-clone-fncachestore
   streaming all changes
-  18 files to transfer, * of data (glob)
+  22 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  searching for changes
-  no changes found
   $ hg -R stream-clone-fncachestore verify
   checking changesets
   checking manifests
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 4 changesets, 18 total revisions
+  checked 4 changesets with 18 changes to 8 files
 
 Packed bundle
   $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
--- a/tests/test-unionrepo.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-unionrepo.t	Mon Oct 22 14:46:06 2018 -0400
@@ -128,7 +128,7 @@
   adding manifests
   adding file changes
   added 6 changesets with 11 changes to 6 files (+1 heads)
-  new changesets f093fec0529b:2f0d178c469c
+  new changesets f093fec0529b:2f0d178c469c (6 drafts)
 
   $ hg -R repo3 paths
   default = union:repo1+repo2
@@ -138,7 +138,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  6 files, 6 changesets, 11 total revisions
+  checked 6 changesets with 11 changes to 6 files
 
   $ hg -R repo3 heads --template '{rev}:{node|short}  {desc|firstline}\n'
   5:2f0d178c469c  repo2-3
--- a/tests/test-update-names.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-update-names.t	Mon Oct 22 14:46:06 2018 -0400
@@ -50,7 +50,8 @@
   $ hg st
   ? name/file
   $ hg up 1
-  abort: Directory not empty: '$TESTTMP/r1/r2/name'
+  abort: Unlinking directory not permitted: *$TESTTMP/r1/r2/name* (glob) (windows !)
+  abort: Directory not empty: '?\$TESTTMP/r1/r2/name'? (re) (no-windows !)
   [255]
   $ cat name/file
   text
--- a/tests/test-upgrade-repo.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-upgrade-repo.t	Mon Oct 22 14:46:06 2018 -0400
@@ -345,12 +345,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
-  migrating 341 bytes in store; 401 bytes tracked data
-  migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
+  migrating 917 bytes in store; 401 bytes tracked data
+  migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
   finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
+  migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
   finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
+  migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
   finished migrating 3 changelog revisions; change in size: 0 bytes
   finished migrating 9 total revisions; total change in store size: 0 bytes
   copying phaseroots
@@ -406,7 +406,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 3 changesets, 3 total revisions
+  checked 3 changesets with 3 changes to 3 files
 
 old store should be backed up
 
@@ -442,12 +442,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
-  migrating 109 bytes in store; 107 bytes tracked data
-  migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
+  migrating 301 bytes in store; 107 bytes tracked data
+  migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
   finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
+  migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
   finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
+  migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
   finished migrating 1 changelog revisions; change in size: 0 bytes
   finished migrating 3 total revisions; total change in store size: 0 bytes
   copying .XX_special_filename
@@ -476,12 +476,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
-  migrating 109 bytes in store; 107 bytes tracked data
-  migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
+  migrating 301 bytes in store; 107 bytes tracked data
+  migrating 1 filelogs containing 1 revisions (64 bytes in store; 0 bytes tracked data)
   finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
+  migrating 1 manifests containing 1 revisions (110 bytes in store; 45 bytes tracked data)
   finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
+  migrating changelog containing 1 revisions (127 bytes in store; 62 bytes tracked data)
   finished migrating 1 changelog revisions; change in size: 0 bytes
   finished migrating 3 total revisions; total change in store size: 0 bytes
   copying .XX_special_filename
@@ -531,12 +531,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
-  migrating 163 bytes in store; 160 bytes tracked data
-  migrating 1 filelogs containing 1 revisions (42 bytes in store; 41 bytes tracked data)
+  migrating 355 bytes in store; 160 bytes tracked data
+  migrating 1 filelogs containing 1 revisions (106 bytes in store; 41 bytes tracked data)
   finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 1 revisions (52 bytes in store; 51 bytes tracked data)
+  migrating 1 manifests containing 1 revisions (116 bytes in store; 51 bytes tracked data)
   finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 1 revisions (69 bytes in store; 68 bytes tracked data)
+  migrating changelog containing 1 revisions (133 bytes in store; 68 bytes tracked data)
   finished migrating 1 changelog revisions; change in size: 0 bytes
   finished migrating 3 total revisions; total change in store size: 0 bytes
   copying phaseroots
@@ -583,12 +583,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
-  migrating 417 bytes in store; 467 bytes tracked data
-  migrating 2 filelogs containing 2 revisions (168 bytes in store; 182 bytes tracked data)
+  migrating 801 bytes in store; 467 bytes tracked data
+  migrating 2 filelogs containing 2 revisions (296 bytes in store; 182 bytes tracked data)
   finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
-  migrating 1 manifests containing 2 revisions (113 bytes in store; 151 bytes tracked data)
+  migrating 1 manifests containing 2 revisions (241 bytes in store; 151 bytes tracked data)
   finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 2 revisions (136 bytes in store; 134 bytes tracked data)
+  migrating changelog containing 2 revisions (264 bytes in store; 134 bytes tracked data)
   finished migrating 2 changelog revisions; change in size: 0 bytes
   finished migrating 6 total revisions; total change in store size: 0 bytes
   copying phaseroots
@@ -613,7 +613,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  2 files, 2 changesets, 2 total revisions
+  checked 2 changesets with 2 changes to 2 files
   $ hg debugdata lfs.bin 0
   version https://git-lfs.github.com/spec/v1
   oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
@@ -681,12 +681,12 @@
   creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
   (it is safe to interrupt this process any time before data migration completes)
   migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
-  migrating 497 bytes in store; 882 bytes tracked data
-  migrating 1 filelogs containing 3 revisions (182 bytes in store; 573 bytes tracked data)
+  migrating 1.05 KB in store; 882 bytes tracked data
+  migrating 1 filelogs containing 3 revisions (374 bytes in store; 573 bytes tracked data)
   finished migrating 3 filelog revisions across 1 filelogs; change in size: -63 bytes
-  migrating 1 manifests containing 3 revisions (141 bytes in store; 138 bytes tracked data)
+  migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
   finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
-  migrating changelog containing 3 revisions (174 bytes in store; 171 bytes tracked data)
+  migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
   finished migrating 3 changelog revisions; change in size: 0 bytes
   finished migrating 9 total revisions; total change in store size: -63 bytes
   copying phaseroots
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-util.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,137 @@
+# unit tests for mercurial.util utilities
+from __future__ import absolute_import
+
+import contextlib
+import itertools
+import unittest
+
+from mercurial import pycompat, util, utils
+
+@contextlib.contextmanager
+def mocktimer(incr=0.1, *additional_targets):
+    """Replaces util.timer and additional_targets with a mock
+
+    The timer starts at 0. On each call the time is incremented by the value
+    of incr. If incr is an iterable, the time is incremented by the next
+    value from that iterable, looping in a cycle when reaching the end.
+
+    additional_targets must be a sequence of (object, attribute_name) tuples;
+    the mock is set with setattr(object, attribute_name, mock).
+
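+    A minimal usage sketch:
+
+        with mocktimer([1, 3]):
+            util.timer()  # -> 1 (0 + 1)
+            util.timer()  # -> 4 (1 + 3)
+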
+    """
+    time = [0]
+    try:
+        incr = itertools.cycle(incr)
+    except TypeError:
+        incr = itertools.repeat(incr)
+
+    def timer():
+        time[0] += next(incr)
+        return time[0]
+
+    # record original values
+    orig = util.timer
+    additional_origs = [(o, a, getattr(o, a)) for o, a in additional_targets]
+
+    # mock out targets
+    util.timer = timer
+    for obj, attr in additional_targets:
+        setattr(obj, attr, timer)
+
+    try:
+        yield
+    finally:
+        # restore originals
+        util.timer = orig
+        for args in additional_origs:
+            setattr(*args)
+
+# attr.s default factory for util.timedcmstats.start binds the timer we
+# need to mock out.
+_start_default = (util.timedcmstats.start.default, 'factory')
+
+@contextlib.contextmanager
+def capturestderr():
+    """Replace utils.procutil.stderr with a pycompat.bytesio instance
+
+    The instance is made available as the return value of __enter__.
+
+    This contextmanager is reentrant.
+
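+    A minimal usage sketch:
+
+        with capturestderr() as buf:
+            utils.procutil.stderr.write(b'message')
+        assert buf.getvalue() == b'message'
+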
+    """
+    orig = utils.procutil.stderr
+    utils.procutil.stderr = pycompat.bytesio()
+    try:
+        yield utils.procutil.stderr
+    finally:
+        utils.procutil.stderr = orig
+
+class timedtests(unittest.TestCase):
+    def testtimedcmstatsstr(self):
+        stats = util.timedcmstats()
+        self.assertEqual(str(stats), '<unknown>')
+        self.assertEqual(bytes(stats), b'<unknown>')
+        stats.elapsed = 12.34
+        self.assertEqual(str(stats), pycompat.sysstr(util.timecount(12.34)))
+        self.assertEqual(bytes(stats), util.timecount(12.34))
+
+    def testtimedcmcleanexit(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            with util.timedcm('pass') as stats:
+                # actual context doesn't matter
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimedcmnested(self):
+        # timestamps 1, 3, 6, 10, elapsed times of 6 - 3 = 3 and 10 - 1 = 9
+        with mocktimer([1, 2, 3, 4], _start_default):
+            with util.timedcm('outer') as outer_stats:
+                with util.timedcm('inner') as inner_stats:
+                    # actual context doesn't matter
+                    pass
+
+        self.assertEqual(outer_stats.start, 1)
+        self.assertEqual(outer_stats.elapsed, 9)
+        self.assertEqual(outer_stats.level, 1)
+
+        self.assertEqual(inner_stats.start, 3)
+        self.assertEqual(inner_stats.elapsed, 3)
+        self.assertEqual(inner_stats.level, 2)
+
+    def testtimedcmexception(self):
+        # timestamps 1, 4, elapsed time of 4 - 1 = 3
+        with mocktimer([1, 3], _start_default):
+            try:
+                with util.timedcm('exceptional') as stats:
+                    raise ValueError()
+            except ValueError:
+                pass
+
+        self.assertEqual(stats.start, 1)
+        self.assertEqual(stats.elapsed, 3)
+        self.assertEqual(stats.level, 1)
+
+    def testtimeddecorator(self):
+        @util.timed
+        def testfunc(callcount=1):
+            callcount -= 1
+            if callcount:
+                testfunc(callcount)
+
+        # timestamps 1, 2, 3, 4, elapsed time of 3 - 2 = 1 and 4 - 1 = 3
+        with mocktimer(1, _start_default):
+            with capturestderr() as out:
+                testfunc(2)
+
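+        # the nested call (level 2) reports first and is indented deeper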
+        self.assertEqual(out.getvalue(), (
+            b'    testfunc: 1.000 s\n'
+            b'  testfunc: 3.000 s\n'
+        ))
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-verify.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-verify.t	Mon Oct 22 14:46:06 2018 -0400
@@ -20,7 +20,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 1 changesets, 3 total revisions
+  checked 1 changesets with 3 changes to 3 files
 
 verify with journal
 
@@ -31,7 +31,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  3 files, 1 changesets, 3 total revisions
+  checked 1 changesets with 3 changes to 3 files
   $ rm .hg/store/journal
 
 introduce some bugs in repo
@@ -55,7 +55,7 @@
    warning: revlog 'data/bar.txt.i' not in fncache!
    0: empty or missing bar.txt
    bar.txt@0: manifest refers to unknown revision 256559129457
-  3 files, 1 changesets, 0 total revisions
+  checked 1 changesets with 0 changes to 3 files
   3 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   6 integrity errors encountered!
@@ -280,7 +280,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  0 files, 1 changesets, 0 total revisions
+  checked 1 changesets with 0 changes to 0 files
 
 test revlog corruption
 
@@ -299,7 +299,7 @@
   checking files
    a@1: broken revlog! (index data/a.i is corrupted)
   warning: orphan data file 'data/a.i'
-  1 files, 2 changesets, 0 total revisions
+  checked 2 changesets with 0 changes to 1 files
   1 warnings encountered!
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
@@ -317,7 +317,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   $ cd ..
 
 test flag processor and skipflags
@@ -335,7 +335,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
   $ cat >> $TESTTMP/break-base64.py <<EOF
   > from __future__ import absolute_import
@@ -352,7 +352,7 @@
   crosschecking files in changesets and manifests
   checking files
    base64@0: unpacking 794cee7777cb: integrity check failed on data/base64.i:0
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -361,5 +361,5 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-  1 files, 1 changesets, 1 total revisions
+  checked 1 changesets with 1 changes to 1 files
 
--- a/tests/test-walk.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-walk.t	Mon Oct 22 14:46:06 2018 -0400
@@ -143,25 +143,25 @@
 
   $ hg debugwalk -v 'rootfilesin:'
   * matcher:
-  <patternmatcher patterns='(?:[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['.']">
   f  fennel      ../fennel
   f  fenugreek   ../fenugreek
   f  fiddlehead  ../fiddlehead
   $ hg debugwalk -v -I 'rootfilesin:'
   * matcher:
-  <includematcher includes='(?:[^/]+$)'>
+  <includematcher includes="rootfilesin: ['.']">
   f  fennel      ../fennel
   f  fenugreek   ../fenugreek
   f  fiddlehead  ../fiddlehead
   $ hg debugwalk -v 'rootfilesin:.'
   * matcher:
-  <patternmatcher patterns='(?:[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['.']">
   f  fennel      ../fennel
   f  fenugreek   ../fenugreek
   f  fiddlehead  ../fiddlehead
   $ hg debugwalk -v -I 'rootfilesin:.'
   * matcher:
-  <includematcher includes='(?:[^/]+$)'>
+  <includematcher includes="rootfilesin: ['.']">
   f  fennel      ../fennel
   f  fenugreek   ../fenugreek
   f  fiddlehead  ../fiddlehead
@@ -169,7 +169,7 @@
   * matcher:
   <differencematcher
     m1=<alwaysmatcher>,
-    m2=<includematcher includes='(?:[^/]+$)'>>
+    m2=<includematcher includes="rootfilesin: ['.']">>
   f  beans/black                     ../beans/black
   f  beans/borlotti                  ../beans/borlotti
   f  beans/kidney                    ../beans/kidney
@@ -182,19 +182,19 @@
   f  mammals/skunk                   skunk
   $ hg debugwalk -v 'rootfilesin:fennel'
   * matcher:
-  <patternmatcher patterns='(?:fennel/[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['fennel']">
   $ hg debugwalk -v -I 'rootfilesin:fennel'
   * matcher:
-  <includematcher includes='(?:fennel/[^/]+$)'>
+  <includematcher includes="rootfilesin: ['fennel']">
   $ hg debugwalk -v 'rootfilesin:skunk'
   * matcher:
-  <patternmatcher patterns='(?:skunk/[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['skunk']">
   $ hg debugwalk -v -I 'rootfilesin:skunk'
   * matcher:
-  <includematcher includes='(?:skunk/[^/]+$)'>
+  <includematcher includes="rootfilesin: ['skunk']">
   $ hg debugwalk -v 'rootfilesin:beans'
   * matcher:
-  <patternmatcher patterns='(?:beans/[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['beans']">
   f  beans/black     ../beans/black
   f  beans/borlotti  ../beans/borlotti
   f  beans/kidney    ../beans/kidney
@@ -203,7 +203,7 @@
   f  beans/turtle    ../beans/turtle
   $ hg debugwalk -v -I 'rootfilesin:beans'
   * matcher:
-  <includematcher includes='(?:beans/[^/]+$)'>
+  <includematcher includes="rootfilesin: ['beans']">
   f  beans/black     ../beans/black
   f  beans/borlotti  ../beans/borlotti
   f  beans/kidney    ../beans/kidney
@@ -212,25 +212,25 @@
   f  beans/turtle    ../beans/turtle
   $ hg debugwalk -v 'rootfilesin:mammals'
   * matcher:
-  <patternmatcher patterns='(?:mammals/[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['mammals']">
   f  mammals/skunk  skunk
   $ hg debugwalk -v -I 'rootfilesin:mammals'
   * matcher:
-  <includematcher includes='(?:mammals/[^/]+$)'>
+  <includematcher includes="rootfilesin: ['mammals']">
   f  mammals/skunk  skunk
   $ hg debugwalk -v 'rootfilesin:mammals/'
   * matcher:
-  <patternmatcher patterns='(?:mammals/[^/]+$)'>
+  <patternmatcher patterns="rootfilesin: ['mammals']">
   f  mammals/skunk  skunk
   $ hg debugwalk -v -I 'rootfilesin:mammals/'
   * matcher:
-  <includematcher includes='(?:mammals/[^/]+$)'>
+  <includematcher includes="rootfilesin: ['mammals']">
   f  mammals/skunk  skunk
   $ hg debugwalk -v -X 'rootfilesin:mammals'
   * matcher:
   <differencematcher
     m1=<alwaysmatcher>,
-    m2=<includematcher includes='(?:mammals/[^/]+$)'>>
+    m2=<includematcher includes="rootfilesin: ['mammals']">>
   f  beans/black                     ../beans/black
   f  beans/borlotti                  ../beans/borlotti
   f  beans/kidney                    ../beans/kidney
@@ -603,13 +603,13 @@
 
 Test listfile and listfile0
 
-  $ $PYTHON -c "open('listfile0', 'wb').write(b'fenugreek\0new\0')"
+  $ "$PYTHON" -c "open('listfile0', 'wb').write(b'fenugreek\0new\0')"
   $ hg debugwalk -v -I 'listfile0:listfile0'
   * matcher:
   <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$))'>
   f  fenugreek  fenugreek
   f  new        new
-  $ $PYTHON -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')"
+  $ "$PYTHON" -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')"
   $ hg debugwalk -v -I 'listfile:listfile'
   * matcher:
   <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$)|mammals/skunk(?:/|$))'>
@@ -644,7 +644,7 @@
   > for i in range(20000 // 100):
   >   print('x' * 100)
   > EOF
-  $ $PYTHON printnum.py >> overflow.list
+  $ "$PYTHON" printnum.py >> overflow.list
   $ echo fenugreek >> overflow.list
   $ hg debugwalk 'listfile:overflow.list' 2>&1 | egrep -v '^xxx'
   f  fennel     fennel     exact
--- a/tests/test-win32text.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-win32text.t	Mon Oct 22 14:46:06 2018 -0400
@@ -28,7 +28,7 @@
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cp .hg/hgrc ../zoz/.hg
-  $ $PYTHON unix2dos.py f
+  $ "$PYTHON" unix2dos.py f
 
 commit should fail
 
@@ -102,7 +102,7 @@
 
   $ mkdir d
   $ echo hello > d/f2
-  $ $PYTHON unix2dos.py d/f2
+  $ "$PYTHON" unix2dos.py d/f2
   $ hg add d/f2
   $ hg ci -m 3
   attempt to commit or push text file(s) using CRLF line endings
@@ -118,7 +118,7 @@
   $ hg rem f
   $ hg ci -m 4
 
-  $ $PYTHON -c 'open("bin", "wb").write(b"hello\x00\x0D\x0A")'
+  $ "$PYTHON" -c 'open("bin", "wb").write(b"hello\x00\x0D\x0A")'
   $ hg add bin
   $ hg ci -m 5
   $ hg log -v
@@ -181,7 +181,7 @@
   adding dupe/b
   adding dupe/c
   adding dupe/d
-  $ $PYTHON unix2dos.py dupe/b dupe/c dupe/d
+  $ "$PYTHON" unix2dos.py dupe/b dupe/c dupe/d
   $ hg -R dupe ci -m a dupe/a
   $ hg -R dupe ci -m b/c dupe/[bc]
   $ hg -R dupe ci -m d dupe/d
@@ -342,7 +342,7 @@
   
   $ rm .hg/hgrc
   $ (echo some; echo text) > f3
-  $ $PYTHON -c 'open("f4.bat", "wb").write(b"rem empty\x0D\x0A")'
+  $ "$PYTHON" -c 'open("f4.bat", "wb").write(b"rem empty\x0D\x0A")'
   $ hg add f3 f4.bat
   $ hg ci -m 6
   $ cat bin
@@ -395,7 +395,7 @@
   $ cat f4.bat
   rem empty\r (esc)
 
-  $ $PYTHON -c 'open("f5.sh", "wb").write(b"# empty\x0D\x0A")'
+  $ "$PYTHON" -c 'open("f5.sh", "wb").write(b"# empty\x0D\x0A")'
   $ hg add f5.sh
   $ hg ci -m 7
   $ cat f5.sh
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-caching.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,462 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > blackbox =
+  > [blackbox]
+  > track = simplecache
+  > EOF
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ cat >> .hg/hgrc << EOF
+  > [extensions]
+  > simplecache = $TESTDIR/wireprotosimplecache.py
+  > EOF
+
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+  $ echo a1 > a
+  $ hg commit -m 'commit 1'
+  $ echo b1 > b
+  $ hg commit -m 'commit 2'
+  $ echo a2 > a
+  $ echo b2 > b
+  $ hg commit -m 'commit 3'
+
+  $ hg log -G -T '{rev}:{node} {desc}'
+  @  3:50590a86f3ff5d1e9a1624a7a6957884565cc8e8 commit 3
+  |
+  o  2:4d01eda50c6ac5f7e89cbe1880143a32f559c302 commit 2
+  |
+  o  1:4432d83626e8a98655f062ec1f2a43b07f7fbbb0 commit 1
+  |
+  o  0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
+  
+
+  $ hg --debug debugindex -m
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
+       2       2 a8853dafacfca6fc807055a660d8b835141a3bb4 a988fb43583e871d1ed5750ee074c6d840bbbfc8 0000000000000000000000000000000000000000
+       3       3 3fe11dfbb13645782b0addafbe75a87c210ffddc a8853dafacfca6fc807055a660d8b835141a3bb4 0000000000000000000000000000000000000000
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Performing the same request should produce the same result, with the 2nd
+response coming from the cache.
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+Sending a different request doesn't yield a cache hit.
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41', b'\xa9\x88\xfb\x43\x58\x3e\x87\x1d\x1e\xd5\x75\x0e\xe0\x74\xc6\xd8\x40\xbb\xbf\xc8']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'node': b'\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
+      b'parents': [
+        b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+  $ cat .hg/blackbox.log
+  *> cacher constructed for manifestdata (glob)
+  *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> cacher constructed for manifestdata (glob)
+  *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> cacher constructed for manifestdata (glob)
+  *> cache miss for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob)
+  *> storing cache entry for 37326a83e9843f15161fce9d1e92d06b795d5e8e (glob)
+
+  $ cat error.log
+
+  $ killdaemons.py
+  $ rm .hg/blackbox.log
+
+Try with object caching mode
+
+  $ cat >> .hg/hgrc << EOF
+  > [simplecache]
+  > cacheobjects = true
+  > EOF
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+  $ cat .hg/blackbox.log
+  *> cacher constructed for manifestdata (glob)
+  *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> cacher constructed for manifestdata (glob)
+  *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+
+  $ cat error.log
+
+  $ killdaemons.py
+  $ rm .hg/blackbox.log
+
+A non-cacheable command does not instantiate a cacher
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+  $ sendhttpv2peer << EOF
+  > command capabilities
+  > EOF
+  creating http peer for wire protocol version 2
+  sending capabilities command
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ]
+    }
+  ]
+
+  $ test -f .hg/blackbox.log
+  [1]
+
+An error is not cached
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa! (esc)
+  [255]
+
+  $ cat .hg/blackbox.log
+  *> cacher constructed for manifestdata (glob)
+  *> cache miss for 2cba2a7d0d1575fea2fe68f597e97a7c2ac2f705 (glob)
+  *> cacher exiting due to error (glob)
+
+  $ killdaemons.py
+  $ rm .hg/blackbox.log
--- a/tests/test-wireproto-clientreactor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-clientreactor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -1,14 +1,27 @@
 from __future__ import absolute_import
 
 import unittest
+import zlib
 
 from mercurial import (
     error,
+    ui as uimod,
     wireprotoframing as framing,
 )
+from mercurial.utils import (
+    cborutil,
+)
+
+try:
+    from mercurial import zstd
+    zstd.__version__
+except ImportError:
+    zstd = None
 
 ffs = framing.makeframefromhumanstring
 
+globalui = uimod.ui()
+
 def sendframe(reactor, frame):
     """Send a frame bytearray to a reactor."""
     header = framing.parseheader(frame)
@@ -32,7 +45,9 @@
             unittest.TestCase.assertRaisesRegexp)
 
     def testbasic(self):
-        reactor = framing.clientreactor(hasmultiplesend=False, buffersends=True)
+        reactor = framing.clientreactor(globalui,
+                                        hasmultiplesend=False,
+                                        buffersends=True)
 
         request, action, meta = reactor.callcommand(b'foo', {})
         self.assertEqual(request.state, b'pending')
@@ -57,7 +72,9 @@
 class NoBufferTests(unittest.TestCase):
     """A reactor without send buffering sends requests immediately."""
     def testbasic(self):
-        reactor = framing.clientreactor(hasmultiplesend=True, buffersends=False)
+        reactor = framing.clientreactor(globalui,
+                                        hasmultiplesend=True,
+                                        buffersends=False)
 
         request, action, meta = reactor.callcommand(b'command1', {})
         self.assertEqual(request.requestid, 1)
@@ -91,7 +108,7 @@
             unittest.TestCase.assertRaisesRegexp)
 
     def testoddstream(self):
-        reactor = framing.clientreactor()
+        reactor = framing.clientreactor(globalui)
 
         action, meta = sendframe(reactor, ffs(b'1 1 0 1 0 foo'))
         self.assertEqual(action, b'error')
@@ -99,7 +116,7 @@
                          b'received frame with odd numbered stream ID: 1')
 
     def testunknownstream(self):
-        reactor = framing.clientreactor()
+        reactor = framing.clientreactor(globalui)
 
         action, meta = sendframe(reactor, ffs(b'1 0 0 1 0 foo'))
         self.assertEqual(action, b'error')
@@ -108,7 +125,7 @@
                          b'of stream flag set')
 
     def testunhandledframetype(self):
-        reactor = framing.clientreactor(buffersends=False)
+        reactor = framing.clientreactor(globalui, buffersends=False)
 
         request, action, meta = reactor.callcommand(b'foo', {})
         for frame in meta[b'framegen']:
@@ -120,7 +137,7 @@
 
 class StreamTests(unittest.TestCase):
     def testmultipleresponseframes(self):
-        reactor = framing.clientreactor(buffersends=False)
+        reactor = framing.clientreactor(globalui, buffersends=False)
 
         request, action, meta = reactor.callcommand(b'foo', {})
 
@@ -139,6 +156,449 @@
             ffs(b'%d 0 0 command-response eos bar' % request.requestid))
         self.assertEqual(action, b'responsedata')
 
+class RedirectTests(unittest.TestCase):
+    def testredirect(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
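+        # The redirect specification should be passed through verbatim in
+        # the command-request frame's CBOR payload; the frame contents are
+        # asserted below.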
+        redirect = {
+            b'targets': [b'a', b'b'],
+            b'hashes': [b'sha256'],
+        }
+
+        request, action, meta = reactor.callcommand(
+            b'foo', {}, redirect=redirect)
+
+        self.assertEqual(action, b'sendframes')
+
+        frames = list(meta[b'framegen'])
+        self.assertEqual(len(frames), 1)
+
+        self.assertEqual(frames[0],
+                         ffs(b'1 1 stream-begin command-request new '
+                             b"cbor:{b'name': b'foo', "
+                             b"b'redirect': {b'targets': [b'a', b'b'], "
+                             b"b'hashes': [b'sha256']}}"))
+
+class StreamSettingsTests(unittest.TestCase):
+    def testnoflags(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
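+        # ffs() frames are described as: request-id, stream-id, stream
+        # flags, frame type, frame flags, payload (empty here).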
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings 0 '))
+
+        self.assertEqual(action, b'error')
+        self.assertEqual(meta, {
+            b'message': b'stream encoding settings frame must have '
+                        b'continuation or end of stream flag set',
+        })
+
+    def testconflictflags(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings continuation|eos '))
+
+        self.assertEqual(action, b'error')
+        self.assertEqual(meta, {
+            b'message': b'stream encoding settings frame cannot have both '
+                        b'continuation and end of stream flags set',
+        })
+
+    def testemptypayload(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings eos '))
+
+        self.assertEqual(action, b'error')
+        self.assertEqual(meta, {
+            b'message': b'stream encoding settings frame did not contain '
+                        b'CBOR data'
+        })
+
+    def testbadcbor(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings eos badvalue'))
+
+        self.assertEqual(action, b'error')
+
+    def testsingleobject(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
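+        # b"identity" names the pass-through stream encoder, so the
+        # settings frame should be accepted with no further action.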
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings eos cbor:b"identity"'))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+    def testmultipleobjects(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        data = b''.join([
+            b''.join(cborutil.streamencode(b'identity')),
+            b''.join(cborutil.streamencode({b'foo', b'bar'})),
+        ])
+
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings eos %s' % data))
+
+        self.assertEqual(action, b'error')
+        self.assertEqual(meta, {
+            b'message': b'error setting stream decoder: identity decoder '
+                        b'received unexpected additional values',
+        })
+
+    def testmultipleframes(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        data = b''.join(cborutil.streamencode(b'identity'))
+
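+        # The settings payload is split across a continuation frame and a
+        # final eos frame; the reactor buffers the pieces and decodes the
+        # CBOR value only once the final frame arrives.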
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings continuation %s' %
+                data[0:3]))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 0 stream-settings eos %s' % data[3:]))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+    def testinvalidencoder(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
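+        # b"badvalue" is not a registered stream encoder, so applying the
+        # settings is expected to fail.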
+        action, meta = sendframe(reactor,
+            ffs(b'1 2 stream-begin stream-settings eos cbor:b"badvalue"'))
+
+        self.assertEqual(action, b'error')
+        self.assertEqual(meta, {
+            b'message': b'error setting stream decoder: unknown stream '
+                        b'decoder: badvalue',
+        })
+
+    def testzlibencoding(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
+                request.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        result = {
+            b'status': b'ok',
+        }
+        encoded = b''.join(cborutil.streamencode(result))
+
+        compressed = zlib.compress(encoded)
+        self.assertEqual(zlib.decompress(compressed), encoded)
+
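+        # The "encoded" stream flag routes the payload through the
+        # negotiated zlib decoder before it is surfaced as response data.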
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' %
+                (request.requestid, compressed)))
+
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], encoded)
+
+    def testzlibencodingsinglebyteframes(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
+                request.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        result = {
+            b'status': b'ok',
+        }
+        encoded = b''.join(cborutil.streamencode(result))
+
+        compressed = zlib.compress(encoded)
+        self.assertEqual(zlib.decompress(compressed), encoded)
+
+        chunks = []
+
+        for i in range(len(compressed)):
+            char = compressed[i:i + 1]
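+            # ffs() interprets string escapes in the payload, so a literal
+            # backslash must be doubled before framing.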
+            if char == b'\\':
+                char = b'\\\\'
+            action, meta = sendframe(reactor,
+                ffs(b'%d 2 encoded command-response continuation %s' %
+                    (request.requestid, char)))
+
+            self.assertEqual(action, b'responsedata')
+            chunks.append(meta[b'data'])
+            self.assertTrue(meta[b'expectmore'])
+            self.assertFalse(meta[b'eos'])
+
+        # zlib will have the full data decoded at this point, even though
+        # we haven't flushed.
+        self.assertEqual(b''.join(chunks), encoded)
+
+        # End the stream for good measure.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-end command-response eos ' % request.requestid))
+
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+        self.assertFalse(meta[b'expectmore'])
+        self.assertTrue(meta[b'eos'])
+
+    def testzlibmultipleresponses(self):
+        # We feed zlib-compressed data belonging to two different requests
+        # into the same stream. This exercises our flushing behavior.
+        reactor = framing.clientreactor(globalui, buffersends=False,
+                                        hasmultiplesend=True)
+
+        request1, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        request2, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
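+        # Emulate the server side: an output stream with a zlib encoder
+        # produces the compressed payloads fed back into the client
+        # reactor.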
+        outstream = framing.outputstream(2)
+        outstream.setencoder(globalui, b'zlib')
+
+        response1 = b''.join(cborutil.streamencode({
+            b'status': b'ok',
+            b'extra': b'response1' * 10,
+        }))
+
+        response2 = b''.join(cborutil.streamencode({
+            b'status': b'error',
+            b'extra': b'response2' * 10,
+        }))
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zlib"' %
+                request1.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        # Feeding partial data in won't get anything useful out.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response continuation %s' % (
+                request1.requestid, outstream.encode(response1))))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+
+        # But flushing data at both ends will get our original data.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' % (
+                request1.requestid, outstream.flush())))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], response1)
+
+        # We should be able to reuse the compressor/decompressor for the
+        # 2nd response.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response continuation %s' % (
+                request2.requestid, outstream.encode(response2))))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' % (
+                request2.requestid, outstream.flush())))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], response2)
+
+    @unittest.skipUnless(zstd, 'zstd not available')
+    def testzstd8mbencoding(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
+                request.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        result = {
+            b'status': b'ok',
+        }
+        encoded = b''.join(cborutil.streamencode(result))
+
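+        # Compress with the frame-level zstd-8mb encoder and sanity-check
+        # the result against a raw zstd decompressor.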
+        encoder = framing.zstd8mbencoder(globalui)
+        compressed = encoder.encode(encoded) + encoder.finish()
+        self.assertEqual(zstd.ZstdDecompressor().decompress(
+            compressed, max_output_size=len(encoded)), encoded)
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' %
+                (request.requestid, compressed)))
+
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], encoded)
+
+    @unittest.skipUnless(zstd, 'zstd not available')
+    def testzstd8mbencodingsinglebyteframes(self):
+        reactor = framing.clientreactor(globalui, buffersends=False)
+
+        request, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
+                request.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        result = {
+            b'status': b'ok',
+        }
+        encoded = b''.join(cborutil.streamencode(result))
+
+        compressed = zstd.ZstdCompressor().compress(encoded)
+        self.assertEqual(zstd.ZstdDecompressor().decompress(compressed),
+                         encoded)
+
+        chunks = []
+
+        for i in range(len(compressed)):
+            char = compressed[i:i + 1]
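+            # As in the zlib variant, escape literal backslashes so ffs()
+            # does not misinterpret them.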
+            if char == b'\\':
+                char = b'\\\\'
+            action, meta = sendframe(reactor,
+                ffs(b'%d 2 encoded command-response continuation %s' %
+                    (request.requestid, char)))
+
+            self.assertEqual(action, b'responsedata')
+            chunks.append(meta[b'data'])
+            self.assertTrue(meta[b'expectmore'])
+            self.assertFalse(meta[b'eos'])
+
+        # zstd decompressor will flush at frame boundaries.
+        self.assertEqual(b''.join(chunks), encoded)
+
+        # End the stream for good measure.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-end command-response eos ' % request.requestid))
+
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+        self.assertFalse(meta[b'expectmore'])
+        self.assertTrue(meta[b'eos'])
+
+    @unittest.skipUnless(zstd, 'zstd not available')
+    def testzstd8mbmultipleresponses(self):
+        # We feed zstd-compressed data belonging to two different requests
+        # into the same stream. This exercises our flushing behavior.
+        reactor = framing.clientreactor(globalui, buffersends=False,
+                                        hasmultiplesend=True)
+
+        request1, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
+        request2, action, meta = reactor.callcommand(b'foo', {})
+        for f in meta[b'framegen']:
+            pass
+
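+        # Same server-side emulation as the zlib test, but using the
+        # zstd-8mb encoder profile.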
+        outstream = framing.outputstream(2)
+        outstream.setencoder(globalui, b'zstd-8mb')
+
+        response1 = b''.join(cborutil.streamencode({
+            b'status': b'ok',
+            b'extra': b'response1' * 10,
+        }))
+
+        response2 = b''.join(cborutil.streamencode({
+            b'status': b'error',
+            b'extra': b'response2' * 10,
+        }))
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 stream-begin stream-settings eos cbor:b"zstd-8mb"' %
+                request1.requestid))
+
+        self.assertEqual(action, b'noop')
+        self.assertEqual(meta, {})
+
+        # Feeding partial data in won't get anything useful out.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response continuation %s' % (
+                request1.requestid, outstream.encode(response1))))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+
+        # But flushing data at both ends will get our original data.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' % (
+                request1.requestid, outstream.flush())))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], response1)
+
+        # We should be able to reuse the compressor/decompressor for the
+        # 2nd response.
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response continuation %s' % (
+                request2.requestid, outstream.encode(response2))))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], b'')
+
+        action, meta = sendframe(reactor,
+            ffs(b'%d 2 encoded command-response eos %s' % (
+                request2.requestid, outstream.flush())))
+        self.assertEqual(action, b'responsedata')
+        self.assertEqual(meta[b'data'], response2)
+
 if __name__ == '__main__':
     import silenttestrunner
     silenttestrunner.main(__name__)
--- a/tests/test-wireproto-command-branchmap.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-branchmap.t	Mon Oct 22 14:46:06 2018 -0400
@@ -43,30 +43,17 @@
   > EOF
   creating http peer for wire protocol version 2
   sending branchmap command
-  s>     POST /api/exp-http-v2-0001/ro/branchmap HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 24\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     \x10\x00\x00\x01\x00\x01\x01\x11\xa1DnameIbranchmap
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     83\r\n
-  s>     {\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa3Gbranch1\x81T\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88Gbranch2\x81T"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfcGdefault\x82T&\x80Z\xba\x1e`\n
-  s>     \x82\xe96a\x14\x9f#\x13\x86j"\x1a{T\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82
-  s>     \r\n
-  received frame(size=123; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: {b'branch1': [b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88'], b'branch2': [b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc'], b'default': [b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{', b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82']}
+  response: {
+    b'branch1': [
+      b'\xb5\xfa\xac\xdf\xd2c7h\xcb1R3l\xc0\x953\x81&f\x88'
+    ],
+    b'branch2': [
+      b'"Aa\xc7X\x9a\xa4\x8f\xa8:H\xfe\xff^\x95\xb5j\xe3\'\xfc'
+    ],
+    b'default': [
+      b'&\x80Z\xba\x1e`\n\x82\xe96a\x14\x9f#\x13\x86j"\x1a{',
+      b'\xbe\x0e\xf7<\x17\xad\xe3\xfc\x89\xdcAp\x1e\xb9\xfc:\x91\xb5\x82\x82'
+    ]
+  }
 
   $ cat error.log
--- a/tests/test-wireproto-command-capabilities.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-capabilities.t	Mon Oct 22 14:46:06 2018 -0400
@@ -34,7 +34,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 A proper request without the API server enabled returns the legacy response
 
@@ -59,7 +59,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 Restart with just API server enabled. This enables serving the new format.
 
@@ -95,7 +95,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
 
@@ -120,7 +120,7 @@
   s>     Content-Type: application/mercurial-0.1\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
 
 X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
 
@@ -145,8 +145,14 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3Dapis\xa0GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  cbor> [
+    {
+      b'apibase': b'api/',
+      b'apis': {},
+      b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+    }
+  ]
 
 Restart server to enable HTTPv2
 
@@ -178,15 +184,21 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3Dapis\xa0GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  s>     \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  cbor> [
+    {
+      b'apibase': b'api/',
+      b'apis': {},
+      b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+    }
+  ]
 
 Request for HTTPv2 service returns information about it
 
   $ sendhttpraw << EOF
   > httprequest GET ?cmd=capabilities
   >    user-agent: test
-  >    x-hgupgrade-1: exp-http-v2-0001 foo bar
+  >    x-hgupgrade-1: exp-http-v2-0003 foo bar
   >    x-hgproto-1: cbor
   > EOF
   using raw connection to peer
@@ -194,7 +206,7 @@
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
   s>     x-hgproto-1: cbor\r\n
-  s>     x-hgupgrade-1: exp-http-v2-0001 foo bar\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003 foo bar\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     \r\n
   s> makefile('rb', None)
@@ -204,8 +216,242 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Eheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyCnewCnewColdColdInamespaceBnsKpermissions\x81DpushHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullKcompression\x81\xa1DnameDzlibNrawrepoformats\x82LgeneraldeltaHrevlogv1Qframingmediatypes\x81X&application/mercurial-exp-framing-0005GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
-  cbor> {b'apibase': b'api/', b'apis': {b'exp-http-v2-0001': {b'commands': {b'branchmap': {b'args': {}, b'permissions': [b'pull']}, b'capabilities': {b'args': {}, b'permissions': [b'pull']}, b'heads': {b'args': {b'publiconly': False}, b'permissions': [b'pull']}, b'known': {b'args': {b'nodes': [b'deadbeef']}, b'permissions': [b'pull']}, b'listkeys': {b'args': {b'namespace': b'ns'}, b'permissions': [b'pull']}, b'lookup': {b'args': {b'key': b'foo'}, b'permissions': [b'pull']}, b'pushkey': {b'args': {b'key': b'key', b'namespace': b'ns', b'new': b'new', b'old': b'old'}, b'permissions': [b'push']}}, b'compression': [{b'name': b'zlib'}], b'framingmediatypes': [b'application/mercurial-exp-framing-0005'], b'rawrepoformats': [b'generaldelta', b'revlogv1']}}, b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'}
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  cbor> [
+    {
+      b'apibase': b'api/',
+      b'apis': {
+        b'exp-http-v2-0003': {
+          b'commands': {
+            b'branchmap': {
+              b'args': {},
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'capabilities': {
+              b'args': {},
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'changesetdata': {
+              b'args': {
+                b'fields': {
+                  b'default': set([]),
+                  b'required': False,
+                  b'type': b'set',
+                  b'validvalues': set([
+                    b'bookmarks',
+                    b'parents',
+                    b'phase',
+                    b'revision'
+                  ])
+                },
+                b'revisions': {
+                  b'required': True,
+                  b'type': b'list'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'filedata': {
+              b'args': {
+                b'fields': {
+                  b'default': set([]),
+                  b'required': False,
+                  b'type': b'set',
+                  b'validvalues': set([
+                    b'linknode',
+                    b'parents',
+                    b'revision'
+                  ])
+                },
+                b'haveparents': {
+                  b'default': False,
+                  b'required': False,
+                  b'type': b'bool'
+                },
+                b'nodes': {
+                  b'required': True,
+                  b'type': b'list'
+                },
+                b'path': {
+                  b'required': True,
+                  b'type': b'bytes'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'filesdata': {
+              b'args': {
+                b'fields': {
+                  b'default': set([]),
+                  b'required': False,
+                  b'type': b'set',
+                  b'validvalues': set([
+                    b'firstchangeset',
+                    b'linknode',
+                    b'parents',
+                    b'revision'
+                  ])
+                },
+                b'haveparents': {
+                  b'default': False,
+                  b'required': False,
+                  b'type': b'bool'
+                },
+                b'pathfilter': {
+                  b'default': None,
+                  b'required': False,
+                  b'type': b'dict'
+                },
+                b'revisions': {
+                  b'required': True,
+                  b'type': b'list'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ],
+              b'recommendedbatchsize': 50000
+            },
+            b'heads': {
+              b'args': {
+                b'publiconly': {
+                  b'default': False,
+                  b'required': False,
+                  b'type': b'bool'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'known': {
+              b'args': {
+                b'nodes': {
+                  b'default': [],
+                  b'required': False,
+                  b'type': b'list'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'listkeys': {
+              b'args': {
+                b'namespace': {
+                  b'required': True,
+                  b'type': b'bytes'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'lookup': {
+              b'args': {
+                b'key': {
+                  b'required': True,
+                  b'type': b'bytes'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            },
+            b'manifestdata': {
+              b'args': {
+                b'fields': {
+                  b'default': set([]),
+                  b'required': False,
+                  b'type': b'set',
+                  b'validvalues': set([
+                    b'parents',
+                    b'revision'
+                  ])
+                },
+                b'haveparents': {
+                  b'default': False,
+                  b'required': False,
+                  b'type': b'bool'
+                },
+                b'nodes': {
+                  b'required': True,
+                  b'type': b'list'
+                },
+                b'tree': {
+                  b'required': True,
+                  b'type': b'bytes'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ],
+              b'recommendedbatchsize': 100000
+            },
+            b'pushkey': {
+              b'args': {
+                b'key': {
+                  b'required': True,
+                  b'type': b'bytes'
+                },
+                b'namespace': {
+                  b'required': True,
+                  b'type': b'bytes'
+                },
+                b'new': {
+                  b'required': True,
+                  b'type': b'bytes'
+                },
+                b'old': {
+                  b'required': True,
+                  b'type': b'bytes'
+                }
+              },
+              b'permissions': [
+                b'push'
+              ]
+            },
+            b'rawstorefiledata': {
+              b'args': {
+                b'files': {
+                  b'required': True,
+                  b'type': b'list'
+                },
+                b'pathfilter': {
+                  b'default': None,
+                  b'required': False,
+                  b'type': b'list'
+                }
+              },
+              b'permissions': [
+                b'pull'
+              ]
+            }
+          },
+          b'framingmediatypes': [
+            b'application/mercurial-exp-framing-0006'
+          ],
+          b'pathfilterprefixes': set([
+            b'path:',
+            b'rootfilesin:'
+          ]),
+          b'rawrepoformats': [
+            b'generaldelta',
+            b'revlogv1'
+          ]
+        }
+      },
+      b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+    }
+  ]
 
 capabilities command returns expected info
 
@@ -217,7 +463,7 @@
   s>     Accept-Encoding: identity\r\n
   s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
   s>     x-hgproto-1: cbor\r\n
-  s>     x-hgupgrade-1: exp-http-v2-0001\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
   s>     accept: application/mercurial-0.1\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
@@ -229,31 +475,274 @@
   s>     Content-Type: application/mercurial-cbor\r\n
   s>     Content-Length: *\r\n (glob)
   s>     \r\n
-  s>     \xa3Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Eheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyCnewCnewColdColdInamespaceBnsKpermissions\x81DpushHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullKcompression\x81\xa1DnameDzlibNrawrepoformats\x82LgeneraldeltaHrevlogv1Qframingmediatypes\x81X&application/mercurial-exp-framing-0005GapibaseDapi/Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
   sending capabilities command
-  s>     POST /api/exp-http-v2-0001/ro/capabilities HTTP/1.1\r\n
+  s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
-  s>     *\r\n (glob)
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 27\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 63\r\n
   s>     host: $LOCALIP:$HGPORT\r\n (glob)
   s>     user-agent: Mercurial debugwireproto\r\n
   s>     \r\n
-  s>     \x13\x00\x00\x01\x00\x01\x01\x11\xa1DnameLcapabilities
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity\x13\x00\x00\x01\x00\x01\x00\x11\xa1DnameLcapabilities
   s> makefile('rb', None)
   s>     HTTP/1.1 200 OK\r\n
   s>     Server: testing stub value\r\n
   s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
   s>     Transfer-Encoding: chunked\r\n
   s>     \r\n
-  s>     1d7\r\n
-  s>     \xcf\x01\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa4Hcommands\xa7Eheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyCnewCnewColdColdInamespaceBnsKpermissions\x81DpushHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullKcompression\x81\xa1DnameDzlibNrawrepoformats\x82LgeneraldeltaHrevlogv1Qframingmediatypes\x81X&application/mercurial-exp-framing-0005
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
   s>     \r\n
-  received frame(size=463; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     651\r\n
+  s>     I\x06\x00\x01\x00\x02\x041
+  s>     \xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1
+  s>     \r\n
+  received frame(size=1609; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
   s>     0\r\n
   s>     \r\n
-  response: [{b'status': b'ok'}, {b'commands': {b'branchmap': {b'args': {}, b'permissions': [b'pull']}, b'capabilities': {b'args': {}, b'permissions': [b'pull']}, b'heads': {b'args': {b'publiconly': False}, b'permissions': [b'pull']}, b'known': {b'args': {b'nodes': [b'deadbeef']}, b'permissions': [b'pull']}, b'listkeys': {b'args': {b'namespace': b'ns'}, b'permissions': [b'pull']}, b'lookup': {b'args': {b'key': b'foo'}, b'permissions': [b'pull']}, b'pushkey': {b'args': {b'key': b'key', b'namespace': b'ns', b'new': b'new', b'old': b'old'}, b'permissions': [b'push']}}, b'compression': [{b'name': b'zlib'}], b'framingmediatypes': [b'application/mercurial-exp-framing-0005'], b'rawrepoformats': [b'generaldelta', b'revlogv1']}]
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ]
+    }
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
 
   $ cat error.log
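The `args` descriptors in the capabilities listing above give a client
everything it needs to validate a request before sending it: which arguments
are required, their wire types, and their defaults. A minimal client-side
check might look like this (hypothetical helper over a plain-dict rendering
of the map; not a Mercurial API):

    def validateargs(command, args, commands):
        # 'commands' is the capabilities map; each command carries an
        # 'args' dict of descriptors with 'required' / 'default' keys.
        descriptors = commands[command][b'args']
        missing = sorted(name for name, meta in descriptors.items()
                         if meta.get(b'required') and name not in args)
        if missing:
            raise ValueError(b'missing required arguments: %s'
                             % b', '.join(missing))
        filled = dict(args)
        for name, meta in descriptors.items():
            # Fill in defaults for optional arguments left unset.
            filled.setdefault(name, meta.get(b'default'))
        return filled

This mirrors the server-side aborts exercised below, e.g. "missing required
arguments: revisions!".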
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-command-changesetdata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,613 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ cat >> .hg/hgrc << EOF
+  > [phases]
+  > publish = false
+  > EOF
+  $ echo a0 > a
+  $ echo b0 > b
+
+  $ hg -q commit -A -m 'commit 0'
+
+  $ echo a1 > a
+  $ echo b1 > b
+  $ hg commit -m 'commit 1'
+  $ echo b2 > b
+  $ hg commit -m 'commit 2'
+  $ hg phase --public -r .
+
+  $ hg -q up -r 0
+  $ echo a2 > a
+  $ hg commit -m 'commit 3'
+  created new head
+
+  $ hg log -G -T '{rev}:{node} {desc}\n'
+  @  3:eae5f82c2e622368d27daecb76b7e393d0f24211 commit 3
+  |
+  | o  2:0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd commit 2
+  | |
+  | o  1:7592917e1c3e82677cb0a4bc715ca25dd12d28c1 commit 1
+  |/
+  o  0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
+  
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Passing no arguments is an invalid request
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: missing required arguments: revisions!
+  [255]
+
+Missing nodes key for changesetexplicit results in an error
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetexplicit'}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: nodes key not present in changesetexplicit revision specifier!
+  [255]
+
+changesetexplicitdepth requires nodes and depth keys
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetexplicitdepth'}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: nodes key not present in changesetexplicitdepth revision specifier!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetexplicitdepth', b'nodes': []}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: depth key not present in changesetexplicitdepth revision specifier!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetexplicitdepth', b'depth': 42}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: nodes key not present in changesetexplicitdepth revision specifier!
+  [255]
+
+changesetdagrange requires roots and heads keys
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetdagrange'}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: roots key not present in changesetdagrange revision specifier!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetdagrange', b'roots': []}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: heads key not present in changesetdagrange revision specifier!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetdagrange', b'heads': [b'dummy']}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: roots key not present in changesetdagrange revision specifier!
+  [255]
+
+Empty changesetdagrange heads results in an error
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{b'type': b'changesetdagrange', b'heads': [], b'roots': []}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  abort: heads key in changesetdagrange cannot be empty!
+  [255]
+
+Sending just dagrange heads (with empty roots) sends all revisions
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 4
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    },
+    {
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+Sending root nodes limits what data is sent
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    }
+  ]
+
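The two responses above pin down the changesetdagrange semantics: the result
is every ancestor of the heads that is neither a root nor an ancestor of a
root. A toy resolver over an in-memory parent map behaves the same way
(illustrative sketch, not the server implementation):

    def dagrange(parentsof, heads, roots):
        # Mark the roots and all of their ancestors as excluded.
        excluded, stack = set(), list(roots)
        while stack:
            node = stack.pop()
            if node not in excluded:
                excluded.add(node)
                stack.extend(parentsof(node))
        # Walk back from the heads, stopping at excluded nodes.
        result, stack = set(), list(heads)
        while stack:
            node = stack.pop()
            if node not in result and node not in excluded:
                result.add(node)
                stack.extend(parentsof(node))
        return result

With the four-commit graph built at the top of this test, resolving
heads={commit 2} against roots={commit 0} yields commits 1 and 2, matching
the two-item response above.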
+Requesting data on a single changeset by node works
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a']}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    }
+  ]
+
+Specifying a dagrange and explicit nodes takes the union of both
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[
+  >         {
+  >             b'type': b'changesetexplicit',
+  >             b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
+  >         },
+  >         {
+  >             b'type': b'changesetdagrange',
+  >             b'roots': [b'\x75\x92\x91\x7e\x1c\x3e\x82\x67\x7c\xb0\xa4\xbc\x71\x5c\xa2\x5d\xd1\x2d\x28\xc1'],
+  >             b'heads': [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'],
+  >         }]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    }
+  ]
+
+A depth of 1 limits results to exactly the requested nodes
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicitdepth',
+  >         b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
+  >         b'depth': 1}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+A depth of 2 extends results to the first ancestor
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicitdepth',
+  >         b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
+  >         b'depth': 2}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+A depth with multiple nodes covers the ancestors of each node
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicitdepth',
+  >         b'nodes': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11', b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd'],
+  >         b'depth': 2}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 4
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    },
+    {
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
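The depth behavior is breadth-first: a depth of 1 selects the listed nodes
themselves, and each additional level pulls in one more generation of
parents. A sketch of that rule (toy code, not the server implementation):

    def explicitdepth(parentsof, nodes, depth):
        seen = set(nodes)
        frontier = list(nodes)
        for _ in range(depth - 1):
            # Each extra level of depth adds one generation of parents.
            frontier = [p for node in frontier for p in parentsof(node)
                        if p not in seen]
            seen.update(frontier)
        return seen

Starting from commits 2 and 3 with depth=2, this adds their parents
(commits 1 and 0), which is why the response above reports four items.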
+Parents data is transferred upon request
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'parents']
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
+      b'parents': [
+        b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+Phase data is transferred upon request
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'phase']
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd',
+      b'phase': b'public'
+    }
+  ]
+
+Revision data is transferred upon request
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'revision']
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          61
+        ]
+      ],
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    },
+    b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
+  ]
+
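Note the shape of the stream: large fields are not embedded in the metadata
map. Instead, `fieldsfollowing` announces (field, size) pairs and the raw
payloads follow as separate items, here a 61-byte changeset fulltext. A toy
consumer that re-attaches payloads to their metadata (hedged sketch; the
real client is more involved):

    def attachpayloads(items):
        items = iter(items)
        for obj in items:
            if isinstance(obj, dict) and b'fieldsfollowing' in obj:
                for field, size in obj.pop(b'fieldsfollowing'):
                    payload = next(items)   # raw bytes follow the map
                    assert len(payload) == size
                    obj[field] = payload
            yield obj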
+The bookmarks key isn't present when there is no bookmark data
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'bookmarks']
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 4
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    },
+    {
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    {
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    },
+    {
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+Bookmarks are sent when requested
+
+  $ hg -R ../server bookmark -r 0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd book-1
+  $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-2
+  $ hg -R ../server bookmark -r eae5f82c2e622368d27daecb76b7e393d0f24211 book-3
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'bookmarks']
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 4
+    },
+    {
+      b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
+    },
+    {
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    {
+      b'bookmarks': [
+        b'book-1'
+      ],
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    },
+    {
+      b'bookmarks': [
+        b'book-2',
+        b'book-3'
+      ],
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+Bookmarks are sent when we make a no-new-revisions request
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'bookmarks', b'revision']
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11'],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          63
+        ]
+      ],
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
+    },
+    b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1',
+    {
+      b'bookmarks': [
+        b'book-1'
+      ],
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          61
+        ]
+      ],
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
+    },
+    b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2',
+    {
+      b'bookmarks': [
+        b'book-2',
+        b'book-3'
+      ],
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
+    }
+  ]
+
+Multiple fields can be transferred
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'parents', b'revision']
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          61
+        ]
+      ],
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
+      b'parents': [
+        b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
+  ]
+
+Base nodes have just their metadata (e.g. phase) transferred
+TODO this doesn't work
+
+  $ sendhttpv2peer << EOF
+  > command changesetdata
+  >     fields eval:[b'phase', b'parents', b'revision']
+  >     revisions eval:[{
+  >         b'type': b'changesetdagrange',
+  >         b'roots': [b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'],
+  >         b'heads': [
+  >             b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd',
+  >             b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending changesetdata command
+  response: gen[
+    {
+      b'totalitems': 3
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          63
+        ]
+      ],
+      b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1',
+      b'parents': [
+        b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ],
+      b'phase': b'public'
+    },
+    b'7f144aea0ba742713887b564d57e9d12f12ff382\ntest\n0 0\na\nb\n\ncommit 1',
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          61
+        ]
+      ],
+      b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd',
+      b'parents': [
+        b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ],
+      b'phase': b'public'
+    },
+    b'37f0a2d1c28ffe4b879109a7d1bbf8f07b3c763b\ntest\n0 0\nb\n\ncommit 2',
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          61
+        ]
+      ],
+      b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
+      b'parents': [
+        b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ],
+      b'phase': b'draft'
+    },
+    b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
+  ]
+
+  $ cat error.log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-command-filedata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,312 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ cat > a << EOF
+  > a0
+  > 00000000000000000000000000000000000000
+  > 11111111111111111111111111111111111111
+  > EOF
+  $ echo b0 > b
+  $ mkdir -p dir0/child0 dir0/child1 dir1
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ echo e0 > dir0/child0/e
+  $ echo f0 > dir0/child1/f
+  $ hg -q commit -A -m 'commit 0'
+
+  $ echo a1 >> a
+  $ echo d1 > dir0/d
+  $ hg commit -m 'commit 1'
+  $ echo f0 > dir0/child1/f
+  $ hg commit -m 'commit 2'
+  nothing changed
+  [1]
+
+  $ hg -q up -r 0
+  $ echo a2 >> a
+  $ hg commit -m 'commit 3'
+  created new head
+
+  $ hg log -G -T '{rev}:{node} {desc}\n'
+  @  2:5ce944d7fece1252dae06c34422b573c191b9489 commit 3
+  |
+  | o  1:3ef5e551f219ba505481d34d6b0316b017fa3f00 commit 1
+  |/
+  o  0:91b232a2253ce0638496f67bdfd7a4933fb51b25 commit 0
+  
+
+  $ hg --debug debugindex a
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 0a86321f1379d1a9ecd0579a22977af7a5acaf11 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
+       2       2 7e5801b6d5f03a5a54f3c47b583f7567aad43e5b 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
+
+  $ hg --debug debugindex dir0/child0/e
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 bbba6c06b30f443d34ff841bc985c4d0827c6be4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+
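The nodeids in these indexes are content hashes: SHA-1 over the two parent
nodes (lexically sorted) followed by the revision fulltext. A sketch that
should reproduce the rev 0 nodeid of `a` above (assuming, as here, that the
filelog entry carries no copy metadata):

    import hashlib

    NULLID = b'\x00' * 20

    def revloghash(text, p1=NULLID, p2=NULLID):
        # SHA-1 over the sorted parent nodes, then the fulltext.
        lo, hi = sorted([p1, p2])
        return hashlib.sha1(lo + hi + text).hexdigest()

    text = (b'a0\n'
            b'00000000000000000000000000000000000000\n'
            b'11111111111111111111111111111111111111\n')
    # expected: 649d149df43d83882523b7fb1e6a3af6f1907b39
    print(revloghash(text))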
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Omitting required arguments is an error
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  abort: missing required arguments: nodes, path!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  abort: missing required arguments: path!
+  [255]
+
+An unknown node is an error
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
+  >     path eval:b'a'
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  abort: unknown file node: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa!
+  [255]
+
+Fetching a single revision returns just metadata by default
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    }
+  ]
+
+Requesting parents works
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11',
+      b'parents': [
+        b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+Requesting revision data works
+(haveparents defaults to False, so fulltext is emitted)
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          84
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n'
+  ]
+
+haveparents=False should be the same as above
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  >     fields eval:[b'revision']
+  >     haveparents eval:False
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          84
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n'
+  ]
+
+haveparents=True should emit a delta
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  >     fields eval:[b'revision']
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
+  ]
+
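The 15-byte delta payload is worth decoding. Mercurial binary deltas are a
sequence of chunks, each headed by three big-endian 32-bit integers (start
offset into the base text, end offset, and length of the replacement data),
followed by the replacement bytes. Here start = end = 0x51 = 81 (the length
of the base fulltext), so the three bytes `a1\n` are appended. A minimal
applier (a sketch of the format, not Mercurial's mpatch):

    import struct

    def applydelta(base, delta):
        out, pos, i = [], 0, 0
        while i < len(delta):
            # Chunk header: start offset, end offset, replacement length.
            start, end, length = struct.unpack('>III', delta[i:i + 12])
            i += 12
            out.append(base[pos:start])      # copy the unchanged region
            out.append(delta[i:i + length])  # then the replacement bytes
            i += length
            pos = end
        out.append(base[pos:])
        return b''.join(out)

    base = (b'a0\n'
            b'00000000000000000000000000000000000000\n'
            b'11111111111111111111111111111111111111\n')  # 81 bytes
    delta = b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
    assert applydelta(base, delta) == base + b'a1\n'  # the 84-byte fulltext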
+Requesting multiple revisions works
+(first revision is a fulltext since haveparents=False by default)
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39', b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11']
+  >     path eval:b'a'
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
+    {
+      b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
+  ]
+
+Revisions are sorted by DAG order, parents first
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x0a\x86\x32\x1f\x13\x79\xd1\xa9\xec\xd0\x57\x9a\x22\x97\x7a\xf7\xa5\xac\xaf\x11', b'\x64\x9d\x14\x9d\xf4\x3d\x83\x88\x25\x23\xb7\xfb\x1e\x6a\x3a\xf6\xf1\x90\x7b\x39']
+  >     path eval:b'a'
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
+    {
+      b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n'
+  ]
+
+Requesting parents and revision data works
+
+  $ sendhttpv2peer << EOF
+  > command filedata
+  >     nodes eval:[b'\x7e\x58\x01\xb6\xd5\xf0\x3a\x5a\x54\xf3\xc4\x7b\x58\x3f\x75\x67\xaa\xd4\x3e\x5b']
+  >     path eval:b'a'
+  >     fields eval:[b'parents', b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filedata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          84
+        ]
+      ],
+      b'node': b'~X\x01\xb6\xd5\xf0:ZT\xf3\xc4{X?ug\xaa\xd4>[',
+      b'parents': [
+        b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na2\n'
+  ]
+
+  $ cat error.log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-command-filesdata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1164 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ cat > a << EOF
+  > a0
+  > 00000000000000000000000000000000000000
+  > 11111111111111111111111111111111111111
+  > EOF
+  $ cat > b << EOF
+  > b0
+  > aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  > bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  > EOF
+  $ mkdir -p dir0/child0 dir0/child1 dir1
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ echo e0 > dir0/child0/e
+  $ echo f0 > dir0/child1/f
+  $ hg -q commit -A -m 'commit 0'
+
+  $ echo a1 >> a
+  $ echo d1 > dir0/d
+  $ echo g0 > g
+  $ echo h0 > h
+  $ hg -q commit -A -m 'commit 1'
+  $ echo f1 > dir0/child1/f
+  $ echo i0 > dir0/i
+  $ hg -q commit -A -m 'commit 2'
+
+  $ hg -q up -r 0
+  $ echo a2 >> a
+  $ hg commit -m 'commit 3'
+  created new head
+
+  $ hg log -G -T '{rev}:{node} {desc}\n'
+  @  3:476fbf122cd82f6726f0191ff146f67140946abc commit 3
+  |
+  | o  2:b91c03cbba3519ab149b6cd0a0afbdb5cf1b5c8a commit 2
+  | |
+  | o  1:5b0b1a23577e205ea240e39c9704e28d7697cbd8 commit 1
+  |/
+  o  0:6e875ff18c227659ad6143bb3580c65700734884 commit 0
+  
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Omitting required arguments is an error
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  abort: missing required arguments: revisions!
+  [255]
+
+A bad pattern passed to pathfilter is rejected
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >          b'type': b'changesetexplicit',
+  >          b'nodes': [
+  >              b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >          ]}]
+  >     pathfilter eval:{b'include': [b'bad:foo']}
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  abort: include pattern must begin with `path:` or `rootfilesin:`; got bad:foo!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     pathfilter eval:{b'exclude': [b'glob:foo']}
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  abort: exclude pattern must begin with `path:` or `rootfilesin:`; got glob:foo!
+  [255]
+
+Fetching a single changeset without asserting parent data fetches all files
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 8,
+      b'totalpaths': 8
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
+Fetching a single changeset while asserting parent data is available fetches just the changed files
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 4,
+      b'totalpaths': 4
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
+A path filter for a sub-directory is honored
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     haveparents eval:True
+  >     pathfilter eval:{b'include': [b'path:dir0']}
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 1,
+      b'totalpaths': 1
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    }
+  ]
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     haveparents eval:True
+  >     pathfilter eval:{b'exclude': [b'path:a', b'path:g']}
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 2,
+      b'totalpaths': 2
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
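The accepted pattern kinds mirror the `pathfilterprefixes` advertised in the
capabilities earlier: `path:` matches a file exactly or everything beneath a
directory, while `rootfilesin:` matches only files directly inside a
directory (per Mercurial's matcher). A toy matcher under those assumptions:

    def pathmatches(path, pattern):
        kind, arg = pattern.split(b':', 1)
        if kind == b'path':
            # The path itself, or anything under it as a directory.
            return path == arg or path.startswith(arg + b'/')
        if kind == b'rootfilesin':
            # Direct children only: no further slash after the directory.
            rest = path[len(arg) + 1:]
            return path.startswith(arg + b'/') and b'/' not in rest
        raise ValueError('pattern must begin with `path:` or `rootfilesin:`')

    assert pathmatches(b'dir0/d', b'path:dir0')    # included above
    assert not pathmatches(b'dir0/child0/e', b'rootfilesin:dir0')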
+Requesting multiple changeset nodes without haveparents sends all data for both
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >             b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
+  >         ]}]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 10,
+      b'totalpaths': 9
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 2
+    },
+    {
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    {
+      b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'dir0/i',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
+Requesting multiple changeset nodes with haveparents sends incremental data for both
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >             b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
+  >         ]}]
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 6,
+      b'totalpaths': 6
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'dir0/i',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
+Requesting parents works
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 8,
+      b'totalpaths': 8
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11',
+      b'parents': [
+        b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G',
+      b'parents': [
+        b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+Requesting revision data works
+(haveparents defaults to False, so fulltext is emitted)
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 8,
+      b'totalpaths': 8
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          84
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n',
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    b'c0\n',
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    b'e0\n',
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    b'f0\n',
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    b'd1\n',
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    b'g0\n',
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    },
+    b'h0\n'
+  ]
+
+haveparents=False should be the same as above
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     fields eval:[b'revision']
+  >     haveparents eval:False
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 8,
+      b'totalpaths': 8
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          84
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na1\n',
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    b'c0\n',
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    b'e0\n',
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    b'f0\n',
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    b'd1\n',
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    b'g0\n',
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    },
+    b'h0\n'
+  ]
+
+haveparents=True should emit a delta
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >         ]}]
+  >     fields eval:[b'revision']
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 4,
+      b'totalpaths': 4
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 1
+    },
+    {
+      b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n',
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 1
+    },
+    {
+      b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n',
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    b'g0\n',
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    },
+    b'h0\n'
+  ]
+
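Comparing this response with the haveparents=False run above shows the
emission policy: once the client asserts it has the parents, any revision
with a known parent arrives as a delta against it, while filelog root
revisions (`g` and `h`, whose p1 is the null node) still arrive as
fulltexts. A toy version of that decision (illustrative only):

    NULLID = b'\x00' * 20

    def emissionkind(p1, clienthaveparents):
        # Delta against p1 only when the client claims to have it and the
        # revision actually has a parent; otherwise send the fulltext.
        if clienthaveparents and p1 != NULLID:
            return b'delta'
        return b'revision'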
+Requesting multiple revisions works
+(first revision is a fulltext since haveparents=False by default)
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84',
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >             b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
+  >         ]}]
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 12,
+      b'totalpaths': 9
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
+    },
+    b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\n',
+    {
+      b'deltabasenode': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    b'\x00\x00\x00Q\x00\x00\x00Q\x00\x00\x00\x03a1\n',
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          81
+        ]
+      ],
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    b'b0\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n',
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    b'c0\n',
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    b'e0\n',
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    b'f0\n',
+    {
+      b'deltabasenode': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03f1\n',
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&'
+    },
+    b'd0\n',
+    {
+      b'deltabasenode': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          15
+        ]
+      ],
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03d1\n',
+    {
+      b'path': b'dir0/i',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
+    },
+    b'i0\n',
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    b'g0\n',
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          3
+        ]
+      ],
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    },
+    b'h0\n'
+  ]
+
+Requesting the linknode field works
+
+  $ sendhttpv2peer << EOF
+  > command filesdata
+  >     revisions eval:[{
+  >         b'type': b'changesetexplicit',
+  >         b'nodes': [
+  >             b'\x6e\x87\x5f\xf1\x8c\x22\x76\x59\xad\x61\x43\xbb\x35\x80\xc6\x57\x00\x73\x48\x84',
+  >             b'\x5b\x0b\x1a\x23\x57\x7e\x20\x5e\xa2\x40\xe3\x9c\x97\x04\xe2\x8d\x76\x97\xcb\xd8',
+  >             b'\xb9\x1c\x03\xcb\xba\x35\x19\xab\x14\x9b\x6c\xd0\xa0\xaf\xbd\xb5\xcf\x1b\x5c\x8a',
+  >         ]}]
+  >     fields eval:[b'linknode']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending filesdata command
+  response: gen[
+    {
+      b'totalitems': 12,
+      b'totalpaths': 9
+    },
+    {
+      b'path': b'a',
+      b'totalitems': 2
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'd\x9d\x14\x9d\xf4=\x83\x88%#\xb7\xfb\x1ej:\xf6\xf1\x90{9'
+    },
+    {
+      b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
+      b'node': b'\n\x862\x1f\x13y\xd1\xa9\xec\xd0W\x9a"\x97z\xf7\xa5\xac\xaf\x11'
+    },
+    {
+      b'path': b'b',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'\x88\xbac\xb8\xd8\xc6 :\xc6z\xc9\x98\xac\xd9\x17K\xf7\x05!\xb2'
+    },
+    {
+      b'path': b'dir0/c',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'\x91DE4j\x0c\xa0b\x9b\xd4|\xeb]\xfe\x07\xe4\xd4\xcf%\x01'
+    },
+    {
+      b'path': b'dir0/child0/e',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'\xbb\xbal\x06\xb3\x0fD=4\xff\x84\x1b\xc9\x85\xc4\xd0\x82|k\xe4'
+    },
+    {
+      b'path': b'dir0/child1/f',
+      b'totalitems': 2
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'\x12\xfc}\xcdw;Z\n\x92\x9c\xe1\x95"\x80\x83\xc6\xdd\xc9\xce\xc4'
+    },
+    {
+      b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a',
+      b'node': b'(\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e'
+    },
+    {
+      b'path': b'dir0/d',
+      b'totalitems': 2
+    },
+    {
+      b'linknode': b'n\x87_\xf1\x8c"vY\xadaC\xbb5\x80\xc6W\x00sH\x84',
+      b'node': b'S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4&'
+    },
+    {
+      b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
+      b'node': b'\x93\x88)\xad\x01R}2\xba\x06_\x81#6\xfe\xc7\x9d\xdd9G'
+    },
+    {
+      b'path': b'dir0/i',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'\xb9\x1c\x03\xcb\xba5\x19\xab\x14\x9bl\xd0\xa0\xaf\xbd\xb5\xcf\x1b\\\x8a',
+      b'node': b'\xd7t\xb5\x80Jq\xfd1\xe1\xae\x05\xea\x8e2\xdd\x9b\xa3\xd8S\xd7'
+    },
+    {
+      b'path': b'g',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
+      b'node': b'\xde\xca\xba5DFjI\x95r\xe9\x0f\xac\xe6\xfa\x0c!k\xba\x8c'
+    },
+    {
+      b'path': b'h',
+      b'totalitems': 1
+    },
+    {
+      b'linknode': b'[\x0b\x1a#W~ ^\xa2@\xe3\x9c\x97\x04\xe2\x8dv\x97\xcb\xd8',
+      b'node': b'\x03A\xfc\x84\x1b\xb5\xb4\xba\x93\xb2mM\xdaa\xf7y6]\xb3K'
+    }
+  ]
+
+  $ cat error.log
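
The filesdata responses above are rendered from a stream of concatenated
CBOR values: metadata maps interleaved with raw bytestrings carrying
revision or delta data. A minimal sketch of decoding such a value stream,
assuming the third-party cbor2 package as a stand-in for Mercurial's
vendored cborutil:

    import io

    import cbor2  # assumption: stand-in for mercurial.utils.cborutil

    def decode_value_stream(payload):
        """Yield each top-level CBOR value from a concatenated stream."""
        fp = io.BytesIO(payload)
        while fp.tell() < len(payload):
            # cbor2.load() consumes exactly one value and leaves the
            # stream positioned at the start of the next one.
            yield cbor2.load(fp)

Maps decode to dicts like {b'path': b'dir0/c', b'totalitems': 1}, matching
the response: gen[...] listings emitted by debugwireproto.
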
--- a/tests/test-wireproto-command-heads.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-heads.t	Mon Oct 22 14:46:06 2018 -0400
@@ -35,30 +35,11 @@
   > EOF
   creating http peer for wire protocol version 2
   sending heads command
-  s>     POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 20\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEheads
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     53\r\n
-  s>     K\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\x83T\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0bT\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^T)Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A
-  s>     \r\n
-  received frame(size=75; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: [b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b', b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^', b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A']
+  response: [
+    b'\x1dok\x91\xd4J\xab\xa6\xd5\xe5\x80\xbc0\xa9\x94\x850\xdb\xe0\x0b',
+    b'\xaeI.6\xb0\xc83\x9f\xfa\xf3(\xd0\x0b\x85\xb4R]\xe1\x16^',
+    b')Dm-\xc5A\x9c_\x97Dz\x8b\xc0b\xe4\xcc2\x8b\xf2A'
+  ]
 
 Requesting just the public heads works
 
@@ -68,29 +49,8 @@
   > EOF
   creating http peer for wire protocol version 2
   sending heads command
-  s>     POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 39\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     \x1f\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1JpubliconlyA1DnameEheads
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     29\r\n
-  s>     !\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\x81Tx\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc
-  s>     \r\n
-  received frame(size=33; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: [b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc']
+  response: [
+    b'x\xd2\xdc\xa46\xb2\xf5\xb1\x88\xac&~)\xb8\x1e\x07&m8\xfc'
+  ]
 
   $ cat error.log
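
The raw s> traces deleted above each begin with an 8-byte frame header. As
exercised by these tests, the layout is: a 3-byte little-endian payload
length, a 2-byte little-endian request ID, a 1-byte stream ID, a 1-byte
stream-flags field, and a final byte packing the frame type in the high
nibble and frame flags in the low nibble. A sketch of a decoder in plain
Python, independent of Mercurial's own wireprotoframing module:

    import struct

    def parse_frame_header(header):
        """Decode the fixed 8-byte header of a framing-protocol frame."""
        assert len(header) == 8
        size = int.from_bytes(header[:3], 'little')
        request_id, stream_id, stream_flags, typeflags = struct.unpack(
            '<HBBB', header[3:])
        return {
            'size': size,
            'request': request_id,
            'stream': stream_id,
            'streamflags': stream_flags,
            'type': typeflags >> 4,     # e.g. 3 == command-response
            'flags': typeflags & 0x0f,  # e.g. 2 == eos
        }

    # parse_frame_header(b'K\x00\x00\x01\x00\x02\x012') recovers
    # size=75 request=1 stream=2 streamflags=1 type=3 flags=2, matching
    # the "received frame(size=75; ...)" summary that replaced the trace.
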
--- a/tests/test-wireproto-command-known.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-known.t	Mon Oct 22 14:46:06 2018 -0400
@@ -27,29 +27,6 @@
   > EOF
   creating http peer for wire protocol version 2
   sending known command
-  s>     POST /api/exp-http-v2-0001/ro/known HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 20\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEknown
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     14\r\n
-  s>     \x0c\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok@
-  s>     \r\n
-  received frame(size=12; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
   response: []
 
 Single known node works
@@ -60,30 +37,9 @@
   > EOF
   creating http peer for wire protocol version 2
   sending known command
-  s>     POST /api/exp-http-v2-0001/ro/known HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 54\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     .\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Enodes\x81TBk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0DnameEknown
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     15\r\n
-  s>     \r\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBokA1
-  s>     \r\n
-  received frame(size=13; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: [True]
+  response: [
+    True
+  ]
 
 Multiple nodes works
 
@@ -93,29 +49,10 @@
   > EOF
   creating http peer for wire protocol version 2
   sending known command
-  s>     POST /api/exp-http-v2-0001/ro/known HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 96\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     X\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Enodes\x83TBk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0T00000000000000000000T\x11$x\x96)a\x14q$\xed\xd45I\xae\xdd\x1a3^D\xbfDnameEknown
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     17\r\n
-  s>     \x0f\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBokC101
-  s>     \r\n
-  received frame(size=15; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: [True, False, True]
+  response: [
+    True,
+    False,
+    True
+  ]
 
   $ cat error.log
--- a/tests/test-wireproto-command-listkeys.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-listkeys.t	Mon Oct 22 14:46:06 2018 -0400
@@ -31,30 +31,11 @@
   > EOF
   creating http peer for wire protocol version 2
   sending listkeys command
-  s>     POST /api/exp-http-v2-0001/ro/listkeys HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 50\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     *\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1InamespaceJnamespacesDnameHlistkeys
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     33\r\n
-  s>     +\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa3Fphases@Ibookmarks@Jnamespaces@
-  s>     \r\n
-  received frame(size=43; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: {b'bookmarks': b'', b'namespaces': b'', b'phases': b''}
+  response: {
+    b'bookmarks': b'',
+    b'namespaces': b'',
+    b'phases': b''
+  }
 
 Request for phases works
 
@@ -64,30 +45,10 @@
   > EOF
   creating http peer for wire protocol version 2
   sending listkeys command
-  s>     POST /api/exp-http-v2-0001/ro/listkeys HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 46\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     &\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1InamespaceFphasesDnameHlistkeys
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     50\r\n
-  s>     H\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa2JpublishingDTrueX(be0ef73c17ade3fc89dc41701eb9fc3a91b58282A1
-  s>     \r\n
-  received frame(size=72; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: {b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1', b'publishing': b'True'}
+  response: {
+    b'be0ef73c17ade3fc89dc41701eb9fc3a91b58282': b'1',
+    b'publishing': b'True'
+  }
 
 Request for bookmarks works
 
@@ -97,29 +58,8 @@
   > EOF
   creating http peer for wire protocol version 2
   sending listkeys command
-  s>     POST /api/exp-http-v2-0001/ro/listkeys HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 49\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     )\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1InamespaceIbookmarksDnameHlistkeys
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     40\r\n
-  s>     8\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa1A@X(26805aba1e600a82e93661149f2313866a221a7b
-  s>     \r\n
-  received frame(size=56; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: {b'@': b'26805aba1e600a82e93661149f2313866a221a7b'}
+  response: {
+    b'@': b'26805aba1e600a82e93661149f2313866a221a7b'
+  }
 
   $ cat error.log
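
Inside each command-request frame, the payload deleted from the traces
above (e.g. \xa2Dargs\xa1InamespaceJnamespacesDnameHlistkeys) is a
canonically encoded CBOR map of the command name and its arguments. A
sketch of reproducing it, again assuming cbor2 in place of Mercurial's
vendored encoder:

    import cbor2  # assumption: stand-in for Mercurial's CBOR support

    def encode_command_request(name, args=None):
        """Build the CBOR payload of a command-request frame."""
        payload = {b'name': name}
        if args:
            payload[b'args'] = args
        # Canonical encoding sorts map keys by their encoded bytes,
        # which is why b'args' precedes b'name' in the captured payloads.
        return cbor2.dumps(payload, canonical=True)

encode_command_request(b'listkeys', {b'namespace': b'namespaces'}) should
round-trip to the 42-byte payload shown in the deleted content-length: 50
request (50 bytes total, including the 8-byte frame header).
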
--- a/tests/test-wireproto-command-lookup.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-lookup.t	Mon Oct 22 14:46:06 2018 -0400
@@ -27,29 +27,6 @@
   > EOF
   creating http peer for wire protocol version 2
   sending lookup command
-  s>     *\r\n (glob)
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 73\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     A\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1CkeyX(426bada5c67598ca65036d57d9e4b64b0c1ce7a0DnameFlookup
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     28\r\n
-  s>      \x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBokTBk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0
-  s>     \r\n
-  received frame(size=32; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
   response: b'Bk\xad\xa5\xc6u\x98\xcae\x03mW\xd9\xe4\xb6K\x0c\x1c\xe7\xa0'
 
   $ cat error.log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-command-manifestdata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,358 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ echo a0 > a
+  $ echo b0 > b
+  $ mkdir -p dir0/child0 dir0/child1 dir1
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ echo e0 > dir0/child0/e
+  $ echo f0 > dir0/child1/f
+  $ hg -q commit -A -m 'commit 0'
+
+  $ echo a1 > a
+  $ echo d1 > dir0/d
+  $ hg commit -m 'commit 1'
+  $ echo f0 > dir0/child1/f
+  $ hg commit -m 'commit 2'
+  nothing changed
+  [1]
+
+  $ hg -q up -r 0
+  $ echo a2 > a
+  $ hg commit -m 'commit 3'
+  created new head
+
+  $ hg log -G -T '{rev}:{node} {desc}\n'
+  @  2:c8757a2ffe552850d1e0dfe60d295ebf64c196d9 commit 3
+  |
+  | o  1:650165e803375748a94df471e5b58d85763e0b29 commit 1
+  |/
+  o  0:6d85ca1270b377d320098556ba5bfad34a9ee12d commit 0
+  
+
+  $ hg --debug debugindex -m
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 91e0bdbfb0dde0023fa063edc1445f207a22eac7 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000
+       2       2 46a6721b5edaf0ea04b79a5cb3218854a4d2aba0 1b175b595f022cfab5b809cc0ed551bd0b3ff5e4 0000000000000000000000000000000000000000
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Omitting required arguments is an error
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  abort: missing required arguments: nodes, tree!
+  [255]
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[]
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  abort: missing required arguments: tree!
+  [255]
+
+Unknown node is an error
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa']
+  >     tree eval:b''
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  abort: unknown node: \xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa! (esc)
+  [255]
+
+Fetching a single revision returns just metadata by default
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    }
+  ]
+
+Requesting parents works
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0',
+      b'parents': [
+        b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+Requesting revision data works
+(haveparents defaults to false, so fulltext is emitted)
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n'
+  ]
+
+haveparents=False yields same output
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  >     haveparents eval:False
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'a\x000879345e39377229634b420c639454156726c6b6\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n'
+  ]
+
+haveparents=True will emit a delta
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          55
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
+  ]
+
+Requesting multiple revisions works
+(haveparents defaults to false, so fulltext is emitted unless a parent
+has been emitted)
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
+    },
+    b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
+    {
+      b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          55
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
+  ]
+
+With haveparents=True, first revision is a delta instead of fulltext
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  >     haveparents eval:True
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
+    },
+    b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
+    {
+      b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          55
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
+  ]
+
+Revisions are sorted by DAG order, parents first
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0', b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4']
+  >     tree eval:b''
+  >     fields eval:[b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4'
+    },
+    b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
+    {
+      b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          55
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0'
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
+  ]
+
+Requesting parents and revision data works
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x1b\x17\x5b\x59\x5f\x02\x2c\xfa\xb5\xb8\x09\xcc\x0e\xd5\x51\xbd\x0b\x3f\xf5\xe4', b'\x46\xa6\x72\x1b\x5e\xda\xf0\xea\x04\xb7\x9a\x5c\xb3\x21\x88\x54\xa4\xd2\xab\xa0']
+  >     tree eval:b''
+  >     fields eval:[b'parents', b'revision']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 2
+    },
+    {
+      b'fieldsfollowing': [
+        [
+          b'revision',
+          292
+        ]
+      ],
+      b'node': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    b'a\x002b4eb07319bfa077a40a2f04913659aef0da42da\nb\x00819e258d31a5e1606629f365bb902a1b21ee4216\ndir0/c\x00914445346a0ca0629bd47ceb5dfe07e4d4cf2501\ndir0/child0/e\x00bbba6c06b30f443d34ff841bc985c4d0827c6be4\ndir0/child1/f\x0012fc7dcd773b5a0a929ce195228083c6ddc9cec4\ndir0/d\x00538206dc971e521540d6843abfe6d16032f6d426\n',
+    {
+      b'deltabasenode': b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+      b'fieldsfollowing': [
+        [
+          b'delta',
+          55
+        ]
+      ],
+      b'node': b'F\xa6r\x1b^\xda\xf0\xea\x04\xb7\x9a\\\xb3!\x88T\xa4\xd2\xab\xa0',
+      b'parents': [
+        b'\x1b\x17[Y_\x02,\xfa\xb5\xb8\t\xcc\x0e\xd5Q\xbd\x0b?\xf5\xe4',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    },
+    b'\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x000879345e39377229634b420c639454156726c6b6\n'
+  ]
+
+  $ cat error.log
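
The delta payloads above use Mercurial's binary patch format: a sequence
of chunks, each a 12-byte big-endian header of (start, end, data length)
followed by the replacement data. A minimal sketch of applying one to a
parent fulltext, assuming plain Python with no Mercurial imports:

    import struct

    def apply_delta(base, delta):
        """Apply a chain of (start, end, length, data) patch chunks."""
        out = []
        pos = 0  # cursor into base
        i = 0    # cursor into delta
        while i < len(delta):
            start, end, length = struct.unpack('>III', delta[i:i + 12])
            i += 12
            out.append(base[pos:start])      # unchanged region before chunk
            out.append(delta[i:i + length])  # replacement data
            pos = end
            i += length
        out.append(base[pos:])               # unchanged tail
        return b''.join(out)

For example, the 55-byte deltas above (a 12-byte header plus 43 bytes of
data) replace bytes 0 through 43 of the parent manifest, i.e. the line for
file a, yielding the same fulltext a client would have received with
haveparents=False.
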
--- a/tests/test-wireproto-command-pushkey.t	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-command-pushkey.t	Mon Oct 22 14:46:06 2018 -0400
@@ -30,29 +30,6 @@
   > EOF
   creating http peer for wire protocol version 2
   sending pushkey command
-  s>     *\r\n (glob)
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 105\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     a\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa4CkeyA@CnewX(426bada5c67598ca65036d57d9e4b64b0c1ce7a0Cold@InamespaceIbookmarksDnameGpushkey
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     14\r\n
-  s>     \x0c\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xf5
-  s>     \r\n
-  received frame(size=12; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
   response: True
 
   $ sendhttpv2peer << EOF
@@ -61,29 +38,8 @@
   > EOF
   creating http peer for wire protocol version 2
   sending listkeys command
-  s>     POST /api/exp-http-v2-0001/ro/listkeys HTTP/1.1\r\n
-  s>     Accept-Encoding: identity\r\n
-  s>     accept: application/mercurial-exp-framing-0005\r\n
-  s>     content-type: application/mercurial-exp-framing-0005\r\n
-  s>     content-length: 49\r\n
-  s>     host: $LOCALIP:$HGPORT\r\n (glob)
-  s>     user-agent: Mercurial debugwireproto\r\n
-  s>     \r\n
-  s>     )\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1InamespaceIbookmarksDnameHlistkeys
-  s> makefile('rb', None)
-  s>     HTTP/1.1 200 OK\r\n
-  s>     Server: testing stub value\r\n
-  s>     Date: $HTTP_DATE$\r\n
-  s>     Content-Type: application/mercurial-exp-framing-0005\r\n
-  s>     Transfer-Encoding: chunked\r\n
-  s>     \r\n
-  s>     40\r\n
-  s>     8\x00\x00\x01\x00\x02\x012
-  s>     \xa1FstatusBok\xa1A@X(426bada5c67598ca65036d57d9e4b64b0c1ce7a0
-  s>     \r\n
-  received frame(size=56; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
-  s>     0\r\n
-  s>     \r\n
-  response: {b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0'}
+  response: {
+    b'@': b'426bada5c67598ca65036d57d9e4b64b0c1ce7a0'
+  }
 
   $ cat error.log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-command-rawstorefiledata.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,121 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+  $ echo a1 > a
+  $ hg commit -m 'commit 1'
+  $ mkdir dir0
+  $ mkdir dir1
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ echo e0 > dir1/e
+  $ echo f0 > dir1/f
+  $ hg commit -A -m 'commit 2'
+  adding dir0/c
+  adding dir0/d
+  adding dir1/e
+  adding dir1/f
+  $ echo f1 > dir1/f
+  $ hg commit -m 'commit 3'
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+Missing required argument results in an error
+
+  $ sendhttpv2peer << EOF
+  > command rawstorefiledata
+  > EOF
+  creating http peer for wire protocol version 2
+  sending rawstorefiledata command
+  abort: missing required arguments: files!
+  [255]
+
+An unknown files value results in an error
+
+  $ sendhttpv2peer << EOF
+  > command rawstorefiledata
+  >     files eval:[b'unknown']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending rawstorefiledata command
+  abort: unknown file type: unknown!
+  [255]
+
+Requesting just changelog works
+
+  $ sendhttpv2peer << EOF
+  > command rawstorefiledata
+  >     files eval:[b'changelog']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending rawstorefiledata command
+  response: gen[
+    {
+      b'filecount': 1,
+      b'totalsize': 527
+    },
+    {
+      b'location': b'store',
+      b'path': b'00changelog.i',
+      b'size': 527
+    },
+    b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3',
+    b''
+  ]
+
+Requesting just manifestlog works (as impractical as that operation may be).
+
+  $ sendhttpv2peer << EOF
+  > command rawstorefiledata
+  >     files eval:[b'manifestlog']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending rawstorefiledata command
+  response: gen[
+    {
+      b'filecount': 1,
+      b'totalsize': 584
+    },
+    {
+      b'location': b'store',
+      b'path': b'00manifest.i',
+      b'size': 584
+    },
+    b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n',
+    b''
+  ]
+
+Requesting both changelog and manifestlog works.
+
+  $ sendhttpv2peer << EOF
+  > command rawstorefiledata
+  >     files eval:[b'changelog', b'manifestlog']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending rawstorefiledata command
+  response: gen[
+    {
+      b'filecount': 2,
+      b'totalsize': 1111
+    },
+    {
+      b'location': b'store',
+      b'path': b'00manifest.i',
+      b'size': 584
+    },
+    b'\x00\x03\x00\x01\x00\x00\x00\x00\x00\x00\x00I\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c\r\xca\xc1\x11\x00!\x08\x040\xdfV\x03+\xa2\x94\xb3\x8c\xd0\x7f\twy\x87\x03i\x95r\x96F6\xe5\x1c\x9a\x10-\x16\xba|\x07\xab\xe5\xd1\xf08s\\\x8d\xc2\xbeo)w\xa9\x8b;\xa2\xff\x95\x19\x02jB\xab\x0c\xea\xf3\x03\xcf\x1d\x16\t\x00\x00\x00\x00\x00I\x00\x00\x00\x00\x007\x00\x00\x00V\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xff\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+\x00\x00\x00+a\x009a38122997b3ac97be2a9aa2e556838341fdf2cc\n\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x8c\x00\x00\x01\x16\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xbcL\xdb}\x10{\xe2w\xaa\xdb"rC\xdf\xb3\xe0M\xd5,\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xcd\xb9\rB1\x10\x00Q\xc7\xbf\x19\xf6\xb6\xdd\x08\xb9\xf7\x92H\xa9\x90\xd2\xb8\x82\xc9\x9e4c\x8c\xfb\xf8\xf7\xca\xc7\x13n16\x8a\x88\xb2\xd8\x818`\xb4=eF\xb9f\x17\xcc\x92\x94hR\xc0\xeb\xe7s(/\x02\xcb\xd8\x13K\tU m\t\x1f\xef\xb2D\x03\xa6\xb6\x14\xb2\xaf\xc7[\rw?\x16`\xce\xd0"\x9c,\xddK\xd0c/\rIX4\xc3\xbc\xe4\xef{ u\xcc\x8c\x9c\x93]\x0f\x9cM;\n\xb7\x12-X\x1c\x96\x9fuT\xc8\xf5\x06\x88\xa25W\x00\x00\x00\x00\x01\x0c\x00\x00\x00\x00\x00<\x00\x00\x01\x16\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x90#\x1d\xdc\xa3o\xa1x\xa0\xee\xd9\x9b\xd00x\x11$\x87\xdd\xa3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe6\x00\x00\x01\x16\x00\x00\x000dir1/f\x0028c776ae08d0d55eb40648b401b90ff54448348e\n',
+    b'',
+    {
+      b'location': b'store',
+      b'path': b'00changelog.i',
+      b'size': 527
+    },
+    b'\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00?\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u992f4779029a3df8d0666d00bb924f69634e2641\ntest\n0 0\na\nb\n\ncommit 0\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00>\x00\x00\x00=\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff\xff\xff\xffD2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ua988fb43583e871d1ed5750ee074c6d840bbbfc8\ntest\n0 0\na\n\ncommit 1\x00\x00\x00\x00\x00~\x00\x00\x00\x00\x00N\x00\x00\x00W\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\xa4r\xd2\xea\x96U\x1a\x1e\xbb\x011-\xb2\xe6\xa7\x86\xd0F\x96o\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x9c%\xc5\xc1\t\xc0 \x0c\x05\xd0{\xa6p\x03cjI\xd71\xf9\x11<H\xa1u\x7fJ\xf1]\x9eyu\x98\xa2\xb0Z\x88jk0\x11\x95z\xa0\xdb\x11\\\x81S\xfc*\xb4\xe2]\xc4\x89\t\xe3\xe1\xec;\xfc\x95\x1c\xbbN\xe4\xf7\x9cc%\xf9\x00S#\x19\x13\x00\x00\x00\x00\x00\xcc\x00\x00\x00\x00\x00C\x00\x00\x00B\x00\x00\x00\x03\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x85kg{\x94a\x12i\xc5lW5[\x85\xf9\x95|\xfc\xc1\xb9\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00u90231ddca36fa178a0eed99bd03078112487dda3\ntest\n0 0\ndir1/f\n\ncommit 3',
+    b''
+  ]
+
+  $ cat error.log
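
The rawstorefiledata responses above dump raw revlog files. Each index
entry in a version 1 revlog is 64 bytes; a sketch of the layout, matching
the struct format Mercurial itself uses, with simplified field handling:

    import struct

    INDEX_V1 = struct.Struct('>Qiiiiii20s12x')  # 64 bytes per entry

    def parse_index_entry(data, offset):
        """Decode one revlogv1 index entry starting at `offset`."""
        (offset_flags, comp_len, raw_len, base_rev,
         link_rev, p1, p2, node) = INDEX_V1.unpack_from(data, offset)
        return {
            'offset': offset_flags >> 16,    # 48-bit data offset
            'flags': offset_flags & 0xffff,  # per-revision flags
            'complen': comp_len,             # stored (compressed) length
            'rawlen': raw_len,               # fulltext length
            'baserev': base_rev,             # base of the delta chain
            'linkrev': link_rev,             # linked changelog revision
            'p1': p1,
            'p2': p2,
            'node': node,                    # 20-byte node id (12 pad bytes)
        }

Note that the first four bytes of the file double as the revlog header, so
for revision 0 the high bytes of offset_flags carry the flags and version
(here \x00\x01\x00\x01: the inline-data flag plus version 1). In inline
revlogs like these, each revision's data immediately follows its index
entry rather than living in a separate .d file.
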
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-content-redirects.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1470 @@
+  $ . $TESTDIR/wireprotohelpers.sh
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > blackbox =
+  > [blackbox]
+  > track = simplecache
+  > EOF
+
+  $ hg init server
+  $ enablehttpv2 server
+  $ cd server
+  $ cat >> .hg/hgrc << EOF
+  > [server]
+  > compressionengines = zlib
+  > [extensions]
+  > simplecache = $TESTDIR/wireprotosimplecache.py
+  > [simplecache]
+  > cacheapi = true
+  > EOF
+
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+  $ echo a1 > a
+  $ hg commit -m 'commit 1'
+
+  $ hg --debug debugindex -m
+     rev linkrev nodeid                                   p1                                       p2
+       0       0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+       1       1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
+
+  $ hg --config simplecache.redirectsfile=redirects.py serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ cat > redirects.py << EOF
+  > [
+  >   {
+  >     b'name': b'target-a',
+  >     b'protocol': b'http',
+  >     b'snirequired': False,
+  >     b'tlsversions': [b'1.2', b'1.3'],
+  >     b'uris': [b'http://example.com/'],
+  >   },
+  > ]
+  > EOF
+
+Redirect targets advertised when configured
+
+  $ sendhttpv2peerhandshake << EOF
+  > command capabilities
+  > EOF
+  creating http peer for wire protocol version 2
+  s>     GET /?cmd=capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
+  s>     x-hgproto-1: cbor\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
+  s>     accept: application/mercurial-0.1\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-cbor\r\n
+  s>     Content-Length: 2259\r\n
+  s>     \r\n
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  (remote redirect target target-a is compatible)
+  sending capabilities command
+  s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 111\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
+  s>     \r\n
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     6d1\r\n
+  s>     \xc9\x06\x00\x01\x00\x02\x041
+  s>     \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/
+  s>     \r\n
+  received frame(size=1737; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     0\r\n
+  s>     \r\n
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ],
+      b'redirect': {
+        b'hashes': [
+          b'sha256',
+          b'sha1'
+        ],
+        b'targets': [
+          {
+            b'name': b'target-a',
+            b'protocol': b'http',
+            b'snirequired': False,
+            b'tlsversions': [
+              b'1.2',
+              b'1.3'
+            ],
+            b'uris': [
+              b'http://example.com/'
+            ]
+          }
+        ]
+      }
+    }
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+Unknown protocol is filtered from compatible targets
+
+  $ cat > redirects.py << EOF
+  > [
+  >   {
+  >     b'name': b'target-a',
+  >     b'protocol': b'http',
+  >     b'uris': [b'http://example.com/'],
+  >   },
+  >   {
+  >     b'name': b'target-b',
+  >     b'protocol': b'unknown',
+  >     b'uris': [b'unknown://example.com/'],
+  >   },
+  > ]
+  > EOF
+
+  $ sendhttpv2peerhandshake << EOF
+  > command capabilities
+  > EOF
+  creating http peer for wire protocol version 2
+  s>     GET /?cmd=capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
+  s>     x-hgproto-1: cbor\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
+  s>     accept: application/mercurial-0.1\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-cbor\r\n
+  s>     Content-Length: 2286\r\n
+  s>     \r\n
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  (remote redirect target target-a is compatible)
+  (remote redirect target target-b uses unsupported protocol: unknown)
+  sending capabilities command
+  s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 111\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
+  s>     \r\n
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     6ec\r\n
+  s>     \xe4\x06\x00\x01\x00\x02\x041
+  s>     \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/
+  s>     \r\n
+  received frame(size=1764; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     0\r\n
+  s>     \r\n
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ],
+      b'redirect': {
+        b'hashes': [
+          b'sha256',
+          b'sha1'
+        ],
+        b'targets': [
+          {
+            b'name': b'target-a',
+            b'protocol': b'http',
+            b'uris': [
+              b'http://example.com/'
+            ]
+          },
+          {
+            b'name': b'target-b',
+            b'protocol': b'unknown',
+            b'uris': [
+              b'unknown://example.com/'
+            ]
+          }
+        ]
+      }
+    }
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
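+The two parenthesized notes above come from client-side filtering of the
+advertised redirect targets: targets whose protocol the client can speak
+are kept, everything else is dropped, and only the surviving names are
+echoed back in the command request (note "targets\x81Htarget-a" in the
+request frame above). A minimal sketch of that filtering, with
+hypothetical helper and variable names (the real logic lives in the
+HTTP peer code):
+
+    SUPPORTED_PROTOCOLS = {b'http', b'https'}
+
+    def filtertargets(targets):
+        """Split advertised redirect targets into usable and rejected."""
+        usable, rejected = [], []
+        for target in targets:
+            if target[b'protocol'] in SUPPORTED_PROTOCOLS:
+                usable.append(target[b'name'])
+            else:
+                rejected.append((target[b'name'], target[b'protocol']))
+        return usable, rejected
+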
+Missing SNI support filters targets that require SNI
+
+  $ cat > nosni.py << EOF
+  > from mercurial import sslutil
+  > sslutil.hassni = False
+  > EOF
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > nosni=`pwd`/nosni.py
+  > EOF
+
+  $ cat > redirects.py << EOF
+  > [
+  >   {
+  >     b'name': b'target-bad-tls',
+  >     b'protocol': b'https',
+  >     b'uris': [b'https://example.com/'],
+  >     b'snirequired': True,
+  >   },
+  > ]
+  > EOF
+
+  $ sendhttpv2peerhandshake << EOF
+  > command capabilities
+  > EOF
+  creating http peer for wire protocol version 2
+  s>     GET /?cmd=capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
+  s>     x-hgproto-1: cbor\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
+  s>     accept: application/mercurial-0.1\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-cbor\r\n
+  s>     Content-Length: 2246\r\n
+  s>     \r\n
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  (redirect target target-bad-tls requires SNI, which is unsupported)
+  sending capabilities command
+  s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 102\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
+  s>     \r\n
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     6c4\r\n
+  s>     \xbc\x06\x00\x01\x00\x02\x041
+  s>     \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/
+  s>     \r\n
+  received frame(size=1724; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     0\r\n
+  s>     \r\n
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ],
+      b'redirect': {
+        b'hashes': [
+          b'sha256',
+          b'sha1'
+        ],
+        b'targets': [
+          {
+            b'name': b'target-bad-tls',
+            b'protocol': b'https',
+            b'snirequired': True,
+            b'uris': [
+              b'https://example.com/'
+            ]
+          }
+        ]
+      }
+    }
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > nosni=!
+  > EOF
+
+Targets requiring unknown TLS versions are filtered from the compatible
+targets (a sketch of the check follows the definition below)
+
+  $ cat > redirects.py << EOF
+  > [
+  >   {
+  >     b'name': b'target-bad-tls',
+  >     b'protocol': b'https',
+  >     b'uris': [b'https://example.com/'],
+  >     b'tlsversions': [b'42', b'39'],
+  >   },
+  > ]
+  > EOF
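+
+This exercises the TLS compatibility check: a redirect target may declare
+the TLS versions it requires, and the client discards any target whose
+requirements it cannot meet, then advertises an empty target list in its
+command request (the trailing "targets\x80" in the request frame below).
+A minimal sketch of the check, assuming a hard-coded set of locally
+supported versions (the real client would consult mercurial.sslutil):
+
+    def tlscompatible(target, supported=frozenset({b'1.1', b'1.2'})):
+        """True if this client can satisfy the target's TLS needs."""
+        required = target.get(b'tlsversions')
+        if required is None:
+            return True
+        # Any overlap between required and locally supported versions
+        # makes the redirect usable.
+        return bool(set(required) & supported)
+
+Here b'42' and b'39' overlap with nothing, so target-bad-tls is dropped.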
+
+  $ sendhttpv2peerhandshake << EOF
+  > command capabilities
+  > EOF
+  creating http peer for wire protocol version 2
+  s>     GET /?cmd=capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     vary: X-HgProto-1,X-HgUpgrade-1\r\n
+  s>     x-hgproto-1: cbor\r\n
+  s>     x-hgupgrade-1: exp-http-v2-0003\r\n
+  s>     accept: application/mercurial-0.1\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-cbor\r\n
+  s>     Content-Length: 2252\r\n
+  s>     \r\n
+  s>     \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+  (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
+  sending capabilities command
+  s>     POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     accept: application/mercurial-exp-framing-0006\r\n
+  s>     content-type: application/mercurial-exp-framing-0006\r\n
+  s>     content-length: 102\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     user-agent: Mercurial debugwireproto\r\n
+  s>     \r\n
+  s>     \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-exp-framing-0006\r\n
+  s>     Transfer-Encoding: chunked\r\n
+  s>     \r\n
+  s>     11\r\n
+  s>     \t\x00\x00\x01\x00\x02\x01\x92
+  s>     Hidentity
+  s>     \r\n
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  s>     13\r\n
+  s>     \x0b\x00\x00\x01\x00\x02\x041
+  s>     \xa1FstatusBok
+  s>     \r\n
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     6ca\r\n
+  s>     \xc2\x06\x00\x01\x00\x02\x041
+  s>     \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/
+  s>     \r\n
+  received frame(size=1730; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  s>     8\r\n
+  s>     \x00\x00\x00\x01\x00\x02\x002
+  s>     \r\n
+  s>     0\r\n
+  s>     \r\n
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  response: gen[
+    {
+      b'commands': {
+        b'branchmap': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'capabilities': {
+          b'args': {},
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'changesetdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'bookmarks',
+                b'parents',
+                b'phase',
+                b'revision'
+              ])
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filedata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'path': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'filesdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'firstchangeset',
+                b'linknode',
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'dict'
+            },
+            b'revisions': {
+              b'required': True,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 50000
+        },
+        b'heads': {
+          b'args': {
+            b'publiconly': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'known': {
+          b'args': {
+            b'nodes': {
+              b'default': [],
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'listkeys': {
+          b'args': {
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'lookup': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        },
+        b'manifestdata': {
+          b'args': {
+            b'fields': {
+              b'default': set([]),
+              b'required': False,
+              b'type': b'set',
+              b'validvalues': set([
+                b'parents',
+                b'revision'
+              ])
+            },
+            b'haveparents': {
+              b'default': False,
+              b'required': False,
+              b'type': b'bool'
+            },
+            b'nodes': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'tree': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ],
+          b'recommendedbatchsize': 100000
+        },
+        b'pushkey': {
+          b'args': {
+            b'key': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'namespace': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'new': {
+              b'required': True,
+              b'type': b'bytes'
+            },
+            b'old': {
+              b'required': True,
+              b'type': b'bytes'
+            }
+          },
+          b'permissions': [
+            b'push'
+          ]
+        },
+        b'rawstorefiledata': {
+          b'args': {
+            b'files': {
+              b'required': True,
+              b'type': b'list'
+            },
+            b'pathfilter': {
+              b'default': None,
+              b'required': False,
+              b'type': b'list'
+            }
+          },
+          b'permissions': [
+            b'pull'
+          ]
+        }
+      },
+      b'framingmediatypes': [
+        b'application/mercurial-exp-framing-0006'
+      ],
+      b'pathfilterprefixes': set([
+        b'path:',
+        b'rootfilesin:'
+      ]),
+      b'rawrepoformats': [
+        b'generaldelta',
+        b'revlogv1'
+      ],
+      b'redirect': {
+        b'hashes': [
+          b'sha256',
+          b'sha1'
+        ],
+        b'targets': [
+          {
+            b'name': b'target-bad-tls',
+            b'protocol': b'https',
+            b'tlsversions': [
+              b'42',
+              b'39'
+            ],
+            b'uris': [
+              b'https://example.com/'
+            ]
+          }
+        ]
+      }
+    }
+  ]
+  (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+Set up the server to issue content redirects to its built-in API server.
+
+  $ cat > redirects.py << EOF
+  > [
+  >   {
+  >     b'name': b'local',
+  >     b'protocol': b'http',
+  >     b'uris': [b'http://example.com/'],
+  >   },
+  > ]
+  > EOF
+
+A request to the eventual cache URL should return 404 (validating that
+the cache server works)
+
+  $ sendhttpraw << EOF
+  > httprequest GET api/simplecache/missingkey
+  >     user-agent: test
+  > EOF
+  using raw connection to peer
+  s>     GET /api/simplecache/missingkey HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     user-agent: test\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 404 Not Found\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: text/plain\r\n
+  s>     Content-Length: 22\r\n
+  s>     \r\n
+  s>     key not found in cache
+
+Send a cacheable request
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
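+
+The server stores this response under a 40-hex key (shown in the next
+request) that has the shape of a SHA-1 digest, presumably derived from
+the command name and arguments so that identical requests map to the
+same cache entry. A sketch of that kind of key derivation, with a
+hypothetical cachekey() helper (the simplecache test extension defines
+the actual scheme):
+
+    import hashlib
+
+    def cachekey(command, args):
+        """Derive a stable cache key from a command invocation."""
+        state = hashlib.sha1()
+        state.update(command)
+        for k in sorted(args):
+            state.update(k)
+            state.update(repr(args[k]).encode('ascii'))
+        return state.hexdigest()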
+
+Cached entry should be available on server
+
+  $ sendhttpraw << EOF
+  > httprequest GET api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c
+  >     user-agent: test
+  > EOF
+  using raw connection to peer
+  s>     GET /api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c HTTP/1.1\r\n
+  s>     Accept-Encoding: identity\r\n
+  s>     user-agent: test\r\n
+  s>     host: $LOCALIP:$HGPORT\r\n (glob)
+  s>     \r\n
+  s> makefile('rb', None)
+  s>     HTTP/1.1 200 OK\r\n
+  s>     Server: testing stub value\r\n
+  s>     Date: $HTTP_DATE$\r\n
+  s>     Content-Type: application/mercurial-cbor\r\n
+  s>     Content-Length: 91\r\n
+  s>     \r\n
+  s>     \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
+  cbor> [
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+A second identical request should result in a content redirect response
+
+  $ sendhttpv2peer << EOF
+  > command manifestdata
+  >     nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
+  >     tree eval:b''
+  >     fields eval:[b'parents']
+  > EOF
+  creating http peer for wire protocol version 2
+  sending manifestdata command
+  response: gen[
+    {
+      b'totalitems': 1
+    },
+    {
+      b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      b'parents': [
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+      ]
+    }
+  ]
+
+  $ cat error.log
+  $ killdaemons.py
+
+  $ cat .hg/blackbox.log
+  *> cacher constructed for manifestdata (glob)
+  *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> cacher constructed for manifestdata (glob)
+  *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
+  *> sending content redirect for 47abb8efa5f01b8964d74917793ad2464db0fa2c to http://*:$HGPORT/api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
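+
+The blackbox log captures the whole caching flow: the first request
+misses and populates the cache, the second hits and is answered with a
+content redirect to the cache URL instead of an inline payload. A
+minimal sketch of that control flow, using a plain dict as a stand-in
+for the real cacher object:
+
+    def servecommand(cache, key, makeresponse, redirectbase):
+        """Serve from cache when possible, redirecting on a hit."""
+        if key in cache:
+            # Cache hit: point the client at the cached payload.
+            return ('redirect', redirectbase + key)
+        payload = makeresponse()
+        cache[key] = payload  # cache miss: store for the next request
+        return ('inline', payload)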
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-exchangev2-shallow.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,568 @@
+#require sqlite
+
+Tests for wire protocol version 2 exchange.
+Tests in this file should be folded into existing tests once protocol
+v2 has enough features that it can be enabled via #testcase there.
+
+  $ . $TESTDIR/wireprotohelpers.sh
+  $ enablehttpv2client
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > sqlitestore =
+  > pullext = $TESTDIR/pullext.py
+  > [storage]
+  > new-repo-backend=sqlite
+  > EOF
+
+Configure a server
+
+  $ hg init server-basic
+  $ enablehttpv2 server-basic
+  $ cd server-basic
+  $ mkdir dir0 dir1
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ hg -q commit -A -m 'commit 1'
+  $ echo e0 > dir1/e
+  $ echo f0 > dir1/f
+  $ hg -q commit -A -m 'commit 2'
+  $ echo c1 > dir0/c
+  $ echo e1 > dir1/e
+  $ hg commit -m 'commit 3'
+  $ echo c2 > dir0/c
+  $ echo e2 > dir1/e
+  $ echo f1 > dir1/f
+  $ hg commit -m 'commit 4'
+  $ echo a1 > a
+  $ echo b1 > b
+  $ hg commit -m 'commit 5'
+
+  $ hg log -G -T '{node} {desc}'
+  @  93a8bd067ed2840d9aa810ad598168383a3a2c3a commit 5
+  |
+  o  dc666cf9ecf3d94e6b830f30e5f1272e2a9164d9 commit 4
+  |
+  o  97765fc3cd624fd1fa0176932c21ffd16adf432e commit 3
+  |
+  o  47fe012ab237a8c7fc0c78f9f26d5866eef3f825 commit 2
+  |
+  o  b709380892b193c1091d3a817f706052e346821b commit 1
+  |
+  o  3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
+  
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ cd ..
+
+Shallow clone pulls down latest revision of every file
+
+  $ hg --debug clone --depth 1 http://localhost:$HGPORT client-shallow-1
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  add changeset dc666cf9ecf3
+  add changeset 93a8bd067ed2
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3',
+      'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
+      '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'linknode',
+      'parents',
+      'revision'
+    ]),
+    'haveparents': False,
+    'revisions': [
+      {
+        'nodes': [
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:93a8bd067ed2
+  updating to branch default
+  resolving manifests
+   branchmerge: False, force: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2
+   a: remote created -> g
+  getting a
+   b: remote created -> g
+  getting b
+   dir0/c: remote created -> g
+  getting dir0/c
+   dir0/d: remote created -> g
+  getting dir0/d
+   dir1/e: remote created -> g
+  getting dir1/e
+   dir1/f: remote created -> g
+  getting dir1/f
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ sqlite3 -line client-shallow-1/.hg/store/db.sqlite << EOF
+  > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
+  > EOF
+       id = 1
+     path = a
+   revnum = 0
+     node = \x9a8\x12)\x97\xb3\xac\x97\xbe*\x9a\xa2\xe5V\x83\x83A\xfd\xf2\xcc (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 5
+    flags = 2
+  
+       id = 2
+     path = b
+   revnum = 0
+     node = \xb1zk\xd3g=\x9a\xb8\xce\xd5\x81\xa2	\xf6/=\xa5\xccEx (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 5
+    flags = 2
+  
+       id = 3
+     path = dir0/c
+   revnum = 0
+     node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 4
+    flags = 2
+  
+       id = 4
+     path = dir0/d
+   revnum = 0
+     node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 1
+    flags = 0
+  
+       id = 5
+     path = dir1/e
+   revnum = 0
+     node = ]\xf3\xac\xd8\xd0\xc7\xfaP\x98\xd0'\x9a\x044\xc3\x02\x9e+x\xe1 (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 4
+    flags = 2
+  
+       id = 6
+     path = dir1/f
+   revnum = 0
+     node = (\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 4
+    flags = 2
+
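+Each file ends up with exactly one stored revision (revnum = 0): the
+shallow clone fetched only the latest fulltext of every file, so stored
+revisions whose real first parent was left behind carry a flag. Assuming
+sqlitestore's flag bits are FLAG_CENSORED = 1, FLAG_MISSING_P1 = 2 and
+FLAG_MISSING_P2 = 4 (an assumption worth checking against
+hgext/sqlitestore.py), the flags column decodes like this:
+
+    # Hypothetical decoder for the flags column above.
+    FLAGS = {1: 'censored', 2: 'missing-p1', 4: 'missing-p2'}
+
+    def describeflags(value):
+        names = [name for bit, name in sorted(FLAGS.items()) if value & bit]
+        return ','.join(names) or 'none'
+
+dir0/d keeps flags = 0 because it was committed once and never modified,
+so its null parents are genuine rather than elided by the shallow fetch.
+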
+Test a shallow clone with only some files
+
+  $ hg --debug clone --depth 1 --include dir0/ http://localhost:$HGPORT client-shallow-narrow-1
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  add changeset dc666cf9ecf3
+  add changeset 93a8bd067ed2
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3',
+      'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
+      '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1515; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'linknode',
+      'parents',
+      'revision'
+    ]),
+    'haveparents': False,
+    'pathfilter': {
+      'include': [
+        'path:dir0'
+      ]
+    },
+    'revisions': [
+      {
+        'nodes': [
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=355; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:93a8bd067ed2
+  updating to branch default
+  resolving manifests
+   branchmerge: False, force: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: 93a8bd067ed2
+   dir0/c: remote created -> g
+  getting dir0/c
+   dir0/d: remote created -> g
+  getting dir0/d
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ sqlite3 -line client-shallow-narrow-1/.hg/store/db.sqlite << EOF
+  > SELECT id, path, revnum, node, p1rev, p2rev, linkrev, flags FROM filedata ORDER BY id ASC;
+  > EOF
+       id = 1
+     path = dir0/c
+   revnum = 0
+     node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 4
+    flags = 2
+  
+       id = 2
+     path = dir0/d
+   revnum = 0
+     node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
+    p1rev = -1
+    p2rev = -1
+  linkrev = 1
+    flags = 0
+
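+The narrow request added a pathfilter, and only the dir0/ revisions made
+it into the store. The capabilities data advertises the two filter
+prefixes the server understands, path: and rootfilesin:; a minimal
+sketch of path:-style matching, assuming path: selects a directory and
+everything beneath it:
+
+    def matchespathfilter(path, includes):
+        """Check a store path against path:-style include patterns."""
+        for pattern in includes:
+            if pattern.startswith(b'path:'):
+                prefix = pattern[len(b'path:'):]
+                if path == prefix or path.startswith(prefix + b'/'):
+                    return True
+        return False
+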
+Cloning an old revision with depth=1 works
+
+  $ hg --debug clone --depth 1 -r 97765fc3cd624fd1fa0176932c21ffd16adf432e http://localhost:$HGPORT client-shallow-2
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  sending 1 commands
+  sending command lookup: {
+    'key': '97765fc3cd624fd1fa0176932c21ffd16adf432e'
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'linknode',
+      'parents',
+      'revision'
+    ]),
+    'haveparents': False,
+    'revisions': [
+      {
+        'nodes': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:97765fc3cd62
+  updating to branch default
+  resolving manifests
+   branchmerge: False, force: False, partial: False
+   ancestor: 000000000000, local: 000000000000+, remote: 97765fc3cd62
+   a: remote created -> g
+  getting a
+   b: remote created -> g
+  getting b
+   dir0/c: remote created -> g
+  getting dir0/c
+   dir0/d: remote created -> g
+  getting dir0/d
+   dir1/e: remote created -> g
+  getting dir1/e
+   dir1/f: remote created -> g
+  getting dir1/f
+  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+Incremental pull of shallow clone fetches new changesets
+
+  $ hg --cwd client-shallow-2 --debug pull http://localhost:$HGPORT
+  pulling from http://localhost:$HGPORT/
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all local heads known remotely
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'roots': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=400; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset dc666cf9ecf3
+  add changeset 93a8bd067ed2
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      'H]O\xc2`\xef\\\xb9\xc0p6\x88K\x00k\x11\x0ej\xdby',
+      '\xd9;\xc4\x0b\x0e*GMp\xee\xf7}^\x91/f\x7fSd\x83'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=561; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'linknode',
+      'parents',
+      'revision'
+    ]),
+    'haveparents': False,
+    'revisions': [
+      {
+        'nodes': [
+          '\xdcfl\xf9\xec\xf3\xd9Nk\x83\x0f0\xe5\xf1\'.*\x91d\xd9',
+          '\x93\xa8\xbd\x06~\xd2\x84\r\x9a\xa8\x10\xadY\x81h8::,:'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1373; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets dc666cf9ecf3:93a8bd067ed2
+  (run 'hg update' to get a working copy)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ hg --cwd client-shallow-2 up tip
+  merging dir0/c
+  merging dir1/e
+  3 files updated, 2 files merged, 0 files removed, 0 files unresolved
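Every "received frame(size=...; request=...; stream=...; ...)" line in these tests prints the fields of one fixed-size frame header. A decoding sketch, assuming the 8-byte layout described in the wire protocol version 2 framing documentation (24-bit little-endian payload length, 16-bit request ID, one byte each for stream ID and stream flags, and one byte packing frame type and frame flags):

    import struct

    def parseframeheader(header):
        # Decode an 8-byte wireproto v2 frame header (layout assumed
        # from the framing docs; verify against wireprotoframing.py).
        length = struct.unpack('<I', header[0:3] + b'\x00')[0]
        requestid, streamid, streamflags = struct.unpack('<HBB', header[3:7])
        typeflags = header[7]  # Python 3: indexing bytes yields an int
        return {
            'size': length,
            'request': requestid,
            'stream': streamid,
            'streamflags': streamflags,
            'type': typeflags >> 4,     # high nibble: frame type
            'flags': typeflags & 0x0f,  # low nibble: frame flags
        }
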
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-wireproto-exchangev2.t	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,1238 @@
+Tests for wire protocol version 2 exchange.
+Tests in this file should be folded into existing tests once protocol
+v2 has enough features that it can be enabled via #testcase in existing
+tests.
+
+  $ . $TESTDIR/wireprotohelpers.sh
+  $ enablehttpv2client
+
+  $ hg init server-simple
+  $ enablehttpv2 server-simple
+  $ cd server-simple
+  $ cat >> .hg/hgrc << EOF
+  > [phases]
+  > publish = false
+  > EOF
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+
+  $ echo a1 > a
+  $ hg commit -m 'commit 1'
+  $ hg phase --public -r .
+  $ echo a2 > a
+  $ hg commit -m 'commit 2'
+
+  $ hg -q up -r 0
+  $ echo b1 > b
+  $ hg -q commit -m 'head 2 commit 1'
+  $ echo b2 > b
+  $ hg -q commit -m 'head 2 commit 2'
+
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ cd ..
+
+Test basic clone
+
+  $ hg --debug clone -U http://localhost:$HGPORT client-simple
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=941; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset 4432d83626e8
+  add changeset cd2534766bec
+  add changeset e96ae20f4188
+  add changeset caa2a465451d
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
+      '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
+      '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
+      '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
+          '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:caa2a465451d (3 drafts)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+All changesets should have been transferred
+
+  $ hg -R client-simple debugindex -c
+     rev linkrev nodeid       p1           p2
+       0       0 3390ef850073 000000000000 000000000000
+       1       1 4432d83626e8 3390ef850073 000000000000
+       2       2 cd2534766bec 4432d83626e8 000000000000
+       3       3 e96ae20f4188 3390ef850073 000000000000
+       4       4 caa2a465451d e96ae20f4188 000000000000
+
+  $ hg -R client-simple log -G -T '{rev} {node} {phase}\n'
+  o  4 caa2a465451dd1facda0f5b12312c355584188a1 draft
+  |
+  o  3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
+  |
+  | o  2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
+  | |
+  | o  1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
+  |/
+  o  0 3390ef850073fbc2f0dfff2244342c8e9229013a public
+  
+
+All manifests should have been transferred
+
+  $ hg -R client-simple debugindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 992f4779029a 000000000000 000000000000
+       1       1 a988fb43583e 992f4779029a 000000000000
+       2       2 ec804e488c20 a988fb43583e 000000000000
+       3       3 045c7f3927da 992f4779029a 000000000000
+       4       4 379cb0c2e664 045c7f3927da 000000000000
+
+Cloning only a specific revision works
+
+  $ hg --debug clone -U -r 4432d83626e8 http://localhost:$HGPORT client-singlehead
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  sending 1 commands
+  sending command lookup: {
+    'key': '4432d83626e8'
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=21; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=381; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset 4432d83626e8
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=404; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=439; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:4432d83626e8
+  (sent 6 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ cd client-singlehead
+
+  $ hg log -G -T '{rev} {node} {phase}\n'
+  o  1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
+  |
+  o  0 3390ef850073fbc2f0dfff2244342c8e9229013a public
+  
+
+  $ hg debugindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 992f4779029a 000000000000 000000000000
+       1       1 a988fb43583e 992f4779029a 000000000000
+
+Incremental pull works
+
+  $ hg --debug pull
+  pulling from http://localhost:$HGPORT/
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all local heads known remotely
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'roots': [
+          'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=573; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset cd2534766bec
+  add changeset e96ae20f4188
+  add changeset caa2a465451d
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
+      '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
+      '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=601; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'revisions': [
+      {
+        'nodes': [
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
+          '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=527; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets cd2534766bec:caa2a465451d (3 drafts)
+  (run 'hg update' to get a working copy)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ hg log -G -T '{rev} {node} {phase}\n'
+  o  4 caa2a465451dd1facda0f5b12312c355584188a1 draft
+  |
+  o  3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
+  |
+  | o  2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
+  | |
+  | o  1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
+  |/
+  o  0 3390ef850073fbc2f0dfff2244342c8e9229013a public
+  
+
+  $ hg debugindex -m
+     rev linkrev nodeid       p1           p2
+       0       0 992f4779029a 000000000000 000000000000
+       1       1 a988fb43583e 992f4779029a 000000000000
+       2       2 ec804e488c20 a988fb43583e 000000000000
+       3       3 045c7f3927da 992f4779029a 000000000000
+       4       4 379cb0c2e664 045c7f3927da 000000000000
+
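Each pull in this file follows the same shape: heads/known discovery, then a changesetdata request covering the DAG range between the common nodes and the remote heads. A sketch of assembling that request from the discovery result (the function name is illustrative, not the client's actual helper):

    def buildchangesetdatarequest(commonheads, remoteheads):
        # Mirrors the request bodies printed above: 'roots' are nodes
        # both sides already have; 'heads' bound the changesets the
        # server should send back.
        return {
            b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
            b'revisions': [{
                b'type': b'changesetdagrange',
                b'roots': sorted(commonheads),
                b'heads': sorted(remoteheads),
            }],
        }
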
+Phase-only update works
+TODO this doesn't work: the phase moved to public on the server is not
+reflected on the client below (revision 4 stays draft)
+
+  $ hg -R ../server-simple phase --public -r caa2a465451dd
+  $ hg --debug pull
+  pulling from http://localhost:$HGPORT/
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
+      '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all remote heads known locally
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'roots': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  checking for updated bookmarks
+  (run 'hg update' to get a working copy)
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ hg log -G -T '{rev} {node} {phase}\n'
+  o  4 caa2a465451dd1facda0f5b12312c355584188a1 draft
+  |
+  o  3 e96ae20f4188487b9ae4ef3941c27c81143146e5 draft
+  |
+  | o  2 cd2534766bece138c7c1afdc6825302f0f62d81f draft
+  | |
+  | o  1 4432d83626e8a98655f062ec1f2a43b07f7fbbb0 public
+  |/
+  o  0 3390ef850073fbc2f0dfff2244342c8e9229013a public
+  
+
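The TODO above has a visible cause in the request itself: roots and heads name the same two nodes, so the requested DAG range is empty and the tiny response carries no changeset records, and with them no phase updates. A hypothetical helper makes the emptiness explicit:

    def dagrangerevs(repo, roots, heads):
        # Hypothetical: revisions reachable from 'heads' but not from
        # 'roots'. With roots == heads, as in the request above, this
        # is empty, so no phase data ever travels back.
        return repo.revs('only(%ln, %ln)', heads, roots)
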
+  $ cd ..
+
+Bookmarks are transferred on clone
+
+  $ hg -R server-simple bookmark -r 3390ef850073fbc2f0dfff2244342c8e9229013a book-1
+  $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-2
+
+  $ hg --debug clone -U http://localhost:$HGPORT/ client-bookmarks
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=979; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset 4432d83626e8
+  add changeset cd2534766bec
+  add changeset e96ae20f4188
+  add changeset caa2a465451d
+  checking for updated bookmarks
+  adding remote bookmark book-1
+  adding remote bookmark book-2
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '\xa9\x88\xfbCX>\x87\x1d\x1e\xd5u\x0e\xe0t\xc6\xd8@\xbb\xbf\xc8',
+      '\xec\x80NH\x8c \x88\xc25\t\x9a\x10 u\x13\xbe\xcd\xc3\xdd\xa5',
+      '\x04\\\x7f9\'\xda\x13\xe7Z\xf8\xf0\xe4\xf0HI\xe4a\xa9x\x0f',
+      '7\x9c\xb0\xc2\xe6d\\y\xdd\xc5\x9a\x1dG\'\xa9\xfb\x83\n\xeb&'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=992; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          'D2\xd86&\xe8\xa9\x86U\xf0b\xec\x1f*C\xb0\x7f\x7f\xbb\xb0',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
+          '\xe9j\xe2\x0fA\x88H{\x9a\xe4\xef9A\xc2|\x81\x141F\xe5',
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=901; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:caa2a465451d (1 drafts)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ hg -R client-bookmarks bookmarks
+     book-1                    0:3390ef850073
+     book-2                    2:cd2534766bec
+
+Server-side bookmark moves are reflected during `hg pull`
+
+  $ hg -R server-simple bookmark -r cd2534766bece138c7c1afdc6825302f0f62d81f book-1
+  moving bookmark 'book-1' forward from 3390ef850073
+
+  $ hg -R client-bookmarks --debug pull
+  pulling from http://localhost:$HGPORT/
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f',
+      '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=43; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=3; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all remote heads known locally
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'roots': [
+          '\xca\xa2\xa4eE\x1d\xd1\xfa\xcd\xa0\xf5\xb1#\x12\xc3UXA\x88\xa1',
+          '\xcd%4vk\xec\xe18\xc7\xc1\xaf\xdch%0/\x0fb\xd8\x1f'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=65; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  checking for updated bookmarks
+  updating bookmark book-1
+  (run 'hg update' to get a working copy)
+  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+  $ hg -R client-bookmarks bookmarks
+     book-1                    2:cd2534766bec
+     book-2                    2:cd2534766bec
+
+  $ killdaemons.py
+
+Let's set up a slightly more complicated server
+
+  $ hg init server-2
+  $ enablehttpv2 server-2
+  $ cd server-2
+  $ mkdir dir0 dir1
+  $ echo a0 > a
+  $ echo b0 > b
+  $ hg -q commit -A -m 'commit 0'
+  $ echo c0 > dir0/c
+  $ echo d0 > dir0/d
+  $ hg -q commit -A -m 'commit 1'
+  $ echo e0 > dir1/e
+  $ echo f0 > dir1/f
+  $ hg -q commit -A -m 'commit 2'
+  $ echo c1 > dir0/c
+  $ echo e1 > dir1/e
+  $ hg commit -m 'commit 3'
+  $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ cd ..
+
+Narrow clone only fetches some files
+
+  $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ http://localhost:$HGPORT/ client-narrow-0
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'pathfilter': {
+      'include': [
+        'path:dir0'
+      ]
+    },
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
+          'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:97765fc3cd62
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#if reporevlogstore
+  $ find client-narrow-0/.hg/store -type f -name '*.i' | sort
+  client-narrow-0/.hg/store/00changelog.i
+  client-narrow-0/.hg/store/00manifest.i
+  client-narrow-0/.hg/store/data/dir0/c.i
+  client-narrow-0/.hg/store/data/dir0/d.i
+#endif
+
+--exclude by itself works
+
+  $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --exclude dir0/ http://localhost:$HGPORT/ client-narrow-1
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'pathfilter': {
+      'exclude': [
+        'path:dir0'
+      ],
+      'include': [
+        'path:.'
+      ]
+    },
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
+          'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=709; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:97765fc3cd62
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#if reporevlogstore
+  $ find client-narrow-1/.hg/store -type f -name '*.i' | sort
+  client-narrow-1/.hg/store/00changelog.i
+  client-narrow-1/.hg/store/00manifest.i
+  client-narrow-1/.hg/store/data/a.i
+  client-narrow-1/.hg/store/data/b.i
+  client-narrow-1/.hg/store/data/dir1/e.i
+  client-narrow-1/.hg/store/data/dir1/f.i
+#endif
+
+Mixing --include and --exclude works
+
+  $ hg --config extensions.pullext=$TESTDIR/pullext.py --debug clone -U --include dir0/ --exclude dir0/c http://localhost:$HGPORT/ client-narrow-2
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'pathfilter': {
+      'exclude': [
+        'path:dir0/c'
+      ],
+      'include': [
+        'path:dir0'
+      ]
+    },
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
+          'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=160; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  new changesets 3390ef850073:97765fc3cd62
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#if reporevlogstore
+  $ find client-narrow-2/.hg/store -type f -name '*.i' | sort
+  client-narrow-2/.hg/store/00changelog.i
+  client-narrow-2/.hg/store/00manifest.i
+  client-narrow-2/.hg/store/data/dir0/d.i
+#endif
+
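The pathfilter bodies above show how the narrow options translate: every --include/--exclude becomes a 'path:' pattern, and an exclude given without an include implies 'path:.' (everything). A rough client-side equivalent, assuming plain prefix semantics for 'path:' patterns (real matching lives in mercurial.match):

    def matchespathfilter(path, include=None, exclude=None):
        # Rough sketch only: 'path:foo' matches foo and anything under
        # it; excludes take precedence over includes.
        def matches(pat, p):
            root = pat[len('path:'):]
            return root in ('.', '') or p == root or p.startswith(root + '/')
        if exclude and any(matches(e, path) for e in exclude):
            return False
        if include:
            return any(matches(i, path) for i in include)
        return True
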
+--stream will use rawstorefiledata to transfer the changelog and manifestlog,
+then fall through to fetching file data (see the sketch after this test)
+
+  $ hg --debug clone --stream -U http://localhost:$HGPORT client-stream-0
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  sending 1 commands
+  sending command rawstorefiledata: {
+    'files': [
+      'changelog',
+      'manifestlog'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all remote heads known locally
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  checking for updated bookmarks
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
+          'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1133; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
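A --stream clone therefore needs only two kinds of fetches: one rawstorefiledata request that streams the changelog and manifestlog store files wholesale, after which discovery reports every changeset as already known and only filesdata remains. The first request is simple enough to write out by hand (a sketch mirroring the output above):

    def rawstorefiledatarequest():
        # Mirrors the first command in the test above; the server
        # streams the named store files verbatim rather than computing
        # per-revision deltas.
        return {b'files': [b'changelog', b'manifestlog']}
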
+--stream + --include/--exclude will only obtain some files
+
+  $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --stream --include dir0/ -U http://localhost:$HGPORT client-stream-2
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  sending 1 commands
+  sending command rawstorefiledata: {
+    'files': [
+      'changelog',
+      'manifestlog'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1275; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  updating the branch cache
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': [
+      '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=2; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  searching for changes
+  all remote heads known locally
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=13; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  checking for updated bookmarks
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'pathfilter': {
+      'include': [
+        'path:dir0'
+      ]
+    },
+    'revisions': [
+      {
+        'nodes': [
+          '3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
+          '\xb7\t8\x08\x92\xb1\x93\xc1\t\x1d:\x81\x7fp`R\xe3F\x82\x1b',
+          'G\xfe\x01*\xb27\xa8\xc7\xfc\x0cx\xf9\xf2mXf\xee\xf3\xf8%',
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=449; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+#if reporevlogstore
+  $ find client-stream-2/.hg/store -type f -name '*.i' | sort
+  client-stream-2/.hg/store/00changelog.i
+  client-stream-2/.hg/store/00manifest.i
+  client-stream-2/.hg/store/data/dir0/c.i
+  client-stream-2/.hg/store/data/dir0/d.i
+#endif
+
+Shallow clone doesn't work with revlogs
+
+  $ hg --debug --config extensions.pullext=$TESTDIR/pullext.py clone --depth 1 -U http://localhost:$HGPORT client-shallow-revlogs
+  using http://localhost:$HGPORT/
+  sending capabilities command
+  query 1; heads
+  sending 2 commands
+  sending command heads: {}
+  sending command known: {
+    'nodes': []
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=22; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  received frame(size=11; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1; request=3; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=3; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command changesetdata: {
+    'fields': set([
+      'bookmarks',
+      'parents',
+      'phase',
+      'revision'
+    ]),
+    'revisions': [
+      {
+        'heads': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'roots': [],
+        'type': 'changesetdagrange'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=783; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  add changeset 3390ef850073
+  add changeset b709380892b1
+  add changeset 47fe012ab237
+  add changeset 97765fc3cd62
+  checking for updated bookmarks
+  sending 1 commands
+  sending command manifestdata: {
+    'fields': set([
+      'parents',
+      'revision'
+    ]),
+    'haveparents': True,
+    'nodes': [
+      '\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
+      '|2 \x1a\xa3\xa1R\xa9\xe6\xa9"+?\xa8\xd0\xe3\x0f\xc2V\xe8',
+      '\x8d\xd0W<\x7f\xaf\xe2\x04F\xcc\xea\xac\x05N\xea\xa4x\x91M\xdb',
+      '113\x85\xf2!\x8b\x08^\xb2Z\x821\x1e*\xdd\x0e\xeb\x8c3'
+    ],
+    'tree': ''
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=967; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  sending 1 commands
+  sending command filesdata: {
+    'fields': set([
+      'linknode',
+      'parents',
+      'revision'
+    ]),
+    'haveparents': False,
+    'revisions': [
+      {
+        'nodes': [
+          '\x97v_\xc3\xcdbO\xd1\xfa\x01v\x93,!\xff\xd1j\xdfC.'
+        ],
+        'type': 'changesetexplicit'
+      }
+    ]
+  }
+  received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+  received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=1005; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+  received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+  transaction abort!
+  rollback completed
+  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
+  abort: revlog storage does not support missing parents write mode
+  [255]
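The abort above is the expected outcome: the filesdata request was sent with
'haveparents': False, asking for file revisions without a guarantee that their
parents are present, and revlog storage cannot write a revision whose parents
are missing. The received frame(...) lines that dominate these traces all
follow one fixed shape; below is a minimal sketch for pulling the fields back
out of such a log line (parseframelog is a hypothetical helper for
illustration, not part of Mercurial):

  import re

  _FRAMELOG = re.compile(
      br'received frame\(size=(\d+); request=(\d+); stream=(\d+); '
      br'streamflags=([\w-]*); type=([\w-]+); flags=([\w|-]+)\)')

  def parseframelog(line):
      """Parse one debugwireproto frame log line into its fields."""
      m = _FRAMELOG.search(line)
      if m is None:
          return None
      size, request, stream = (int(m.group(i)) for i in (1, 2, 3))
      return (size, request, stream,
              m.group(4), m.group(5), m.group(6))

  # parseframelog(b'received frame(size=9; request=1; stream=2; '
  #               b'streamflags=stream-begin; type=stream-settings; '
  #               b'flags=eos)')
  # -> (9, 1, 2, b'stream-begin', b'stream-settings', b'eos')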
--- a/tests/test-wireproto-framing.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-framing.py	Mon Oct 22 14:46:06 2018 -0400
@@ -44,9 +44,6 @@
         self.assertEqual(ffs(b"1 1 0 1 0 cbor:b'foo'"),
                          b'\x04\x00\x00\x01\x00\x01\x00\x10Cfoo')
 
-        self.assertEqual(ffs(b"1 1 0 1 0 cbor:u'foo'"),
-                         b'\x04\x00\x00\x01\x00\x01\x00\x10cfoo')
-
     def testcborlists(self):
         self.assertEqual(ffs(b"1 1 0 1 0 cbor:[None, True, False, 42, b'foo']"),
                          b'\n\x00\x00\x01\x00\x01\x00\x10\x85\xf6\xf5\xf4'
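The byte strings in these assertions double as documentation of the frame wire
format: an 8-byte header consisting of a 3-byte little-endian payload length,
a 2-byte little-endian request ID, one byte each for stream ID and stream
flags, and a final byte packing the frame type into the high nibble and the
frame flags into the low nibble, followed by the CBOR payload. A minimal
Python 3 sketch decoding that header (parseframeheader is a hypothetical
helper, shown only to make the vectors above readable):

  import struct

  def parseframeheader(header):
      """Decode the fixed 8-byte frame header shown in the vectors above."""
      length, = struct.unpack('<I', header[0:3] + b'\x00')
      requestid, streamid, streamflags = struct.unpack('<HBB', header[3:7])
      typeflags, = struct.unpack('<B', header[7:8])
      return (length, requestid, streamid, streamflags,
              typeflags >> 4, typeflags & 0x0f)

  # parseframeheader(b'\x04\x00\x00\x01\x00\x01\x00\x10Cfoo')
  # -> (4, 1, 1, 0, 1, 0), matching "1 1 0 1 0 cbor:b'foo'" above.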
--- a/tests/test-wireproto-serverreactor.py	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/test-wireproto-serverreactor.py	Mon Oct 22 14:46:06 2018 -0400
@@ -6,16 +6,21 @@
     cbor,
 )
 from mercurial import (
+    ui as uimod,
     util,
     wireprotoframing as framing,
 )
+from mercurial.utils import (
+    cborutil,
+)
 
 ffs = framing.makeframefromhumanstring
 
 OK = cbor.dumps({b'status': b'ok'})
 
 def makereactor(deferoutput=False):
-    return framing.serverreactor(deferoutput=deferoutput)
+    ui = uimod.ui()
+    return framing.serverreactor(ui, deferoutput=deferoutput)
 
 def sendframes(reactor, gen):
     """Send a generator of frame bytearray to a reactor.
@@ -69,6 +74,7 @@
             b'requestid': 1,
             b'command': b'mycommand',
             b'args': {},
+            b'redirect': None,
             b'data': None,
         })
 
@@ -86,6 +92,7 @@
             b'requestid': 41,
             b'command': b'mycommand',
             b'args': {b'foo': b'bar'},
+            b'redirect': None,
             b'data': None,
         })
 
@@ -100,6 +107,7 @@
             b'requestid': 1,
             b'command': b'mycommand',
             b'args': {b'foo': b'bar', b'biz': b'baz'},
+            b'redirect': None,
             b'data': None,
         })
 
@@ -115,6 +123,7 @@
             b'requestid': 1,
             b'command': b'mycommand',
             b'args': {},
+            b'redirect': None,
             b'data': b'data!',
         })
 
@@ -137,6 +146,7 @@
             b'requestid': 1,
             b'command': b'mycommand',
             b'args': {},
+            b'redirect': None,
             b'data': b'data1data2data3',
         })
 
@@ -160,6 +170,7 @@
                 b'key': b'val',
                 b'foo': b'bar',
             },
+            b'redirect': None,
             b'data': b'value1value2',
         })
 
@@ -187,7 +198,8 @@
             ffs(b'1 1 stream-begin command-data 0 ignored'))
         self.assertaction(result, b'error')
         self.assertEqual(result[1], {
-            b'message': b'expected command request frame; got 2',
+            b'message': b'expected sender protocol settings or command request '
+                        b'frame; got 2',
         })
 
     def testunexpectedcommanddatareceiving(self):
@@ -213,19 +225,22 @@
         results.append(self._sendsingleframe(
             reactor, ffs(b'1 1 stream-begin command-request new '
                          b"cbor:{b'name': b'command'}")))
-        result = reactor.oncommandresponseready(outstream, 1, b'response1')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response1'])
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
         results.append(self._sendsingleframe(
             reactor, ffs(b'1 1 stream-begin command-request new '
                          b"cbor:{b'name': b'command'}")))
-        result = reactor.oncommandresponseready(outstream, 1, b'response2')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response2'])
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
         results.append(self._sendsingleframe(
             reactor, ffs(b'1 1 stream-begin command-request new '
                          b"cbor:{b'name': b'command'}")))
-        result = reactor.oncommandresponseready(outstream, 1, b'response3')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response3'])
         self.assertaction(result, b'sendframes')
         list(result[1][b'framegen'])
 
@@ -235,6 +250,7 @@
                 b'requestid': 1,
                 b'command': b'command',
                 b'args': {},
+                b'redirect': None,
                 b'data': None,
             })
 
@@ -291,12 +307,14 @@
             b'requestid': 3,
             b'command': b'command3',
             b'args': {b'biz': b'baz', b'key': b'val'},
+            b'redirect': None,
             b'data': None,
         })
         self.assertEqual(results[5][1], {
             b'requestid': 1,
             b'command': b'command1',
             b'args': {b'foo': b'bar', b'key1': b'val'},
+            b'redirect': None,
             b'data': None,
         })
 
@@ -349,10 +367,14 @@
         list(sendcommandframes(reactor, instream, 1, b'mycommand', {}))
 
         outstream = reactor.makeoutputstream()
-        result = reactor.oncommandresponseready(outstream, 1, b'response')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response'])
         self.assertaction(result, b'sendframes')
         self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin command-response eos %sresponse' % OK,
+            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+            b'1 2 encoded command-response continuation %s' % OK,
+            b'1 2 encoded command-response continuation cbor:b"response"',
+            b'1 2 0 command-response eos ',
         ])
 
     def testmultiframeresponse(self):
@@ -365,12 +387,16 @@
         list(sendcommandframes(reactor, instream, 1, b'mycommand', {}))
 
         outstream = reactor.makeoutputstream()
-        result = reactor.oncommandresponseready(outstream, 1, first + second)
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [first + second])
         self.assertaction(result, b'sendframes')
         self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin command-response continuation %s' % OK,
-            b'1 2 0 command-response continuation %s' % first,
-            b'1 2 0 command-response eos %s' % second,
+            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+            b'1 2 encoded command-response continuation %s' % OK,
+            b'1 2 encoded command-response continuation Y\x80d',
+            b'1 2 encoded command-response continuation %s' % first,
+            b'1 2 encoded command-response continuation %s' % second,
+            b'1 2 0 command-response eos '
         ])
 
     def testservererror(self):
@@ -397,12 +423,16 @@
         self.assertaction(results[0], b'runcommand')
 
         outstream = reactor.makeoutputstream()
-        result = reactor.oncommandresponseready(outstream, 1, b'response')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response'])
         self.assertaction(result, b'noop')
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
         self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin command-response eos %sresponse' % OK,
+            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+            b'1 2 encoded command-response continuation %s' % OK,
+            b'1 2 encoded command-response continuation cbor:b"response"',
+            b'1 2 0 command-response eos ',
         ])
 
     def testmultiplecommanddeferresponse(self):
@@ -412,15 +442,22 @@
         list(sendcommandframes(reactor, instream, 3, b'command2', {}))
 
         outstream = reactor.makeoutputstream()
-        result = reactor.oncommandresponseready(outstream, 1, b'response1')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 1, [b'response1'])
         self.assertaction(result, b'noop')
-        result = reactor.oncommandresponseready(outstream, 3, b'response2')
+        result = reactor.oncommandresponsereadyobjects(
+            outstream, 3, [b'response2'])
         self.assertaction(result, b'noop')
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
         self.assertframesequal(result[1][b'framegen'], [
-            b'1 2 stream-begin command-response eos %sresponse1' % OK,
-            b'3 2 0 command-response eos %sresponse2' % OK,
+            b'1 2 stream-begin stream-settings eos cbor:b"identity"',
+            b'1 2 encoded command-response continuation %s' % OK,
+            b'1 2 encoded command-response continuation cbor:b"response1"',
+            b'1 2 0 command-response eos ',
+            b'3 2 encoded command-response continuation %s' % OK,
+            b'3 2 encoded command-response continuation cbor:b"response2"',
+            b'3 2 0 command-response eos ',
         ])
 
     def testrequestidtracking(self):
@@ -432,16 +469,23 @@
 
         # Register results for commands out of order.
         outstream = reactor.makeoutputstream()
-        reactor.oncommandresponseready(outstream, 3, b'response3')
-        reactor.oncommandresponseready(outstream, 1, b'response1')
-        reactor.oncommandresponseready(outstream, 5, b'response5')
+        reactor.oncommandresponsereadyobjects(outstream, 3, [b'response3'])
+        reactor.oncommandresponsereadyobjects(outstream, 1, [b'response1'])
+        reactor.oncommandresponsereadyobjects(outstream, 5, [b'response5'])
 
         result = reactor.oninputeof()
         self.assertaction(result, b'sendframes')
         self.assertframesequal(result[1][b'framegen'], [
-            b'3 2 stream-begin command-response eos %sresponse3' % OK,
-            b'1 2 0 command-response eos %sresponse1' % OK,
-            b'5 2 0 command-response eos %sresponse5' % OK,
+            b'3 2 stream-begin stream-settings eos cbor:b"identity"',
+            b'3 2 encoded command-response continuation %s' % OK,
+            b'3 2 encoded command-response continuation cbor:b"response3"',
+            b'3 2 0 command-response eos ',
+            b'1 2 encoded command-response continuation %s' % OK,
+            b'1 2 encoded command-response continuation cbor:b"response1"',
+            b'1 2 0 command-response eos ',
+            b'5 2 encoded command-response continuation %s' % OK,
+            b'5 2 encoded command-response continuation cbor:b"response5"',
+            b'5 2 0 command-response eos ',
         ])
 
     def testduplicaterequestonactivecommand(self):
@@ -462,7 +506,7 @@
         instream = framing.stream(1)
         list(sendcommandframes(reactor, instream, 1, b'command1', {}))
         outstream = reactor.makeoutputstream()
-        reactor.oncommandresponseready(outstream, 1, b'response')
+        reactor.oncommandresponsereadyobjects(outstream, 1, [b'response'])
 
         # We've registered the response but haven't sent it. From the
         # perspective of the reactor, the command is still active.
@@ -479,12 +523,111 @@
         instream = framing.stream(1)
         list(sendcommandframes(reactor, instream, 1, b'command1', {}))
         outstream = reactor.makeoutputstream()
-        res = reactor.oncommandresponseready(outstream, 1, b'response')
+        res = reactor.oncommandresponsereadyobjects(outstream, 1, [b'response'])
         list(res[1][b'framegen'])
 
         results = list(sendcommandframes(reactor, instream, 1, b'command1', {}))
         self.assertaction(results[0], b'runcommand')
 
+    def testprotocolsettingsnoflags(self):
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'0 1 stream-begin sender-protocol-settings 0 '))
+        self.assertaction(result, b'error')
+        self.assertEqual(result[1], {
+            b'message': b'sender protocol settings frame must have '
+                        b'continuation or end of stream flag set',
+        })
+
+    def testprotocolsettingsconflictflags(self):
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'0 1 stream-begin sender-protocol-settings continuation|eos '))
+        self.assertaction(result, b'error')
+        self.assertEqual(result[1], {
+            b'message': b'sender protocol settings frame cannot have both '
+                        b'continuation and end of stream flags set',
+        })
+
+    def testprotocolsettingsemptypayload(self):
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'0 1 stream-begin sender-protocol-settings eos '))
+        self.assertaction(result, b'error')
+        self.assertEqual(result[1], {
+            b'message': b'sender protocol settings frame did not contain CBOR '
+                        b'data',
+        })
+
+    def testprotocolsettingsmultipleobjects(self):
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'0 1 stream-begin sender-protocol-settings eos '
+                b'\x46foobar\x43foo'))
+        self.assertaction(result, b'error')
+        self.assertEqual(result[1], {
+            b'message': b'sender protocol settings frame contained multiple '
+                        b'CBOR values',
+        })
+
+    def testprotocolsettingscontentencodings(self):
+        reactor = makereactor()
+
+        result = self._sendsingleframe(
+            reactor,
+            ffs(b'0 1 stream-begin sender-protocol-settings eos '
+                b'cbor:{b"contentencodings": [b"a", b"b"]}'))
+        self.assertaction(result, b'wantframe')
+
+        self.assertEqual(reactor._state, b'idle')
+        self.assertEqual(reactor._sendersettings[b'contentencodings'],
+                         [b'a', b'b'])
+
+    def testprotocolsettingsmultipleframes(self):
+        reactor = makereactor()
+
+        data = b''.join(cborutil.streamencode({
+            b'contentencodings': [b'value1', b'value2'],
+        }))
+
+        results = list(sendframes(reactor, [
+            ffs(b'0 1 stream-begin sender-protocol-settings continuation %s' %
+                data[0:5]),
+            ffs(b'0 1 0 sender-protocol-settings eos %s' % data[5:]),
+        ]))
+
+        self.assertEqual(len(results), 2)
+
+        self.assertaction(results[0], b'wantframe')
+        self.assertaction(results[1], b'wantframe')
+
+        self.assertEqual(reactor._state, b'idle')
+        self.assertEqual(reactor._sendersettings[b'contentencodings'],
+                         [b'value1', b'value2'])
+
+    def testprotocolsettingsbadcbor(self):
+        result = self._sendsingleframe(
+            makereactor(),
+            ffs(b'0 1 stream-begin sender-protocol-settings eos badvalue'))
+        self.assertaction(result, b'error')
+
+    def testprotocolsettingsnoninitial(self):
+        # Cannot have protocol settings frames as non-initial frames.
+        reactor = makereactor()
+
+        stream = framing.stream(1)
+        results = list(sendcommandframes(reactor, stream, 1, b'mycommand', {}))
+        self.assertEqual(len(results), 1)
+        self.assertaction(results[0], b'runcommand')
+
+        result = self._sendsingleframe(
+            reactor,
+            ffs(b'0 1 0 sender-protocol-settings eos '))
+        self.assertaction(result, b'error')
+        self.assertEqual(result[1], {
+            b'message': b'expected command request frame; got 8',
+        })
+
 if __name__ == '__main__':
     import silenttestrunner
     silenttestrunner.main(__name__)
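The rewritten expectations above capture the new response framing in one
place: a response stream now opens with a stream-settings frame naming the
content encoding (cbor:b"identity" in these tests), the OK map and each
response object follow as "encoded" command-response continuation frames, and
an empty command-response frame carrying the eos flag closes the response. A
minimal sketch of the send path, mirroring the helpers in this module (the
command-request frame that registers request ID 1 must already have been fed
to the reactor; that step is elided here):

  from mercurial import (
      ui as uimod,
      wireprotoframing as framing,
  )

  ui = uimod.ui()
  reactor = framing.serverreactor(ui, deferoutput=False)
  # ... receive the command-request frame for request ID 1 ...
  outstream = reactor.makeoutputstream()
  result = reactor.oncommandresponsereadyobjects(
      outstream, 1, [b'response'])
  assert result[0] == b'sendframes'
  for frame in result[1][b'framegen']:
      pass  # write each frame to the transport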
--- a/tests/wireprotohelpers.sh	Wed Oct 10 12:25:28 2018 -0400
+++ b/tests/wireprotohelpers.sh	Mon Oct 22 14:46:06 2018 -0400
@@ -1,16 +1,20 @@
-HTTPV2=exp-http-v2-0001
-MEDIATYPE=application/mercurial-exp-framing-0005
+HTTPV2=exp-http-v2-0003
+MEDIATYPE=application/mercurial-exp-framing-0006
 
 sendhttpraw() {
   hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/
 }
 
 sendhttpv2peer() {
-  hg --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/
+  hg --config experimental.httppeer.v2-encoder-order=identity debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/
+}
+
+sendhttpv2peerverbose() {
+  hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --nologhandshake --peer http2 http://$LOCALIP:$HGPORT/
 }
 
 sendhttpv2peerhandshake() {
-  hg --verbose debugwireproto --peer http2 http://$LOCALIP:$HGPORT/
+  hg --config experimental.httppeer.v2-encoder-order=identity --verbose debugwireproto --peer http2 http://$LOCALIP:$HGPORT/
 }
 
 cat > dummycommands.py << EOF
@@ -20,21 +24,21 @@
     wireprotov2server,
 )
 
-@wireprotov1server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv1(repo, proto):
     return wireprototypes.bytesresponse(b'customreadonly bytes response')
 
-@wireprotov2server.wireprotocommand('customreadonly', permission='pull')
+@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull')
 def customreadonlyv2(repo, proto):
-    return wireprototypes.cborresponse(b'customreadonly bytes response')
+    yield b'customreadonly bytes response'
 
-@wireprotov1server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwrite(repo, proto):
     return wireprototypes.bytesresponse(b'customreadwrite bytes response')
 
-@wireprotov2server.wireprotocommand('customreadwrite', permission='push')
+@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push')
 def customreadwritev2(repo, proto):
-    return wireprototypes.cborresponse(b'customreadwrite bytes response')
+    yield b'customreadwrite bytes response'
 EOF
 
 cat >> $HGRCPATH << EOF
@@ -56,3 +60,13 @@
 web.api.http-v2 = true
 EOF
 }
+
+enablehttpv2client() {
+  cat >> $HGRCPATH << EOF
+[experimental]
+httppeer.advertise-v2 = true
+# Force the identity encoder so test output stays plain text. zstd also
+# isn't available in all installs, which would make tests non-deterministic.
+httppeer.v2-encoder-order = identity
+EOF
+}
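Both client helpers pin experimental.httppeer.v2-encoder-order to identity for
the reason the comment gives: plain-text, deterministic test output that does
not depend on zstd being installed. A small sketch of reading the same knob
back out (this assumes the stock ui.configlist API; the actual consumer is the
HTTP peer code, which is not part of this file):

  from mercurial import ui as uimod

  ui = uimod.ui.load()
  order = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
  # With enablehttpv2client in effect, order == [b'identity'].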
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/wireprotosimplecache.py	Mon Oct 22 14:46:06 2018 -0400
@@ -0,0 +1,193 @@
+# wireprotosimplecache.py - Extension providing in-memory wire protocol cache
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+    extensions,
+    registrar,
+    repository,
+    util,
+    wireprotoserver,
+    wireprototypes,
+    wireprotov2server,
+)
+from mercurial.utils import (
+    interfaceutil,
+    stringutil,
+)
+
+CACHE = None
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b'simplecache', b'cacheapi',
+           default=False)
+configitem(b'simplecache', b'cacheobjects',
+           default=False)
+configitem(b'simplecache', b'redirectsfile',
+           default=None)
+
+# API handler that makes cached keys available.
+def handlecacherequest(rctx, req, res, checkperm, urlparts):
+    if rctx.repo.ui.configbool(b'simplecache', b'cacheobjects'):
+        res.status = b'500 Internal Server Error'
+        res.setbodybytes(b'cacheobjects not supported for api server')
+        return
+
+    if not urlparts:
+        res.status = b'200 OK'
+        res.headers[b'Content-Type'] = b'text/plain'
+        res.setbodybytes(b'simple cache server')
+        return
+
+    key = b'/'.join(urlparts)
+
+    if key not in CACHE:
+        res.status = b'404 Not Found'
+        res.headers[b'Content-Type'] = b'text/plain'
+        res.setbodybytes(b'key not found in cache')
+        return
+
+    res.status = b'200 OK'
+    res.headers[b'Content-Type'] = b'application/mercurial-cbor'
+    res.setbodybytes(CACHE[key])
+
+def cachedescriptor(req, repo):
+    return {}
+
+wireprotoserver.API_HANDLERS[b'simplecache'] = {
+    b'config': (b'simplecache', b'cacheapi'),
+    b'handler': handlecacherequest,
+    b'apidescriptor': cachedescriptor,
+}
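+
+# How the registration above is used (an approximation; the dispatch
+# logic lives in wireprotoserver, not in this file): the entry is gated
+# on the listed config option, requests under the repo's
+# api/simplecache/ URL space are routed to handlecacherequest() with
+# the trailing path components passed as ``urlparts``, and
+# ``apidescriptor`` supplies this API's metadata when clients
+# enumerate the available APIs.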
+
+@interfaceutil.implementer(repository.iwireprotocolcommandcacher)
+class memorycacher(object):
+    def __init__(self, ui, command, encodefn, redirecttargets, redirecthashes,
+                 req):
+        self.ui = ui
+        self.encodefn = encodefn
+        self.redirecttargets = redirecttargets
+        self.redirecthashes = redirecthashes
+        self.req = req
+        self.key = None
+        self.cacheobjects = ui.configbool(b'simplecache', b'cacheobjects')
+        self.cacheapi = ui.configbool(b'simplecache', b'cacheapi')
+        self.buffered = []
+
+        ui.log(b'simplecache', b'cacher constructed for %s\n', command)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exctype, excvalue, exctb):
+        if exctype:
+            self.ui.log(b'simplecache', b'cacher exiting due to error\n')
+
+    def adjustcachekeystate(self, state):
+        # Remove the repo object from the key state so tests are
+        # deterministic across runs. Don't copy this pattern for
+        # production caches!
+        del state[b'repo']
+
+    def setcachekey(self, key):
+        self.key = key
+        return True
+
+    def lookup(self):
+        if self.key not in CACHE:
+            self.ui.log(b'simplecache', b'cache miss for %s\n', self.key)
+            return None
+
+        entry = CACHE[self.key]
+        self.ui.log(b'simplecache', b'cache hit for %s\n', self.key)
+
+        redirectable = True
+
+        if not self.cacheapi:
+            redirectable = False
+        elif not self.redirecttargets:
+            redirectable = False
+        else:
+            clienttargets = set(self.redirecttargets)
+            ourtargets = set(t[b'name'] for t in loadredirecttargets(self.ui))
+
+            # We only ever redirect to a single target (for now). So we don't
+            # need to store which target matched.
+            if not clienttargets & ourtargets:
+                redirectable = False
+
+        if redirectable:
+            paths = self.req.dispatchparts[:-3]
+            paths.append(b'simplecache')
+            paths.append(self.key)
+
+            url = b'%s/%s' % (self.req.baseurl, b'/'.join(paths))
+
+            self.ui.log(b'simplecache', b'sending content redirect for %s to '
+                                        b'%s\n', self.key, url)
+            response = wireprototypes.alternatelocationresponse(
+                url=url,
+                mediatype=b'application/mercurial-cbor')
+
+            return {b'objs': [response]}
+
+        if self.cacheobjects:
+            return {
+                b'objs': entry,
+            }
+        else:
+            return {
+                b'objs': [wireprototypes.encodedresponse(entry)],
+            }
+
+    def onobject(self, obj):
+        if self.cacheobjects:
+            self.buffered.append(obj)
+        else:
+            self.buffered.extend(self.encodefn(obj))
+
+        yield obj
+
+    def onfinished(self):
+        self.ui.log(b'simplecache', b'storing cache entry for %s\n', self.key)
+        if self.cacheobjects:
+            CACHE[self.key] = self.buffered
+        else:
+            CACHE[self.key] = b''.join(self.buffered)
+
+        return []
+
+def makeresponsecacher(orig, repo, proto, command, args, objencoderfn,
+                       redirecttargets, redirecthashes):
+    return memorycacher(repo.ui, command, objencoderfn, redirecttargets,
+                        redirecthashes, proto._req)
+
+def loadredirecttargets(ui):
+    path = ui.config(b'simplecache', b'redirectsfile')
+    if not path:
+        return []
+
+    with open(path, 'rb') as fh:
+        s = fh.read()
+
+    return stringutil.evalpythonliteral(s)
+
+def getadvertisedredirecttargets(orig, repo, proto):
+    return loadredirecttargets(repo.ui)
+
+def extsetup(ui):
+    global CACHE
+
+    CACHE = util.lrucachedict(10000)
+
+    extensions.wrapfunction(wireprotov2server, b'makeresponsecacher',
+                            makeresponsecacher)
+    extensions.wrapfunction(wireprotov2server, b'getadvertisedredirecttargets',
+                            getadvertisedredirecttargets)
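Taken together, memorycacher is a worked example of the new
repository.iwireprotocolcommandcacher interface. A hedged sketch of how the
wire protocol v2 server presumably drives such a cacher (the authoritative
driver lives in mercurial/wireprotov2server.py and may differ in detail):

  def runwithcache(cacher, cachekey, commandobjects):
      """Approximate the consumer side of iwireprotocolcommandcacher."""
      with cacher:
          if cacher.setcachekey(cachekey):
              hit = cacher.lookup()
              if hit is not None:
                  # Cache hit: replay stored objects (or a redirect).
                  for obj in hit[b'objs']:
                      yield obj
                  return
          # Cache miss: let the cacher observe every emitted object.
          for obj in commandobjects:
              for o in cacher.onobject(obj):
                  yield o
          # Persist buffered output and emit any trailing objects.
          for o in cacher.onfinished():
              yield o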