freeze: merge default into stable for 4.2 code freeze

author:    Augie Fackler <augie@google.com>
date:      Tue, 18 Apr 2017 12:24:34 -0400
branch:    stable
tag:       4.2-rc
changeset: 32054:616e788321cc
parent:    32053:52902059edc7 (current diff)
parent:    32049:ed42e00a5c4e (diff)
child:     32055:f64ac87d82f9
files:
contrib/hgperf
contrib/python-zstandard/c-ext/dictparams.c
contrib/python-zstandard/tests/test_cffi.py
contrib/python-zstandard/tests/test_roundtrip.py
mercurial/dispatch.py
tests/test-ssh.t
--- a/Makefile	Tue Apr 18 11:22:42 2017 -0400
+++ b/Makefile	Tue Apr 18 12:24:34 2017 -0400
@@ -159,10 +159,21 @@
 # Packaging targets
 
 osx:
+	rm -rf build/mercurial
 	/usr/bin/python2.7 setup.py install --optimize=1 \
 	  --root=build/mercurial/ --prefix=/usr/local/ \
 	  --install-lib=/Library/Python/2.7/site-packages/
 	make -C doc all install DESTDIR="$(PWD)/build/mercurial/"
+        # install zsh completions - this location appears to be
+        # searched by default as of macOS Sierra.
+	install -d build/mercurial/usr/local/share/zsh/site-functions/
+	install -m 0644 contrib/zsh_completion build/mercurial/usr/local/share/zsh/site-functions/hg
+        # install bash completions - there doesn't appear to be a
+        # place that's searched by default for bash, so we'll follow
+        # the lead of Apple's git install and just put it in a
+        # location of our own.
+	install -d build/mercurial/usr/local/hg/contrib/
+	install -m 0644 contrib/bash_completion build/mercurial/usr/local/hg/contrib/hg-completion.bash
 	mkdir -p $${OUTPUTDIR:-dist}
 	HGVER=$$((cat build/mercurial/Library/Python/2.7/site-packages/mercurial/__version__.py; echo 'print(version)') | python) && \
 	OSXVER=$$(sw_vers -productVersion | cut -d. -f1,2) && \
@@ -262,5 +273,9 @@
 .PHONY: help all local build doc cleanbutpackages clean install install-bin \
 	install-doc install-home install-home-bin install-home-doc \
 	dist dist-notests check tests check-code update-pot \
-	osx fedora20 docker-fedora20 fedora21 docker-fedora21 \
+	osx deb ppa docker-debian-jessie \
+	docker-ubuntu-trusty docker-ubuntu-trusty-ppa \
+	docker-ubuntu-xenial docker-ubuntu-xenial-ppa \
+	docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \
+	fedora20 docker-fedora20 fedora21 docker-fedora21 \
 	centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7
--- a/contrib/check-code.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/check-code.py	Tue Apr 18 12:24:34 2017 -0400
@@ -100,7 +100,7 @@
 
 testpats = [
   [
-    (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
+    (r'\b(push|pop)d\b', "don't use 'pushd' or 'popd', use 'cd'"),
     (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
     (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
     (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"),
@@ -190,8 +190,10 @@
     (r'^  .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
     (r'^  .*file://\$TESTTMP',
      'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
-    (r'^  [^$>].*27\.0\.0\.1.*[^)]$',
-     'use (glob) to match localhost IP on hosts without 127.0.0.1 too'),
+    (r'^  [^$>].*27\.0\.0\.1',
+     'use $LOCALIP not an explicit loopback address'),
+    (r'^  [^$>].*\$LOCALIP.*[^)]$',
+     'mark $LOCALIP output lines with (glob) to help tests in BSD jails'),
     (r'^  (cat|find): .*: No such file or directory',
      'use test -f to test for file existence'),
     (r'^  diff -[^ -]*p',
@@ -210,8 +212,8 @@
   ],
   # warnings
   [
-    (r'^  (?!.*127\.0\.0\.1)[^*?/\n]* \(glob\)$',
-     "glob match with no glob string (?, *, /, and 127.0.0.1)"),
+    (r'^  (?!.*\$LOCALIP)[^*?/\n]* \(glob\)$',
+     "glob match with no glob string (?, *, /, and $LOCALIP)"),
   ]
 ]
 
@@ -237,7 +239,7 @@
     (r'lambda\s*\(.*,.*\)',
      "tuple parameter unpacking not available in Python 3+"),
     (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
-    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+    (r'(?<!\.)\breduce\s*\(.*', "reduce is not available in Python 3+"),
     (r'\bdict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}',
      'dict-from-generator'),
     (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
@@ -318,7 +320,7 @@
      'legacy exception syntax; use "as" instead of ","'),
     (r':\n(    )*( ){1,3}[^ ]', "must indent 4 spaces"),
     (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
-    (r'\b__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
+    (r'\bdef\s+__bool__\b', "__bool__ should be __nonzero__ in Python 2"),
     (r'os\.path\.join\(.*, *(""|\'\')\)',
      "use pathutil.normasprefix(path) instead of os.path.join(path, '')"),
     (r'\s0[0-7]+\b', 'legacy octal syntax; use "0o" prefix instead of "0"'),
@@ -330,13 +332,15 @@
     (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"),
     (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"),
     (r'^import SocketServer', "don't use SocketServer, use util.socketserver"),
-    (r'^import urlparse', "don't use urlparse, use util.urlparse"),
+    (r'^import urlparse', "don't use urlparse, use util.urlreq"),
     (r'^import xmlrpclib', "don't use xmlrpclib, use util.xmlrpclib"),
     (r'^import cPickle', "don't use cPickle, use util.pickle"),
     (r'^import pickle', "don't use pickle, use util.pickle"),
     (r'^import httplib', "don't use httplib, use util.httplib"),
     (r'^import BaseHTTPServer', "use util.httpserver instead"),
     (r'\.next\(\)', "don't use .next(), use next(...)"),
+    (r'([a-z]*).revision\(\1\.node\(',
+     "don't convert rev to node before passing to revision(nodeorrev)"),
 
     # rules depending on implementation of repquote()
     (r' x+[xpqo%APM][\'"]\n\s+[\'"]x',
@@ -371,6 +375,13 @@
           (?P=quote))""", reppython),
 ]
 
+# extension non-filter patterns
+pyextnfpats = [
+    [(r'^"""\n?[A-Z]', "don't capitalize docstring title")],
+    # warnings
+    [],
+]
+
 txtfilters = []
 
 txtpats = [
@@ -480,6 +491,7 @@
 
 checks = [
     ('python', r'.*\.(py|cgi)$', r'^#!.*python', pyfilters, pypats),
+    ('python', r'.*hgext.*\.py$', '', [], pyextnfpats),
     ('python 3', r'.*(hgext|mercurial).*(?<!pycompat)\.py', '',
             pyfilters, py3pats),
     ('test script', r'(.*/)?test-[^.~]*$', '', testfilters, testpats),
@@ -661,7 +673,7 @@
     return result
 
 def main():
-    parser = optparse.OptionParser("%prog [options] [files]")
+    parser = optparse.OptionParser("%prog [options] [files | -]")
     parser.add_option("-w", "--warnings", action="store_true",
                       help="include warning-level checks")
     parser.add_option("-p", "--per-file", type="int",
@@ -679,6 +691,9 @@
 
     if len(args) == 0:
         check = glob.glob("*")
+    elif args == ['-']:
+        # read file list from stdin
+        check = sys.stdin.read().splitlines()
     else:
         check = args
 
--- a/contrib/chg/chg.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/chg/chg.c	Tue Apr 18 12:24:34 2017 -0400
@@ -128,6 +128,24 @@
 		abortmsg("insecure sockdir %s", sockdir);
 }
 
+/*
+ * Check if a socket directory exists and is only owned by the current user.
+ * Return 1 if so, 0 if not. This is used to check if XDG_RUNTIME_DIR can be
+ * used or not. According to the specification [1], XDG_RUNTIME_DIR should be
+ * ignored if the directory is not owned by the user with mode 0700.
+ * [1]: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+ */
+static int checkruntimedir(const char *sockdir)
+{
+	struct stat st;
+	int r = lstat(sockdir, &st);
+	if (r < 0) /* ex. does not exist */
+		return 0;
+	if (!S_ISDIR(st.st_mode)) /* ex. is a file, not a directory */
+		return 0;
+	return st.st_uid == geteuid() && (st.st_mode & 0777) == 0700;
+}
+
 static void getdefaultsockdir(char sockdir[], size_t size)
 {
 	/* by default, put socket file in secure directory
@@ -135,7 +153,7 @@
 	 * (permission of socket file may be ignored on some Unices) */
 	const char *runtimedir = getenv("XDG_RUNTIME_DIR");
 	int r;
-	if (runtimedir) {
+	if (runtimedir && checkruntimedir(runtimedir)) {
 		r = snprintf(sockdir, size, "%s/chg", runtimedir);
 	} else {
 		const char *tmpdir = getenv("TMPDIR");
@@ -429,11 +447,11 @@
 	}
 
 	setupsignalhandler(hgc_peerpid(hgc), hgc_peerpgid(hgc));
+	atexit(waitpager);
 	int exitcode = hgc_runcommand(hgc, argv + 1, argc - 1);
 	restoresignalhandler();
 	hgc_close(hgc);
 	freecmdserveropts(&opts);
-	waitpager();
 
 	return exitcode;
 }
--- a/contrib/chg/hgclient.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/chg/hgclient.c	Tue Apr 18 12:24:34 2017 -0400
@@ -252,7 +252,7 @@
 		ctx->datasize = sizeof(r_n);
 		writeblock(hgc);
 	} else if (strcmp(args[0], "pager") == 0) {
-		setuppager(args[1]);
+		setuppager(args[1], args + 3);
 		if (hgc->capflags & CAP_ATTACHIO)
 			attachio(hgc);
 		/* unblock the server */
--- a/contrib/chg/procutil.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/chg/procutil.c	Tue Apr 18 12:24:34 2017 -0400
@@ -91,11 +91,15 @@
 
 	struct sigaction sa;
 	memset(&sa, 0, sizeof(sa));
+
+	/* deadly signals meant to be sent to a process group:
+	 * - SIGHUP: usually generated by the kernel, when termination of a
+	 *   process causes that process group to become orphaned
+	 * - SIGINT: usually generated by the terminal */
 	sa.sa_handler = forwardsignaltogroup;
 	sa.sa_flags = SA_RESTART;
 	if (sigemptyset(&sa.sa_mask) < 0)
 		goto error;
-
 	if (sigaction(SIGHUP, &sa, NULL) < 0)
 		goto error;
 	if (sigaction(SIGINT, &sa, NULL) < 0)
@@ -111,6 +115,11 @@
 	sa.sa_flags = SA_RESTART;
 	if (sigaction(SIGWINCH, &sa, NULL) < 0)
 		goto error;
+	/* forward user-defined signals */
+	if (sigaction(SIGUSR1, &sa, NULL) < 0)
+		goto error;
+	if (sigaction(SIGUSR2, &sa, NULL) < 0)
+		goto error;
 	/* propagate job control requests to worker */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags = SA_RESTART;
@@ -168,7 +177,7 @@
 
 /* This implementation is based on hgext/pager.py (post 369741ef7253)
  * Return 0 if pager is not started, or pid of the pager */
-pid_t setuppager(const char *pagercmd)
+pid_t setuppager(const char *pagercmd, const char *envp[])
 {
 	assert(pagerpid == 0);
 	if (!pagercmd)
@@ -196,7 +205,8 @@
 		close(pipefds[0]);
 		close(pipefds[1]);
 
-		int r = execlp("/bin/sh", "/bin/sh", "-c", pagercmd, NULL);
+		int r = execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL,
+				envp);
 		if (r < 0) {
 			abortmsgerrno("cannot start pager '%s'", pagercmd);
 		}
--- a/contrib/chg/procutil.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/chg/procutil.h	Tue Apr 18 12:24:34 2017 -0400
@@ -15,7 +15,7 @@
 void restoresignalhandler(void);
 void setupsignalhandler(pid_t pid, pid_t pgid);
 
-pid_t setuppager(const char *pagercmd);
+pid_t setuppager(const char *pagercmd, const char *envp[]);
 void waitpager(void);
 
 #endif /* PROCUTIL_H_ */
--- a/contrib/hgperf	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/hgperf	Tue Apr 18 12:24:34 2017 -0400
@@ -55,17 +55,15 @@
 import mercurial.util
 import mercurial.dispatch
 
-import time
-
 def timer(func, title=None):
     results = []
-    begin = time.time()
+    begin = mercurial.util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = mercurial.util.timer()
         r = func()
-        cstop = time.time()
+        cstop = mercurial.util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
--- a/contrib/memory.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/memory.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,7 +12,6 @@
 '''
 
 from __future__ import absolute_import
-import atexit
 
 def memusage(ui):
     """Report memory usage of the current process."""
@@ -29,4 +28,4 @@
                             for k, v in result.iteritems()]) + "\n")
 
 def extsetup(ui):
-    atexit.register(memusage, ui)
+    ui.atexit(memusage, ui)
--- a/contrib/perf.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/perf.py	Tue Apr 18 12:24:34 2017 -0400
@@ -20,6 +20,7 @@
 
 from __future__ import absolute_import
 import functools
+import gc
 import os
 import random
 import sys
@@ -66,6 +67,16 @@
 setattr(util, 'safehasattr', safehasattr)
 
 # for "historical portability":
+# define util.timer forcibly, because util.timer has been available
+# since ae5d60bb70c9
+if safehasattr(time, 'perf_counter'):
+    util.timer = time.perf_counter
+elif os.name == 'nt':
+    util.timer = time.clock
+else:
+    util.timer = time.time
+
+# for "historical portability":
 # use locally defined empty option list, if formatteropts isn't
 # available, because commands.formatteropts has been available since
 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
@@ -164,6 +175,7 @@
                     self.hexfunc = node.short
             def __nonzero__(self):
                 return False
+            __bool__ = __nonzero__
             def startitem(self):
                 pass
             def data(self, **data):
@@ -189,14 +201,15 @@
     func()
 
 def _timer(fm, func, title=None):
+    gc.collect()
     results = []
-    begin = time.time()
+    begin = util.timer()
     count = 0
     while True:
         ostart = os.times()
-        cstart = time.time()
+        cstart = util.timer()
         r = func()
-        cstop = time.time()
+        cstop = util.timer()
         ostop = os.times()
         count += 1
         a, b = ostart, ostop
@@ -993,6 +1006,26 @@
     node = r.lookup(rev)
     rev = r.rev(node)
 
+    def getrawchunks(data, chain):
+        start = r.start
+        length = r.length
+        inline = r._inline
+        iosize = r._io.size
+        buffer = util.buffer
+        offset = start(chain[0])
+
+        chunks = []
+        ladd = chunks.append
+
+        for rev in chain:
+            chunkstart = start(rev)
+            if inline:
+                chunkstart += (rev + 1) * iosize
+            chunklength = length(rev)
+            ladd(buffer(data, chunkstart - offset, chunklength))
+
+        return chunks
+
     def dodeltachain(rev):
         if not cache:
             r.clearcaches()
@@ -1003,24 +1036,15 @@
             r.clearcaches()
         r._chunkraw(chain[0], chain[-1])
 
-    def dodecompress(data, chain):
+    def dorawchunks(data, chain):
         if not cache:
             r.clearcaches()
-
-        start = r.start
-        length = r.length
-        inline = r._inline
-        iosize = r._io.size
-        buffer = util.buffer
-        offset = start(chain[0])
+        getrawchunks(data, chain)
 
-        for rev in chain:
-            chunkstart = start(rev)
-            if inline:
-                chunkstart += (rev + 1) * iosize
-            chunklength = length(rev)
-            b = buffer(data, chunkstart - offset, chunklength)
-            r.decompress(b)
+    def dodecompress(chunks):
+        decomp = r.decompress
+        for chunk in chunks:
+            decomp(chunk)
 
     def dopatch(text, bins):
         if not cache:
@@ -1039,6 +1063,7 @@
 
     chain = r._deltachain(rev)[0]
     data = r._chunkraw(chain[0], chain[-1])[1]
+    rawchunks = getrawchunks(data, chain)
     bins = r._chunks(chain)
     text = str(bins[0])
     bins = bins[1:]
@@ -1048,7 +1073,8 @@
         (lambda: dorevision(), 'full'),
         (lambda: dodeltachain(rev), 'deltachain'),
         (lambda: doread(chain), 'read'),
-        (lambda: dodecompress(data, chain), 'decompress'),
+        (lambda: dorawchunks(data, chain), 'rawchunks'),
+        (lambda: dodecompress(rawchunks), 'decompress'),
         (lambda: dopatch(text, bins), 'patch'),
         (lambda: dohash(text), 'hash'),
     ]
@@ -1256,6 +1282,17 @@
         timer(fn, title=title)
         fm.end()
 
+@command('perfwrite', formatteropts)
+def perfwrite(ui, repo, **opts):
+    """microbenchmark ui.write
+    """
+    timer, fm = gettimer(ui, opts)
+    def write():
+        for i in range(100000):
+            ui.write(('Testing write performance\n'))
+    timer(write)
+    fm.end()
+
 def uisetup(ui):
     if (util.safehasattr(cmdutil, 'openrevlog') and
         not util.safehasattr(commands, 'debugrevlogopts')):
--- a/contrib/python-zstandard/NEWS.rst	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/NEWS.rst	Tue Apr 18 12:24:34 2017 -0400
@@ -1,6 +1,66 @@
 Version History
 ===============
 
+0.8.1 (released 2017-04-08)
+---------------------------
+
+* Add #includes so compilation on OS X and BSDs works (#20).
+
+0.8.0 (released 2017-03-08)
+---------------------------
+
+* CompressionParameters now has an estimated_compression_context_size() method.
+  zstd.estimate_compression_context_size() is now deprecated and slated for
+  removal.
+* Implemented a lot of fuzzing tests.
+* CompressionParameters instances now perform extra validation by calling
+  ZSTD_checkCParams() at construction time.
+* multi_compress_to_buffer() API for compressing multiple inputs as a
+  single operation, as efficiently as possible.
+* ZSTD_CStream instances are now used across multiple operations on
+  ZstdCompressor instances, resulting in much better performance for
+  APIs that do streaming.
+* ZSTD_DStream instances are now used across multiple operations on
+  ZstdDecompressor instances, resulting in much better performance for
+  APIs that do streaming.
+* train_dictionary() now releases the GIL.
+* Support for training dictionaries using the COVER algorithm.
+* multi_decompress_to_buffer() API for decompressing multiple frames as a
+  single operation, as efficiently as possible.
+* Support for multi-threaded compression.
+* Disable deprecation warnings when compiling CFFI module.
+* Fixed memory leak in train_dictionary().
+* Removed DictParameters type.
+* train_dictionary() now accepts keyword arguments instead of a
+  DictParameters instance to control dictionary generation.
+
+0.7.0 (released 2017-02-07)
+---------------------------
+
+* Added zstd.get_frame_parameters() to obtain info about a zstd frame.
+* Added ZstdDecompressor.decompress_content_dict_chain() for efficient
+  decompression of *content-only dictionary chains*.
+* CFFI module fully implemented; all tests run against both C extension and
+  CFFI implementation.
+* Vendored version of zstd updated to 1.1.3.
+* ZstdDecompressor.decompress() now uses ZSTD_createDDict_byReference()
+  to avoid extra memory allocation of dict data.
+* Add function names to error messages (by using ":name" in PyArg_Parse*
+  functions).
+* Reuse decompression context across operations. Previously, we created a
+  new ZSTD_DCtx for each decompress(). This was measured to slow down
+  decompression by 40-200MB/s. The API guarantees say ZstdDecompressor
+  is not thread safe. So we reuse the ZSTD_DCtx across operations and make
+  things faster in the process.
+* ZstdCompressor.write_to()'s compress() and flush() methods now return number
+  of bytes written.
+* ZstdDecompressor.write_to()'s write() method now returns the number of bytes
+  written to the underlying output object.
+* CompressionParameters instances now expose their values as attributes.
+* CompressionParameters instances no longer are subscriptable nor behave
+  as tuples (backwards incompatible). Use attributes to obtain values.
+* DictParameters instances now expose their values as attributes.
+
 0.6.0 (released 2017-01-14)
 ---------------------------
 
--- a/contrib/python-zstandard/README.rst	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/README.rst	Tue Apr 18 12:24:34 2017 -0400
@@ -4,10 +4,11 @@
 
 This project provides Python bindings for interfacing with the
 `Zstandard <http://www.zstd.net>`_ compression library. A C extension
-and CFFI interface is provided.
+and CFFI interface are provided.
 
-The primary goal of the extension is to provide a Pythonic interface to
-the underlying C API. This means exposing most of the features and flexibility
+The primary goal of the project is to provide a rich interface to the
+underlying C API through a Pythonic interface while not sacrificing
+performance. This means exposing most of the features and flexibility
 of the C API while not sacrificing usability or safety that Python provides.
 
 The canonical home for this project is
@@ -19,15 +20,24 @@
 ================
 
 The project is officially in beta state. The author is reasonably satisfied
-with the current API and that functionality works as advertised. There
-may be some backwards incompatible changes before 1.0. Though the author
-does not intend to make any major changes to the Python API.
+that functionality works as advertised. **There will be some backwards
+incompatible changes before 1.0, probably in the 0.9 release.** This may
+involve renaming the main module from *zstd* to *zstandard* and renaming
+various types and methods. Pin the package version to prevent unwanted
+breakage when this change occurs!
+
+This project is vendored and distributed with Mercurial 4.1, where it is
+used in a production capacity.
 
 There is continuous integration for Python versions 2.6, 2.7, and 3.3+
 on Linux x86_64 and Windows x86 and x86_64. The author is reasonably
 confident the extension is stable and works as advertised on these
 platforms.
 
+The CFFI bindings are mostly feature complete. Where a feature is implemented
+in CFFI, unit tests run against both C extension and CFFI implementation to
+ensure behavior parity.
+
 Expected Changes
 ----------------
 
@@ -43,19 +53,27 @@
 There should be an API that accepts an object that conforms to the buffer
 interface and returns an iterator over compressed or decompressed output.
 
+There should be an API that exposes an ``io.RawIOBase`` interface to
+compressor and decompressor streams, like how ``gzip.GzipFile`` from
+the standard library works (issue 13).
+
 The author is on the fence as to whether to support the extremely
 low level compression and decompression APIs. It could be useful to
 support compression without the framing headers. But the author doesn't
 believe it a high priority at this time.
 
-The CFFI bindings are half-baked and need to be finished.
+There will likely be a refactoring of the module names. Currently,
+``zstd`` is a C extension and ``zstd_cffi`` is the CFFI interface.
+This means that all code for the C extension must be implemented in
+C. ``zstd`` may be converted to a Python module so code can be reused
+between CFFI and C and so not all code in the C extension has to be C.
 
 Requirements
 ============
 
-This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, and 3.5
-on common platforms (Linux, Windows, and OS X). Only x86_64 is currently
-well-tested as an architecture.
+This extension is designed to run with Python 2.6, 2.7, 3.3, 3.4, 3.5, and
+3.6 on common platforms (Linux, Windows, and OS X). Only x86_64 is
+currently well-tested as an architecture.
 
 Installing
 ==========
@@ -106,15 +124,11 @@
 Comparison to Other Python Bindings
 ===================================
 
-https://pypi.python.org/pypi/zstd is an alternative Python binding to
+https://pypi.python.org/pypi/zstd is an alternate Python binding to
 Zstandard. At the time this was written, the latest release of that
-package (1.0.0.2) had the following significant differences from this package:
-
-* It only exposes the simple API for compression and decompression operations.
-  This extension exposes the streaming API, dictionary training, and more.
-* It adds a custom framing header to compressed data and there is no way to
-  disable it. This means that data produced with that module cannot be used by
-  other Zstandard implementations.
+package (1.1.2) only exposed the simple APIs for compression and decompression.
+This package exposes much more of the zstd API, including streaming and
+dictionary compression. This package also has CFFI support.
 
 Bundling of Zstandard Source Code
 =================================
@@ -151,10 +165,13 @@
    $ tox
 
 Tests use the ``hypothesis`` Python package to perform fuzzing. If you
-don't have it, those tests won't run.
+don't have it, those tests won't run. Since the fuzzing tests take longer
+to execute than normal tests, you'll need to opt in to running them by
+setting the ``ZSTD_SLOW_TESTS`` environment variable. This is set
+automatically when using ``tox``.
 
-There is also an experimental CFFI module. You need the ``cffi`` Python
-package installed to build and test that.
+The ``cffi`` Python package needs to be installed in order to build the CFFI
+bindings. If it isn't present, the CFFI bindings won't be built.
 
 To create a virtualenv with all development dependencies, do something
 like the following::
@@ -171,8 +188,16 @@
 API
 ===
 
-The compiled C extension provides a ``zstd`` Python module. This module
-exposes the following interfaces.
+The compiled C extension provides a ``zstd`` Python module. The CFFI
+bindings provide a ``zstd_cffi`` module. Both provide an identical API
+interface. The types, functions, and attributes exposed by these modules
+are documented in the sections below.
+
+.. note::
+
+   The documentation in this section makes references to various zstd
+   concepts and functionality. The ``Concepts`` section below explains
+   these concepts in more detail.
 
 ZstdCompressor
 --------------
@@ -208,6 +233,14 @@
    Whether to write the dictionary ID into the compressed data.
    Defaults to True. The dictionary ID is only written if a dictionary
    is being used.
+threads
+   Enables and sets the number of threads to use for multi-threaded compression
+   operations. Defaults to 0, which means to use single-threaded compression.
+   Negative values will resolve to the number of logical CPUs in the system.
+   Read below for more info on multi-threaded compression. This argument only
+   controls thread count for operations that operate on individual pieces of
+   data. APIs that spawn multiple threads for working on multiple pieces of
+   data have their own ``threads`` argument.
 
 Unless specified otherwise, assume that no two methods of ``ZstdCompressor``
 instances can be called from multiple Python threads simultaneously. In other
@@ -221,6 +254,8 @@
    cctx = zstd.ZstdCompressor()
    compressed = cctx.compress(b'data to compress')
 
+The ``data`` argument can be any object that implements the *buffer protocol*.
+
 Unless ``compression_params`` or ``dict_data`` are passed to the
 ``ZstdCompressor``, each invocation of ``compress()`` will calculate the
 optimal compression parameters for the configured compression ``level`` and
@@ -260,6 +295,10 @@
 compressor's internal state into the output object. This may result in 0 or
 more ``write()`` calls to the output object.
 
+Both ``write()`` and ``flush()`` return the number of bytes written to the
+object's ``write()``. In many cases, small inputs do not accumulate enough
+data to cause a write and ``write()`` will return ``0``.
+
 If the size of the data being fed to this streaming compressor is known,
 you can declare it before compression begins::
 
@@ -406,6 +445,42 @@
    data = cobj.compress(b'foobar')
    data = cobj.flush()
 
+Batch Compression API
+^^^^^^^^^^^^^^^^^^^^^
+
+(Experimental. Not yet supported in CFFI bindings.)
+
+``multi_compress_to_buffer(data, [threads=0])`` performs compression of multiple
+inputs as a single operation.
+
+Data to be compressed can be passed as a ``BufferWithSegmentsCollection``, a
+``BufferWithSegments``, or a list containing byte like objects. Each element of
+the container will be compressed individually using the configured parameters
+on the ``ZstdCompressor`` instance.
+
+The ``threads`` argument controls how many threads to use for compression. The
+default is ``0`` which means to use a single thread. Negative values use the
+number of logical CPUs in the machine.
+
+The function returns a ``BufferWithSegmentsCollection``. This type represents
+N discrete memory allocations, each holding 1 or more compressed frames.
+
+Output data is written to shared memory buffers. This means that unlike
+regular Python objects, a reference to *any* object within the collection
+keeps the shared buffer and therefore memory backing it alive. This can have
+undesirable effects on process memory usage.
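+
+A hedged usage sketch (inputs and thread count are illustrative)::
+
+   cctx = zstd.ZstdCompressor(level=3)
+   results = cctx.multi_compress_to_buffer([b'input 0', b'input 1'],
+                                           threads=2)
+   # Index the collection to obtain each compressed frame.
+   for i in range(len(results)):
+       frame = results[i].tobytes()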
+
+The API and behavior of this function are experimental and will likely change.
+Known deficiencies include:
+
+* If asked to use multiple threads, it will always spawn that many threads,
+  even if the input is too small to use them. It should automatically lower
+  the thread count when the extra threads would just add overhead.
+* The buffer allocation strategy is fixed. There is room to make it dynamic,
+  perhaps even to allow one output buffer per input, facilitating a variation
+  of the API to return a list without the adverse effects of shared memory
+  buffers.
+
 ZstdDecompressor
 ----------------
 
@@ -476,6 +551,10 @@
 the decompressor by calling ``write(data)`` and decompressed output is written
 to the output object by calling its ``write(data)`` method.
 
+Calls to ``write()`` will return the number of bytes written to the output
+object. Not all inputs will result in bytes being written, so return values
+of ``0`` are possible.
+
 The size of chunks being ``write()`` to the destination can be specified::
 
     dctx = zstd.ZstdDecompressor()
@@ -576,64 +655,155 @@
    data = dobj.decompress(compressed_chunk_0)
    data = dobj.decompress(compressed_chunk_1)
 
-Choosing an API
----------------
+Batch Decompression API
+^^^^^^^^^^^^^^^^^^^^^^^
+
+(Experimental. Not yet supported in CFFI bindings.)
+
+``multi_decompress_to_buffer()`` performs decompression of multiple
+frames as a single operation and returns a ``BufferWithSegmentsCollection``
+containing decompressed data for all inputs.
 
-Various forms of compression and decompression APIs are provided because each
-are suitable for different use cases.
+Compressed frames can be passed to the function as a ``BufferWithSegments``,
+a ``BufferWithSegmentsCollection``, or as a list containing objects that
+conform to the buffer protocol. For best performance, pass a
+``BufferWithSegmentsCollection`` or a ``BufferWithSegments``, as
+minimal input validation will be done for those types. If calling from
+Python (as opposed to C), constructing one of these instances may add
+overhead that cancels out the savings from the reduced validation of
+list inputs.
+
+The decompressed size of each frame must be discoverable. It can either be
+embedded within the zstd frame (``write_content_size=True`` argument to
+``ZstdCompressor``) or passed in via the ``decompressed_sizes`` argument.
+
+The ``decompressed_sizes`` argument is an object conforming to the buffer
+protocol which holds an array of 64-bit unsigned integers in the machine's
+native format defining the decompressed sizes of each frame. If this argument
+is passed, it avoids having to scan each frame for its decompressed size.
+This frame scanning can add noticeable overhead in some scenarios.
 
-The simple/one-shot APIs are useful for small data, when the decompressed
-data size is known (either recorded in the zstd frame header via
-``write_content_size`` or known via an out-of-band mechanism, such as a file
-size).
+The ``threads`` argument controls the number of threads to use to perform
+decompression operations. The default (``0``) or the value ``1`` means to
+use a single thread. Negative values use the number of logical CPUs in the
+machine.
+
+.. note::
+
+   It is possible to pass a ``mmap.mmap()`` instance into this function by
+   wrapping it with a ``BufferWithSegments`` instance (which will define the
+   offsets of frames within the memory mapped region).
+
+This function is logically equivalent to performing ``dctx.decompress()``
+on each input frame and returning the result.
 
-A limitation of the simple APIs is that input or output data must fit in memory.
-And unless using advanced tricks with Python *buffer objects*, both input and
-output must fit in memory simultaneously.
+This function exists to perform decompression on multiple frames as fast
+as possible by having as little overhead as possible. Since decompression is
+performed as a single operation and since the decompressed output is stored in
+a single buffer, extra memory allocations, Python objects, and Python function
+calls are avoided. This is ideal for scenarios where callers need to access
+decompressed data for multiple frames.
 
-Another limitation is that compression or decompression is performed as a single
-operation. So if you feed large input, it could take a long time for the
-function to return.
+Currently, the implementation always spawns multiple threads when requested,
+even if the amount of work to do is small. In the future, it will be smarter
+about avoiding threads and their associated overhead when the amount of
+work to do is small.
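+
+A minimal sketch, assuming each frame was compressed with
+``write_content_size=True`` so no ``decompressed_sizes`` argument is
+required::
+
+   dctx = zstd.ZstdDecompressor()
+   results = dctx.multi_decompress_to_buffer([frame0, frame1], threads=-1)
+   # results is a BufferWithSegmentsCollection; index it for each output.
+   first = results[0].tobytes()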
+
+Content-Only Dictionary Chain Decompression
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``decompress_content_dict_chain(frames)`` performs decompression of a list of
+zstd frames produced using chained *content-only* dictionary compression. Such
+a list of frames is produced by compressing discrete inputs where each
+non-initial input is compressed with a *content-only* dictionary consisting
+of the content of the previous input.
+
+For example, say you have the following inputs::
 
-The streaming APIs do not have the limitations of the simple API. The cost to
-this is they are more complex to use than a single function call.
+   inputs = [b'input 1', b'input 2', b'input 3']
+
+The zstd frame chain consists of:
+
+1. ``b'input 1'`` compressed in standalone/discrete mode
+2. ``b'input 2'`` compressed using ``b'input 1'`` as a *content-only* dictionary
+3. ``b'input 3'`` compressed using ``b'input 2'`` as a *content-only* dictionary
+
+Each zstd frame **must** have the content size written.
+
+The following Python code can be used to produce a *content-only dictionary
+chain*::
 
-The streaming APIs put the caller in control of compression and decompression
-behavior by allowing them to directly control either the input or output side
-of the operation.
+    def make_chain(inputs):
+        frames = []
+
+        # First frame is compressed in standalone/discrete mode.
+        zctx = zstd.ZstdCompressor(write_content_size=True)
+        frames.append(zctx.compress(inputs[0]))
 
-With the streaming input APIs, the caller feeds data into the compressor or
-decompressor as they see fit. Output data will only be written after the caller
-has explicitly written data.
+        # Subsequent frames use the previous fulltext as a content-only dictionary
+        for i, raw in enumerate(inputs[1:]):
+            dict_data = zstd.ZstdCompressionDict(inputs[i])
+            zctx = zstd.ZstdCompressor(write_content_size=True, dict_data=dict_data)
+            frames.append(zctx.compress(raw))
+
+        return frames
+
+``decompress_content_dict_chain()`` returns the uncompressed data of the last
+element in the input chain.
 
-With the streaming output APIs, the caller consumes output from the compressor
-or decompressor as they see fit. The compressor or decompressor will only
-consume data from the source when the caller is ready to receive it.
+It is possible to implement *content-only dictionary chain* decompression
+on top of other Python APIs. However, this function will likely be significantly
+faster, especially for long input chains, as it avoids the overhead of
+instantiating and passing around intermediate objects between C and Python.
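+
+A companion sketch decompressing the chain produced by ``make_chain()``
+above::
+
+   dctx = zstd.ZstdDecompressor()
+   # Returns the uncompressed data of the final input in the chain.
+   assert dctx.decompress_content_dict_chain(make_chain(inputs)) == inputs[-1]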
+
+Multi-Threaded Compression
+--------------------------
+
+``ZstdCompressor`` accepts a ``threads`` argument that controls the number
+of threads to use for compression. The way this works is that input is split
+into segments and each segment is fed into a worker pool for compression. Once
+a segment is compressed, it is flushed/appended to the output.
+
+The segment size for multi-threaded compression is chosen from the window size
+of the compressor. This is derived from the ``window_log`` attribute of a
+``CompressionParameters`` instance. By default, segment sizes are in the 1+MB
+range.
 
-One end of the streaming APIs involves a file-like object that must
-``write()`` output data or ``read()`` input data. Depending on what the
-backing storage for these objects is, those operations may not complete quickly.
-For example, when streaming compressed data to a file, the ``write()`` into
-a streaming compressor could result in a ``write()`` to the filesystem, which
-may take a long time to finish due to slow I/O on the filesystem. So, there
-may be overhead in streaming APIs beyond the compression and decompression
-operations.
+If multi-threaded compression is requested and the input is smaller than the
+configured segment size, only a single compression thread will be used. If the
+input is smaller than the segment size multiplied by the thread pool size or
+if data cannot be delivered to the compressor fast enough, not all requested
+compressor threads may be active simultaneously.
+
+Compared to non-multi-threaded compression, multi-threaded compression has
+higher per-operation overhead. This includes extra memory operations,
+thread creation, lock acquisition, etc.
+
+Due to the nature of multi-threaded compression using *N* compression
+*states*, the output from multi-threaded compression will likely be larger
+than non-multi-threaded compression. The difference is usually small. But
+there is a CPU/wall time versus size trade off that may warrant investigation.
+
+Output from multi-threaded compression does not require any special handling
+on the decompression side. In other words, any zstd decompressor should be able
+to consume data produced with multi-threaded compression.
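+
+Enabling multi-threaded compression is just a constructor argument; a
+sketch (the input is illustrative)::
+
+   cctx = zstd.ZstdCompressor(level=10, threads=-1)  # thread per logical CPU
+   compressed = cctx.compress(large_data)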
 
 Dictionary Creation and Management
 ----------------------------------
 
-Zstandard allows *dictionaries* to be used when compressing and
-decompressing data. The idea is that if you are compressing a lot of similar
-data, you can precompute common properties of that data (such as recurring
-byte sequences) to achieve better compression ratios.
-
-In Python, compression dictionaries are represented as the
-``ZstdCompressionDict`` type.
+Compression dictionaries are represented as the ``ZstdCompressionDict`` type.
 
 Instances can be constructed from bytes::
 
    dict_data = zstd.ZstdCompressionDict(data)
 
+It is possible to construct a dictionary from *any* data. Unless the
+data begins with a magic header, the dictionary will be treated as
+*content-only*. *Content-only* dictionaries allow compression operations
+that follow to reference raw data within the content. For one use of
+*content-only* dictionaries, see
+``ZstdDecompressor.decompress_content_dict_chain()``.
+
 More interestingly, instances can be created by *training* on sample data::
 
    dict_data = zstd.train_dictionary(size, samples)
@@ -673,6 +843,88 @@
    dict_data = zstd.train_dictionary(size, samples)
    raw_data = dict_data.as_bytes()
 
+The following named arguments to ``train_dictionary`` can also be used
+to further control dictionary generation (a usage sketch follows this
+list).
+
+selectivity
+   Integer selectivity level. Default is 9. Larger values yield more data in
+   dictionary.
+level
+   Integer compression level. Default is 6.
+dict_id
+   Integer dictionary ID for the produced dictionary. Default is 0, which
+   means to use a random value.
+notifications
+   Controls writing of informational messages to ``stderr``. ``0`` (the
+   default) means to write nothing. ``1`` writes errors. ``2`` writes
+   progression info. ``3`` writes more details. And ``4`` writes all info.
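+
+The promised sketch, combining the arguments above (values are
+illustrative, not recommendations)::
+
+   dict_data = zstd.train_dictionary(16384, samples,
+                                     selectivity=9,
+                                     level=6,
+                                     notifications=1)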
+
+Cover Dictionaries
+^^^^^^^^^^^^^^^^^^
+
+An alternate dictionary training mechanism named *cover* is also available.
+More details about this training mechanism are available in the paper
+*Effective Construction of Relative Lempel-Ziv Dictionaries* (authors:
+Liao, Petri, Moffat, Wirth).
+
+To use this mechanism, use ``zstd.train_cover_dictionary()`` instead of
+``zstd.train_dictionary()``. The function behaves nearly the same except
+its arguments are different and the returned dictionary will contain ``k``
+and ``d`` attributes reflecting the parameters to the cover algorithm.
+
+.. note::
+
+   The ``k`` and ``d`` attributes are only populated on dictionary
+   instances created by this function. If a ``ZstdCompressionDict`` is
+   constructed from raw bytes data, the ``k`` and ``d`` attributes will
+   be ``0``.
+
+The segment and dmer size parameters to the cover algorithm can either be
+specified manually or you can ask ``train_cover_dictionary()`` to try
+multiple values and pick the best one, where *best* means the smallest
+compressed data size.
+
+In manual mode, the ``k`` and ``d`` arguments must be specified or a
+``ZstdError`` will be raised.
+
+In automatic mode (triggered by specifying ``optimize=True``), ``k``
+and ``d`` are optional. If a value isn't specified, then default values for
+both are tested.  The ``steps`` argument can control the number of steps
+through ``k`` values. The ``level`` argument defines the compression level
+that will be used when testing the compressed size. And ``threads`` can
+specify the number of threads to use for concurrent operation.
+
+This function takes the following arguments (a usage sketch follows
+the list):
+
+dict_size
+   Target size in bytes of the dictionary to generate.
+samples
+   A list of bytes holding samples the dictionary will be trained from.
+k
+   Parameter to cover algorithm defining the segment size. A reasonable range
+   is [16, 2048+].
+d
+   Parameter to cover algorithm defining the dmer size. A reasonable range is
+   [6, 16]. ``d`` must be less than or equal to ``k``.
+dict_id
+   Integer dictionary ID for the produced dictionary. Default is 0, which uses
+   a random value.
+optimize
+   When true, test dictionary generation with multiple parameters.
+level
+   Integer target compression level when testing compression with
+   ``optimize=True``. Default is 1.
+steps
+   Number of steps through ``k`` values to perform when ``optimize=True``.
+   Default is 32.
+threads
+   Number of threads to use when ``optimize=True``. Default is 0, which means
+   to use a single thread. A negative value can be specified to use as many
+   threads as there are detected logical CPUs.
+notifications
+   Controls writing of informational messages to ``stderr``. See the
+   documentation for ``train_dictionary()`` for more.
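+
+The promised sketch of both modes (parameter values are illustrative)::
+
+   # Manual mode: k and d must be specified.
+   dict_data = zstd.train_cover_dictionary(16384, samples, k=64, d=8)
+
+   # Automatic mode: test multiple k/d values and keep the best.
+   dict_data = zstd.train_cover_dictionary(16384, samples,
+                                           optimize=True,
+                                           level=1,
+                                           threads=-1)
+   print(dict_data.k, dict_data.d)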
+
 Explicit Compression Parameters
 -------------------------------
 
@@ -700,19 +952,57 @@
 
     cctx = zstd.ZstdCompressor(compression_params=params)
 
-The members of the ``CompressionParameters`` tuple are as follows::
+The members/attributes of ``CompressionParameters`` instances are as follows:
 
-* 0 - Window log
-* 1 - Chain log
-* 2 - Hash log
-* 3 - Search log
-* 4 - Search length
-* 5 - Target length
-* 6 - Strategy (one of the ``zstd.STRATEGY_`` constants)
+* window_log
+* chain_log
+* hash_log
+* search_log
+* search_length
+* target_length
+* strategy
+
+This is the order the arguments are passed to the constructor if not using
+named arguments.
 
 You'll need to read the Zstandard documentation for what these parameters
 do.
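+
+For instance, a sketch constructing parameters positionally in the
+order listed above (values are illustrative, not recommendations)::
+
+   params = zstd.CompressionParameters(18, 17, 17, 9, 5, 16,
+                                       zstd.STRATEGY_DFAST)
+   assert params.window_log == 18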
 
+Frame Inspection
+----------------
+
+Data emitted from zstd compression is encapsulated in a *frame*. This frame
+begins with a 4 byte *magic number* header followed by 2 to 14 bytes describing
+the frame in more detail. For more info, see
+https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md.
+
+``zstd.get_frame_parameters(data)`` parses a zstd *frame* header from a bytes
+instance and returns a ``FrameParameters`` object describing the frame.
+
+Depending on which fields are present in the frame and their values, the
+length of the frame parameters varies. If insufficient bytes are passed
+in to fully parse the frame parameters, ``ZstdError`` is raised. To ensure
+frame parameters can be parsed, pass in at least 18 bytes.
+
+``FrameParameters`` instances have the following attributes (a short
+sketch follows the list):
+
+content_size
+   Integer size of original, uncompressed content. This will be ``0`` if the
+   original content size isn't written to the frame (controlled with the
+   ``write_content_size`` argument to ``ZstdCompressor``) or if the input
+   content size was ``0``.
+
+window_size
+   Integer size of maximum back-reference distance in compressed data.
+
+dict_id
+   Integer of dictionary ID used for compression. ``0`` if no dictionary
+   ID was used or if the dictionary ID was ``0``.
+
+has_checksum
+   Bool indicating whether a 4 byte content checksum is stored at the end
+   of the frame.
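+
+The promised sketch (assuming ``data`` begins with a complete frame
+header)::
+
+   params = zstd.get_frame_parameters(data)
+   if params.content_size:
+       # The exact output size is known before decompressing.
+       output_size = params.content_size
+   print(params.window_size, params.dict_id, params.has_checksum)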
+
 Misc Functionality
 ------------------
 
@@ -776,19 +1066,293 @@
 TARGETLENGTH_MAX
     Maximum value for compression parameter
 STRATEGY_FAST
-    Compression strategory
+    Compression strategy
 STRATEGY_DFAST
-    Compression strategory
+    Compression strategy
 STRATEGY_GREEDY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY
-    Compression strategory
+    Compression strategy
 STRATEGY_LAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTLAZY2
-    Compression strategory
+    Compression strategy
 STRATEGY_BTOPT
-    Compression strategory
+    Compression strategy
+
+Performance Considerations
+--------------------------
+
+The ``ZstdCompressor`` and ``ZstdDecompressor`` types maintain state to a
+persistent compression or decompression *context*. Reusing a ``ZstdCompressor``
+or ``ZstdDecompressor`` instance for multiple operations is faster than
+instantiating a new ``ZstdCompressor`` or ``ZstdDecompressor`` for each
+operation. The differences are magnified as the size of data decreases. For
+example, the difference between *context* reuse and non-reuse for 100,000
+100 byte inputs will be significant (possibly over 10x faster to reuse contexts)
+whereas 10 1,000,000 byte inputs will be more similar in speed (because the
+time spent doing compression dwarfs time spent creating new *contexts*).
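+
+In practice this means hoisting the context out of loops, as in this
+sketch::
+
+   cctx = zstd.ZstdCompressor()  # create the context once...
+   compressed = [cctx.compress(chunk) for chunk in chunks]  # ...reuse it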
+
+Buffer Types
+------------
+
+The API exposes a handful of custom types for interfacing with memory buffers.
+The primary goal of these types is to facilitate efficient multi-object
+operations.
+
+The essential idea is to have a single memory allocation provide backing
+storage for multiple logical objects. This has 2 main advantages: fewer
+allocations and optimal memory access patterns. This avoids having to allocate
+a Python object for each logical object and furthermore ensures that access of
+data for objects can be sequential (read: fast) in memory.
+
+BufferWithSegments
+^^^^^^^^^^^^^^^^^^
+
+The ``BufferWithSegments`` type represents a memory buffer containing N
+discrete items of known lengths (segments). It is essentially a fixed size
+memory address and an array of 2-tuples of ``(offset, length)`` 64-bit
+unsigned native endian integers defining the byte offset and length of each
+segment within the buffer.
+
+Instances behave like containers.
+
+``len()`` returns the number of segments within the instance.
+
+``o[index]`` or ``__getitem__`` obtains a ``BufferSegment`` representing an
+individual segment within the backing buffer. That returned object references
+(not copies) memory. This means that iterating all objects doesn't copy
+data within the buffer.
+
+The ``.size`` attribute contains the total size in bytes of the backing
+buffer.
+
+Instances conform to the buffer protocol. So a reference to the backing bytes
+can be obtained via ``memoryview(o)``. A *copy* of the backing bytes can also
+be obtained via ``.tobytes()``.
+
+The ``.segments`` attribute exposes the array of ``(offset, length)`` for
+segments within the buffer. It is a ``BufferSegments`` type.
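+
+A constructed sketch (the segments argument packs ``(offset, length)``
+pairs as native 64-bit unsigned integers, per the constructor in
+``bufferutil.c``)::
+
+   import struct
+
+   data = b'foobar baz'
+   segments = struct.pack('=QQQQ', 0, 6, 7, 3)  # (0, 6) and (7, 3)
+   buf = zstd.BufferWithSegments(data, segments)
+   assert len(buf) == 2
+   assert buf[0].tobytes() == b'foobar'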
+
+BufferSegment
+^^^^^^^^^^^^^
+
+The ``BufferSegment`` type represents a segment within a ``BufferWithSegments``.
+It is essentially a reference to N bytes within a ``BufferWithSegments``.
+
+``len()`` returns the length of the segment in bytes.
+
+``.offset`` contains the byte offset of this segment within its parent
+``BufferWithSegments`` instance.
+
+The object conforms to the buffer protocol. ``.tobytes()`` can be called to
+obtain a ``bytes`` instance with a copy of the backing bytes.
+
+BufferSegments
+^^^^^^^^^^^^^^
+
+This type represents an array of ``(offset, length)`` integers defining segments
+within a ``BufferWithSegments``.
+
+The array members are 64-bit unsigned integers using host/native bit order.
+
+Instances conform to the buffer protocol.
+
+BufferWithSegmentsCollection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``BufferWithSegmentsCollection`` type represents a virtual spanning view
+of multiple ``BufferWithSegments`` instances.
+
+Instances are constructed from 1 or more ``BufferWithSegments`` instances. The
+resulting object behaves like an ordered sequence whose members are the
+segments within each ``BufferWithSegments``.
+
+``len()`` returns the number of segments within all ``BufferWithSegments``
+instances.
+
+``o[index]`` and ``__getitem__(index)`` return the ``BufferSegment`` at
+that offset as if all ``BufferWithSegments`` instances were a single
+entity.
+
+If the object is composed of 2 ``BufferWithSegments`` instances with the
+first having 2 segments and the second having 3 segments, then ``b[0]``
+and ``b[1]`` access segments in the first object and ``b[2]``, ``b[3]``,
+and ``b[4]`` access segments from the second.
+
+Choosing an API
+===============
+
+There are multiple APIs for performing compression and decompression. This is
+because different applications have different needs and the library wants to
+facilitate optimal use in as many use cases as possible.
+
+At a high level, APIs are divided into *one-shot* and *streaming*. See
+the ``Concepts`` section for a description of how these are different at
+the C layer.
+
+The *one-shot* APIs are useful for small data, where the input or output
+size is known. (The size can come from a buffer length, file size, or
+stored in the zstd frame header.) A limitation of the *one-shot* APIs is that
+input and output must fit in memory simultaneously. For, say, a 4 GB input,
+this is often not feasible.
+
+The *one-shot* APIs also perform all work as a single operation. So, if you
+feed it large input, it could take a long time for the function to return.
+
+The streaming APIs do not have the limitations of the *one-shot* APIs. But the
+price you pay for this flexibility is that they are more complex than a
+single function call.
+
+The streaming APIs put the caller in control of compression and decompression
+behavior by allowing them to directly control either the input or output side
+of the operation.
+
+With the *streaming input*, *compressor*, and *decompressor* APIs, the caller
+has full control over the input to the compression or decompression stream.
+They can directly choose when new data is operated on.
+
+With the *streaming output* APIs, the caller has full control over the output
+of the compression or decompression stream. It can choose when to receive
+new data.
+
+When using the *streaming* APIs that operate on file-like or stream objects,
+it is important to consider what happens in that object when I/O is requested.
+There is potential for long pauses as data is read or written from the
+underlying stream (say from interacting with a filesystem or network). This
+could add considerable overhead.
+
+Concepts
+========
+
+It is important to have a basic understanding of how Zstandard works in order
+to optimally use this library. In addition, there are some low-level Python
+concepts that are worth explaining to aid understanding. This section aims to
+provide that knowledge.
+
+Zstandard Frames and Compression Format
+---------------------------------------
+
+Compressed zstandard data almost always exists within a container called a
+*frame*. (For the technically curious, see the
+`specification <https://github.com/facebook/zstd/blob/3bee41a70eaf343fbcae3637b3f6edbe52f35ed8/doc/zstd_compression_format.md>`_.)
+
+The frame contains a header and optional trailer. The header contains a
+magic number to self-identify as a zstd frame and a description of the
+compressed data that follows.
+
+Among other things, the frame *optionally* contains the size of the
+decompressed data the frame represents, a 32-bit checksum of the
+decompressed data (to facilitate verification during decompression),
+and the ID of the dictionary used to compress the data.
+
+Storing the original content size in the frame (``write_content_size=True``
+to ``ZstdCompressor``) is important for performance in some scenarios. Having
+the decompressed size stored there (or storing it elsewhere) allows
+decompression to perform a single memory allocation that is exactly sized to
+the output. This is faster than continuously growing a memory buffer to hold
+output.
+
+Compression and Decompression Contexts
+--------------------------------------
+
+In order to perform a compression or decompression operation with the zstd
+C API, you need what's called a *context*. A context essentially holds
+configuration and state for a compression or decompression operation. For
+example, a compression context holds the configured compression level.
+
+Contexts can be reused for multiple operations. Since creating and
+destroying contexts is not free, there are performance advantages to
+reusing contexts.
+
+The ``ZstdCompressor`` and ``ZstdDecompressor`` types are essentially
+wrappers around these contexts in the zstd C API.
+
+One-shot And Streaming Operations
+---------------------------------
+
+A compression or decompression operation can either be performed as a
+single *one-shot* operation or as a continuous *streaming* operation.
+
+In one-shot mode (the *simple* APIs provided by the Python interface),
+**all** input is handed to the compressor or decompressor as a single buffer
+and **all** output is returned as a single buffer.
+
+In streaming mode, input is delivered to the compressor or decompressor as
+a series of chunks via multiple function calls. Likewise, output is
+obtained in chunks as well.
+
+Streaming operations require an additional *stream* object to be created
+to track the operation. These are logical extensions of *context*
+instances.
+
+There are advantages and disadvantages to each mode of operation. There
+are scenarios where certain modes can't be used. See the
+``Choosing an API`` section for more.
+
+Dictionaries
+------------
+
+A compression *dictionary* is essentially data used to seed the compressor
+state so it can achieve better compression. The idea is that if you are
+compressing a lot of similar pieces of data (e.g. JSON documents or anything
+sharing similar structure), then you can find common patterns across multiple
+objects and then leverage those common patterns during compression and
+decompression operations to achieve better compression ratios.
+
+Dictionary compression is generally only useful for small inputs - data no
+larger than a few kilobytes. The upper bound on this range is highly dependent
+on the input data and the dictionary.
+
+Python Buffer Protocol
+----------------------
+
+Many functions in the library operate on objects that implement Python's
+`buffer protocol <https://docs.python.org/3.6/c-api/buffer.html>`_.
+
+The *buffer protocol* is an internal implementation detail of a Python
+type that allows instances of that type (objects) to be exposed as a raw
+pointer (or buffer) in the C API. In other words, it allows objects to be
+exposed as an array of bytes.
+
+From the perspective of the C API, objects implementing the *buffer protocol*
+all look the same: they are just a pointer to a memory address of a defined
+length. This allows the C API to be largely type agnostic when accessing their
+data. This allows custom types to be passed in without first converting them
+to a specific type.
+
+Many Python types implement the buffer protocol. These include ``bytes``
+(``str`` on Python 2), ``bytearray``, ``array.array``, ``io.BytesIO``,
+``mmap.mmap``, and ``memoryview``.
+
+``python-zstandard`` APIs that accept objects conforming to the buffer
+protocol require that the buffer is *C contiguous* and has a single
+dimension (``ndim==1``). This is usually the case. An example of where it
+is not is a Numpy matrix type.
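+
+For example, these calls are equivalent sketches differing only in the
+input type::
+
+   cctx = zstd.ZstdCompressor()
+   cctx.compress(b'raw bytes')
+   cctx.compress(bytearray(b'raw bytes'))
+   cctx.compress(memoryview(b'raw bytes'))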
+
+Requiring Output Sizes for Non-Streaming Decompression APIs
+-----------------------------------------------------------
+
+Non-streaming decompression APIs require that either the output size is
+explicitly defined (either in the zstd frame header or passed into the
+function) or that a max output size is specified. This restriction is for
+your safety.
+
+The *one-shot* decompression APIs store the decompressed result in a
+single buffer. This means that a buffer needs to be pre-allocated to hold
+the result. If the decompressed size is not known, then there is no
+universally good default size to use. Any default will fail or be highly
+sub-optimal in some scenarios (it will either be too small or will put
+stress on the memory allocator by requesting an overly large block).
+
+A *helpful* API may retry decompression with buffers of increasing size.
+While convenient, this has obvious performance disadvantages, namely redoing
+decompression N times until it succeeds. In addition, there is a security
+concern. Say the input came from highly compressible data, like 1 GB of the
+same byte value. The output size could be several orders of magnitude larger
+than the input size. An input of <100KB could decompress to >1GB. Without a
+bound on the decompressed size, certain inputs could exhaust all system
+memory. That's not good and is why a maximum output size is required.
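+
+As a minimal sketch, a frame lacking an embedded content size can still be
+decompressed by bounding the output explicitly via ``max_output_size``::
+
+   import zstd
+
+   cctx = zstd.ZstdCompressor(write_content_size=False)
+   frame = cctx.compress(b'x' * 16384)
+
+   dctx = zstd.ZstdDecompressor()
+   # dctx.decompress(frame) would raise: no output size is knowable.
+   data = dctx.decompress(frame, max_output_size=16384)
+   assert data == b'x' * 16384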
 
 Note on Zstandard's *Experimental* API
 ======================================
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/bufferutil.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,770 @@
+/**
+* Copyright (c) 2017-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(BufferWithSegments__doc__,
+"BufferWithSegments - A memory buffer holding known sub-segments.\n"
+"\n"
+"This type represents a contiguous chunk of memory containing N discrete\n"
+"items within sub-segments of that memory.\n"
+"\n"
+"Segments within the buffer are stored as an array of\n"
+"``(offset, length)`` pairs, where each element is an unsigned 64-bit\n"
+"integer using the host/native bit order representation.\n"
+"\n"
+"The type exists to facilitate operations against N>1 items without the\n"
+"overhead of Python object creation and management.\n"
+);
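+
+/* Illustrative Python usage (sketch only; segments are packed native-endian
+   uint64 (offset, length) pairs):
+
+       segments = struct.pack('=QQ', 0, 3) + struct.pack('=QQ', 3, 4)
+       buf = zstd.BufferWithSegments(b'foobarb', segments)
+       assert buf[1].tobytes() == b'barb'
+*/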
+
+static void BufferWithSegments_dealloc(ZstdBufferWithSegments* self) {
+	/* Backing memory is either canonically owned by a Py_buffer or by us. */
+	if (self->parent.buf) {
+		PyBuffer_Release(&self->parent);
+	}
+	else if (self->useFree) {
+		free(self->data);
+	}
+	else {
+		PyMem_Free(self->data);
+	}
+
+	self->data = NULL;
+
+	if (self->useFree) {
+		free(self->segments);
+	}
+	else {
+		PyMem_Free(self->segments);
+	}
+
+	self->segments = NULL;
+
+	PyObject_Del(self);
+}
+
+static int BufferWithSegments_init(ZstdBufferWithSegments* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"data",
+		"segments",
+		NULL
+	};
+
+	Py_buffer segments;
+	Py_ssize_t segmentCount;
+	Py_ssize_t i;
+
+	memset(&self->parent, 0, sizeof(self->parent));
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*y*:BufferWithSegments",
+#else
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s*s*:BufferWithSegments",
+#endif
+		kwlist, &self->parent, &segments)) {
+		return -1;
+	}
+
+	if (!PyBuffer_IsContiguous(&self->parent, 'C') || self->parent.ndim > 1) {
+		PyErr_SetString(PyExc_ValueError, "data buffer should be contiguous and have a single dimension");
+		goto except;
+	}
+
+	if (!PyBuffer_IsContiguous(&segments, 'C') || segments.ndim > 1) {
+		PyErr_SetString(PyExc_ValueError, "segments buffer should be contiguous and have a single dimension");
+		goto except;
+	}
+
+	if (segments.len % sizeof(BufferSegment)) {
+		PyErr_Format(PyExc_ValueError, "segments array size is not a multiple of %zu",
+			sizeof(BufferSegment));
+		goto except;
+	}
+
+	segmentCount = segments.len / sizeof(BufferSegment);
+
+	/* Validate segments data, as blindly trusting it could lead to arbitrary
+	memory access. */
+	for (i = 0; i < segmentCount; i++) {
+		BufferSegment* segment = &((BufferSegment*)(segments.buf))[i];
+
+		if (segment->offset + segment->length > (unsigned long long)self->parent.len) {
+			PyErr_SetString(PyExc_ValueError, "offset within segments array references memory outside buffer");
+			goto except;
+		}
+	}
+
+	/* Make a copy of the segments data. It is cheap to do so and is a guard
+	   against caller changing offsets, which has security implications. */
+	self->segments = PyMem_Malloc(segments.len);
+	if (!self->segments) {
+		PyErr_NoMemory();
+		goto except;
+	}
+
+	memcpy(self->segments, segments.buf, segments.len);
+	PyBuffer_Release(&segments);
+
+	self->data = self->parent.buf;
+	self->dataSize = self->parent.len;
+	self->segmentCount = segmentCount;
+
+	return 0;
+
+except:
+	PyBuffer_Release(&self->parent);
+	PyBuffer_Release(&segments);
+	return -1;
+}
+
+/**
+ * Construct a BufferWithSegments from existing memory and offsets.
+ *
+ * Ownership of the backing memory and BufferSegments will be transferred to
+ * the created object and freed when the BufferWithSegments is destroyed.
+ */
+ZstdBufferWithSegments* BufferWithSegments_FromMemory(void* data, unsigned long long dataSize,
+	BufferSegment* segments, Py_ssize_t segmentsSize) {
+	ZstdBufferWithSegments* result = NULL;
+	Py_ssize_t i;
+
+	if (NULL == data) {
+		PyErr_SetString(PyExc_ValueError, "data is NULL");
+		return NULL;
+	}
+
+	if (NULL == segments) {
+		PyErr_SetString(PyExc_ValueError, "segments is NULL");
+		return NULL;
+	}
+
+	for (i = 0; i < segmentsSize; i++) {
+		BufferSegment* segment = &segments[i];
+
+		if (segment->offset + segment->length > dataSize) {
+			PyErr_SetString(PyExc_ValueError, "offset in segments overflows buffer size");
+			return NULL;
+		}
+	}
+
+	result = PyObject_New(ZstdBufferWithSegments, &ZstdBufferWithSegmentsType);
+	if (NULL == result) {
+		return NULL;
+	}
+
+	result->useFree = 0;
+
+	memset(&result->parent, 0, sizeof(result->parent));
+	result->data = data;
+	result->dataSize = dataSize;
+	result->segments = segments;
+	result->segmentCount = segmentsSize;
+
+	return result;
+}
+
+static Py_ssize_t BufferWithSegments_length(ZstdBufferWithSegments* self) {
+	return self->segmentCount;
+}
+
+static ZstdBufferSegment* BufferWithSegments_item(ZstdBufferWithSegments* self, Py_ssize_t i) {
+	ZstdBufferSegment* result = NULL;
+
+	if (i < 0) {
+		PyErr_SetString(PyExc_IndexError, "offset must be non-negative");
+		return NULL;
+	}
+
+	if (i >= self->segmentCount) {
+		PyErr_Format(PyExc_IndexError, "offset must be less than %zd", self->segmentCount);
+		return NULL;
+	}
+
+	result = (ZstdBufferSegment*)PyObject_CallObject((PyObject*)&ZstdBufferSegmentType, NULL);
+	if (NULL == result) {
+		return NULL;
+	}
+
+	result->parent = (PyObject*)self;
+	Py_INCREF(self);
+
+	result->data = (char*)self->data + self->segments[i].offset;
+	result->dataSize = self->segments[i].length;
+	result->offset = self->segments[i].offset;
+
+	return result;
+}
+
+#if PY_MAJOR_VERSION >= 3
+static int BufferWithSegments_getbuffer(ZstdBufferWithSegments* self, Py_buffer* view, int flags) {
+	return PyBuffer_FillInfo(view, (PyObject*)self, self->data, self->dataSize, 1, flags);
+}
+#else
+static Py_ssize_t BufferWithSegments_getreadbuffer(ZstdBufferWithSegments* self, Py_ssize_t segment, void **ptrptr) {
+	if (segment != 0) {
+		PyErr_SetString(PyExc_ValueError, "segment number must be 0");
+		return -1;
+	}
+
+	*ptrptr = self->data;
+	return self->dataSize;
+}
+
+static Py_ssize_t BufferWithSegments_getsegcount(ZstdBufferWithSegments* self, Py_ssize_t* len) {
+	if (len) {
+		*len = 1;
+	}
+
+	return 1;
+}
+#endif
+
+PyDoc_STRVAR(BufferWithSegments_tobytes__doc__,
+"Obtain a bytes instance for this buffer.\n"
+);
+
+static PyObject* BufferWithSegments_tobytes(ZstdBufferWithSegments* self) {
+	return PyBytes_FromStringAndSize(self->data, self->dataSize);
+}
+
+PyDoc_STRVAR(BufferWithSegments_segments__doc__,
+"Obtain a BufferSegments describing segments in this instance.\n"
+);
+
+static ZstdBufferSegments* BufferWithSegments_segments(ZstdBufferWithSegments* self) {
+	ZstdBufferSegments* result = (ZstdBufferSegments*)PyObject_CallObject((PyObject*)&ZstdBufferSegmentsType, NULL);
+	if (NULL == result) {
+		return NULL;
+	}
+
+	result->parent = (PyObject*)self;
+	Py_INCREF(self);
+	result->segments = self->segments;
+	result->segmentCount = self->segmentCount;
+
+	return result;
+}
+
+static PySequenceMethods BufferWithSegments_sq = {
+	(lenfunc)BufferWithSegments_length, /* sq_length */
+	0, /* sq_concat */
+	0, /* sq_repeat */
+	(ssizeargfunc)BufferWithSegments_item, /* sq_item */
+	0, /* sq_ass_item */
+	0, /* sq_contains */
+	0, /* sq_inplace_concat */
+	0 /* sq_inplace_repeat */
+};
+
+static PyBufferProcs BufferWithSegments_as_buffer = {
+#if PY_MAJOR_VERSION >= 3
+	(getbufferproc)BufferWithSegments_getbuffer, /* bf_getbuffer */
+	0 /* bf_releasebuffer */
+#else
+	(readbufferproc)BufferWithSegments_getreadbuffer, /* bf_getreadbuffer */
+	0, /* bf_getwritebuffer */
+	(segcountproc)BufferWithSegments_getsegcount, /* bf_getsegcount */
+	0 /* bf_getcharbuffer */
+#endif
+};
+
+static PyMethodDef BufferWithSegments_methods[] = {
+	{ "segments", (PyCFunction)BufferWithSegments_segments,
+	  METH_NOARGS, BufferWithSegments_segments__doc__ },
+	{ "tobytes", (PyCFunction)BufferWithSegments_tobytes,
+	  METH_NOARGS, BufferWithSegments_tobytes__doc__ },
+	{ NULL, NULL }
+};
+
+static PyMemberDef BufferWithSegments_members[] = {
+	{ "size", T_ULONGLONG, offsetof(ZstdBufferWithSegments, dataSize),
+	  READONLY, "total size of the buffer in bytes" },
+	{ NULL }
+};
+
+PyTypeObject ZstdBufferWithSegmentsType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.BufferWithSegments", /* tp_name */
+	sizeof(ZstdBufferWithSegments),/* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)BufferWithSegments_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&BufferWithSegments_sq,    /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	&BufferWithSegments_as_buffer, /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	BufferWithSegments__doc__, /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	BufferWithSegments_methods, /* tp_methods */
+	BufferWithSegments_members, /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)BufferWithSegments_init, /* tp_init */
+	0,                         /* tp_alloc */
+	PyType_GenericNew,         /* tp_new */
+};
+
+PyDoc_STRVAR(BufferSegments__doc__,
+"BufferSegments - Represents segments/offsets within a BufferWithSegments\n"
+);
+
+static void BufferSegments_dealloc(ZstdBufferSegments* self) {
+	Py_CLEAR(self->parent);
+	PyObject_Del(self);
+}
+
+#if PY_MAJOR_VERSION >= 3
+static int BufferSegments_getbuffer(ZstdBufferSegments* self, Py_buffer* view, int flags) {
+	return PyBuffer_FillInfo(view, (PyObject*)self,
+		(void*)self->segments, self->segmentCount * sizeof(BufferSegment),
+		1, flags);
+}
+#else
+static Py_ssize_t BufferSegments_getreadbuffer(ZstdBufferSegments* self, Py_ssize_t segment, void **ptrptr) {
+	if (segment != 0) {
+		PyErr_SetString(PyExc_ValueError, "segment number must be 0");
+		return -1;
+	}
+
+	*ptrptr = (void*)self->segments;
+	return self->segmentCount * sizeof(BufferSegment);
+}
+
+static Py_ssize_t BufferSegments_getsegcount(ZstdBufferSegments* self, Py_ssize_t* len) {
+	if (len) {
+		*len = 1;
+	}
+
+	return 1;
+}
+#endif
+
+static PyBufferProcs BufferSegments_as_buffer = {
+#if PY_MAJOR_VERSION >= 3
+	(getbufferproc)BufferSegments_getbuffer,
+	0
+#else
+	(readbufferproc)BufferSegments_getreadbuffer,
+	0,
+	(segcountproc)BufferSegments_getsegcount,
+	0
+#endif
+};
+
+PyTypeObject ZstdBufferSegmentsType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.BufferSegments", /* tp_name */
+	sizeof(ZstdBufferSegments),/* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)BufferSegments_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	&BufferSegments_as_buffer, /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	BufferSegments__doc__,     /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	0,                         /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	PyType_GenericNew,         /* tp_new */
+};
+
+PyDoc_STRVAR(BufferSegment__doc__,
+	"BufferSegment - Represents a segment within a BufferWithSegments\n"
+);
+
+static void BufferSegment_dealloc(ZstdBufferSegment* self) {
+	Py_CLEAR(self->parent);
+	PyObject_Del(self);
+}
+
+static Py_ssize_t BufferSegment_length(ZstdBufferSegment* self) {
+	return self->dataSize;
+}
+
+#if PY_MAJOR_VERSION >= 3
+static int BufferSegment_getbuffer(ZstdBufferSegment* self, Py_buffer* view, int flags) {
+	return PyBuffer_FillInfo(view, (PyObject*)self,
+		self->data, self->dataSize, 1, flags);
+}
+#else
+static Py_ssize_t BufferSegment_getreadbuffer(ZstdBufferSegment* self, Py_ssize_t segment, void **ptrptr) {
+	if (segment != 0) {
+		PyErr_SetString(PyExc_ValueError, "segment number must be 0");
+		return -1;
+	}
+
+	*ptrptr = self->data;
+	return self->dataSize;
+}
+
+static Py_ssize_t BufferSegment_getsegcount(ZstdBufferSegment* self, Py_ssize_t* len) {
+	if (len) {
+		*len = 1;
+	}
+
+	return 1;
+}
+#endif
+
+PyDoc_STRVAR(BufferSegment_tobytes__doc__,
+"Obtain a bytes instance for this segment.\n"
+);
+
+static PyObject* BufferSegment_tobytes(ZstdBufferSegment* self) {
+	return PyBytes_FromStringAndSize(self->data, self->dataSize);
+}
+
+static PySequenceMethods BufferSegment_sq = {
+	(lenfunc)BufferSegment_length, /* sq_length */
+	0, /* sq_concat */
+	0, /* sq_repeat */
+	0, /* sq_item */
+	0, /* sq_ass_item */
+	0, /* sq_contains */
+	0, /* sq_inplace_concat */
+	0 /* sq_inplace_repeat */
+};
+
+static PyBufferProcs BufferSegment_as_buffer = {
+#if PY_MAJOR_VERSION >= 3
+	(getbufferproc)BufferSegment_getbuffer,
+	0
+#else
+	(readbufferproc)BufferSegment_getreadbuffer,
+	0,
+	(segcountproc)BufferSegment_getsegcount,
+	0
+#endif
+};
+
+static PyMethodDef BufferSegment_methods[] = {
+	{ "tobytes", (PyCFunction)BufferSegment_tobytes,
+	  METH_NOARGS, BufferSegment_tobytes__doc__ },
+	{ NULL, NULL }
+};
+
+static PyMemberDef BufferSegment_members[] = {
+	{ "offset", T_ULONGLONG, offsetof(ZstdBufferSegment, offset), READONLY,
+	  "offset of segment within parent buffer" },
+	  { NULL }
+};
+
+PyTypeObject ZstdBufferSegmentType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.BufferSegment", /* tp_name */
+	sizeof(ZstdBufferSegment),/* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)BufferSegment_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&BufferSegment_sq,         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	&BufferSegment_as_buffer,  /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	BufferSegment__doc__,      /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	BufferSegment_methods,     /* tp_methods */
+	BufferSegment_members,     /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	PyType_GenericNew,         /* tp_new */
+};
+
+PyDoc_STRVAR(BufferWithSegmentsCollection__doc__,
+"Represents a collection of BufferWithSegments.\n"
+);
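+
+/* Illustrative Python usage (sketch): a collection exposes the segments of
+   all constituent buffers as one flat sequence:
+
+       collection = zstd.BufferWithSegmentsCollection(buf1, buf2)
+       total_segments = len(collection)
+       first = collection[0]  # a BufferSegment
+*/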
+
+static void BufferWithSegmentsCollection_dealloc(ZstdBufferWithSegmentsCollection* self) {
+	Py_ssize_t i;
+
+	if (self->firstElements) {
+		PyMem_Free(self->firstElements);
+		self->firstElements = NULL;
+	}
+
+	if (self->buffers) {
+		for (i = 0; i < self->bufferCount; i++) {
+			Py_CLEAR(self->buffers[i]);
+		}
+
+		PyMem_Free(self->buffers);
+		self->buffers = NULL;
+	}
+
+	PyObject_Del(self);
+}
+
+static int BufferWithSegmentsCollection_init(ZstdBufferWithSegmentsCollection* self, PyObject* args) {
+	Py_ssize_t size;
+	Py_ssize_t i;
+	Py_ssize_t offset = 0;
+
+	size = PyTuple_Size(args);
+	if (-1 == size) {
+		return -1;
+	}
+
+	if (0 == size) {
+		PyErr_SetString(PyExc_ValueError, "must pass at least 1 argument");
+		return -1;
+	}
+
+	for (i = 0; i < size; i++) {
+		PyObject* item = PyTuple_GET_ITEM(args, i);
+		if (!PyObject_TypeCheck(item, &ZstdBufferWithSegmentsType)) {
+			PyErr_SetString(PyExc_TypeError, "arguments must be BufferWithSegments instances");
+			return -1;
+		}
+
+		if (0 == ((ZstdBufferWithSegments*)item)->segmentCount ||
+			0 == ((ZstdBufferWithSegments*)item)->dataSize) {
+			PyErr_SetString(PyExc_ValueError, "ZstdBufferWithSegments cannot be empty");
+			return -1;
+		}
+	}
+
+	self->buffers = PyMem_Malloc(size * sizeof(ZstdBufferWithSegments*));
+	if (NULL == self->buffers) {
+		PyErr_NoMemory();
+		return -1;
+	}
+
+	self->firstElements = PyMem_Malloc(size * sizeof(Py_ssize_t));
+	if (NULL == self->firstElements) {
+		PyMem_Free(self->buffers);
+		self->buffers = NULL;
+		PyErr_NoMemory();
+		return -1;
+	}
+
+	self->bufferCount = size;
+
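+	/* firstElements[i] records the cumulative number of segments in
+	   buffers[0..i]; BufferWithSegmentsCollection_item() uses it to map a
+	   flat segment index back to its owning buffer. */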
+	for (i = 0; i < size; i++) {
+		ZstdBufferWithSegments* item = (ZstdBufferWithSegments*)PyTuple_GET_ITEM(args, i);
+
+		self->buffers[i] = item;
+		Py_INCREF(item);
+
+		if (i > 0) {
+			self->firstElements[i - 1] = offset;
+		}
+
+		offset += item->segmentCount;
+	}
+
+	self->firstElements[size - 1] = offset;
+
+	return 0;
+}
+
+static PyObject* BufferWithSegmentsCollection_size(ZstdBufferWithSegmentsCollection* self) {
+	Py_ssize_t i;
+	Py_ssize_t j;
+	unsigned long long size = 0;
+
+	for (i = 0; i < self->bufferCount; i++) {
+		for (j = 0; j < self->buffers[i]->segmentCount; j++) {
+			size += self->buffers[i]->segments[j].length;
+		}
+	}
+
+	return PyLong_FromUnsignedLongLong(size);
+}
+
+Py_ssize_t BufferWithSegmentsCollection_length(ZstdBufferWithSegmentsCollection* self) {
+	return self->firstElements[self->bufferCount - 1];
+}
+
+static ZstdBufferSegment* BufferWithSegmentsCollection_item(ZstdBufferWithSegmentsCollection* self, Py_ssize_t i) {
+	Py_ssize_t bufferOffset;
+
+	if (i < 0) {
+		PyErr_SetString(PyExc_IndexError, "offset must be non-negative");
+		return NULL;
+	}
+
+	if (i >= BufferWithSegmentsCollection_length(self)) {
+		PyErr_Format(PyExc_IndexError, "offset must be less than %zd",
+			BufferWithSegmentsCollection_length(self));
+		return NULL;
+	}
+
+	for (bufferOffset = 0; bufferOffset < self->bufferCount; bufferOffset++) {
+		Py_ssize_t offset = 0;
+
+		if (i < self->firstElements[bufferOffset]) {
+			if (bufferOffset > 0) {
+				offset = self->firstElements[bufferOffset - 1];
+			}
+
+			return BufferWithSegments_item(self->buffers[bufferOffset], i - offset);
+		}
+	}
+
+	PyErr_SetString(ZstdError, "error resolving segment; this should not happen");
+	return NULL;
+}
+
+static PySequenceMethods BufferWithSegmentsCollection_sq = {
+	(lenfunc)BufferWithSegmentsCollection_length, /* sq_length */
+	0, /* sq_concat */
+	0, /* sq_repeat */
+	(ssizeargfunc)BufferWithSegmentsCollection_item, /* sq_item */
+	0, /* sq_ass_item */
+	0, /* sq_contains */
+	0, /* sq_inplace_concat */
+	0 /* sq_inplace_repeat */
+};
+
+static PyMethodDef BufferWithSegmentsCollection_methods[] = {
+	{ "size", (PyCFunction)BufferWithSegmentsCollection_size,
+	  METH_NOARGS, PyDoc_STR("total size in bytes of all segments") },
+	{ NULL, NULL }
+};
+
+PyTypeObject ZstdBufferWithSegmentsCollectionType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.BufferWithSegmentsCollection", /* tp_name */
+	sizeof(ZstdBufferWithSegmentsCollection),/* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)BufferWithSegmentsCollection_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	&BufferWithSegmentsCollection_sq, /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	BufferWithSegmentsCollection__doc__, /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	/* TODO implement iterator for performance. */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	BufferWithSegmentsCollection_methods, /* tp_methods */
+	0,                         /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	(initproc)BufferWithSegmentsCollection_init, /* tp_init */
+	0,                         /* tp_alloc */
+	PyType_GenericNew,         /* tp_new */
+};
+
+void bufferutil_module_init(PyObject* mod) {
+	Py_TYPE(&ZstdBufferWithSegmentsType) = &PyType_Type;
+	if (PyType_Ready(&ZstdBufferWithSegmentsType) < 0) {
+		return;
+	}
+
+	Py_INCREF(&ZstdBufferWithSegmentsType);
+	PyModule_AddObject(mod, "BufferWithSegments", (PyObject*)&ZstdBufferWithSegmentsType);
+
+	Py_TYPE(&ZstdBufferSegmentsType) = &PyType_Type;
+	if (PyType_Ready(&ZstdBufferSegmentsType) < 0) {
+		return;
+	}
+
+	Py_INCREF(&ZstdBufferSegmentsType);
+	PyModule_AddObject(mod, "BufferSegments", (PyObject*)&ZstdBufferSegmentsType);
+
+	Py_TYPE(&ZstdBufferSegmentType) = &PyType_Type;
+	if (PyType_Ready(&ZstdBufferSegmentType) < 0) {
+		return;
+	}
+
+	Py_INCREF(&ZstdBufferSegmentType);
+	PyModule_AddObject(mod, "BufferSegment", (PyObject*)&ZstdBufferSegmentType);
+
+	Py_TYPE(&ZstdBufferWithSegmentsCollectionType) = &PyType_Type;
+	if (PyType_Ready(&ZstdBufferWithSegmentsCollectionType) < 0) {
+		return;
+	}
+
+	Py_INCREF(&ZstdBufferWithSegmentsCollectionType);
+	PyModule_AddObject(mod, "BufferWithSegmentsCollection", (PyObject*)&ZstdBufferWithSegmentsCollectionType);
+}
--- a/contrib/python-zstandard/c-ext/compressiondict.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressiondict.c	Tue Apr 18 12:24:34 2017 -0400
@@ -11,45 +11,48 @@
 extern PyObject* ZstdError;
 
 ZstdCompressionDict* train_dictionary(PyObject* self, PyObject* args, PyObject* kwargs) {
-	static char *kwlist[] = { "dict_size", "samples", "parameters", NULL };
+	static char* kwlist[] = {
+		"dict_size",
+		"samples",
+		"selectivity",
+		"level",
+		"notifications",
+		"dict_id",
+		NULL
+	};
 	size_t capacity;
 	PyObject* samples;
 	Py_ssize_t samplesLen;
-	PyObject* parameters = NULL;
+	unsigned selectivity = 0;
+	int level = 0;
+	unsigned notifications = 0;
+	unsigned dictID = 0;
 	ZDICT_params_t zparams;
 	Py_ssize_t sampleIndex;
 	Py_ssize_t sampleSize;
 	PyObject* sampleItem;
 	size_t zresult;
-	void* sampleBuffer;
+	void* sampleBuffer = NULL;
 	void* sampleOffset;
 	size_t samplesSize = 0;
-	size_t* sampleSizes;
-	void* dict;
-	ZstdCompressionDict* result;
+	size_t* sampleSizes = NULL;
+	void* dict = NULL;
+	ZstdCompressionDict* result = NULL;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|O!", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|IiII:train_dictionary",
+		kwlist,
 		&capacity,
 		&PyList_Type, &samples,
-		(PyObject*)&DictParametersType, &parameters)) {
+		&selectivity, &level, &notifications, &dictID)) {
 		return NULL;
 	}
 
-	/* Validate parameters first since it is easiest. */
-	zparams.selectivityLevel = 0;
-	zparams.compressionLevel = 0;
-	zparams.notificationLevel = 0;
-	zparams.dictID = 0;
-	zparams.reserved[0] = 0;
-	zparams.reserved[1] = 0;
+	memset(&zparams, 0, sizeof(zparams));
 
-	if (parameters) {
-		/* TODO validate data ranges */
-		zparams.selectivityLevel = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 0));
-		zparams.compressionLevel = PyLong_AsLong(PyTuple_GetItem(parameters, 1));
-		zparams.notificationLevel = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 2));
-		zparams.dictID = PyLong_AsUnsignedLong(PyTuple_GetItem(parameters, 3));
-	}
+	zparams.selectivityLevel = selectivity;
+	zparams.compressionLevel = level;
+	zparams.notificationLevel = notifications;
+	zparams.dictID = dictID;
 
 	/* Figure out the size of the raw samples */
 	samplesLen = PyList_Size(samples);
@@ -57,7 +60,6 @@
 		sampleItem = PyList_GetItem(samples, sampleIndex);
 		if (!PyBytes_Check(sampleItem)) {
 			PyErr_SetString(PyExc_ValueError, "samples must be bytes");
-			/* TODO probably need to perform DECREF here */
 			return NULL;
 		}
 		samplesSize += PyBytes_GET_SIZE(sampleItem);
@@ -68,13 +70,12 @@
 	sampleBuffer = PyMem_Malloc(samplesSize);
 	if (!sampleBuffer) {
 		PyErr_NoMemory();
-		return NULL;
+		goto finally;
 	}
 	sampleSizes = PyMem_Malloc(samplesLen * sizeof(size_t));
 	if (!sampleSizes) {
-		PyMem_Free(sampleBuffer);
 		PyErr_NoMemory();
-		return NULL;
+		goto finally;
 	}
 
 	sampleOffset = sampleBuffer;
@@ -89,33 +90,168 @@
 
 	dict = PyMem_Malloc(capacity);
 	if (!dict) {
-		PyMem_Free(sampleSizes);
-		PyMem_Free(sampleBuffer);
 		PyErr_NoMemory();
-		return NULL;
+		goto finally;
 	}
 
+	/* TODO consider using dup2() to redirect zstd's stderr writing to a buffer */
+	Py_BEGIN_ALLOW_THREADS
 	zresult = ZDICT_trainFromBuffer_advanced(dict, capacity,
 		sampleBuffer, sampleSizes, (unsigned int)samplesLen,
 		zparams);
+	Py_END_ALLOW_THREADS
 	if (ZDICT_isError(zresult)) {
 		PyErr_Format(ZstdError, "Cannot train dict: %s", ZDICT_getErrorName(zresult));
 		PyMem_Free(dict);
-		PyMem_Free(sampleSizes);
-		PyMem_Free(sampleBuffer);
-		return NULL;
+		goto finally;
 	}
 
 	result = PyObject_New(ZstdCompressionDict, &ZstdCompressionDictType);
 	if (!result) {
-		return NULL;
+		goto finally;
 	}
 
 	result->dictData = dict;
 	result->dictSize = zresult;
+	result->d = 0;
+	result->k = 0;
+
+finally:
+	PyMem_Free(sampleBuffer);
+	PyMem_Free(sampleSizes);
+
 	return result;
 }
 
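+/**
+ * Train a compression dictionary using the cover algorithm.
+ *
+ * ``k`` is the segment size and ``d`` the dmer size; both are exposed on the
+ * resulting ZstdCompressionDict. When ``optimize`` is truthy, the cover
+ * algorithm searches for optimal parameters instead of using the supplied
+ * ``k``/``d`` verbatim.
+ */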
+ZstdCompressionDict* train_cover_dictionary(PyObject* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"dict_size",
+		"samples",
+		"k",
+		"d",
+		"notifications",
+		"dict_id",
+		"level",
+		"optimize",
+		"steps",
+		"threads",
+		NULL
+	};
+
+	size_t capacity;
+	PyObject* samples;
+	unsigned k = 0;
+	unsigned d = 0;
+	unsigned notifications = 0;
+	unsigned dictID = 0;
+	int level = 0;
+	PyObject* optimize = NULL;
+	unsigned steps = 0;
+	int threads = 0;
+	COVER_params_t params;
+	Py_ssize_t samplesLen;
+	Py_ssize_t i;
+	size_t samplesSize = 0;
+	void* sampleBuffer = NULL;
+	size_t* sampleSizes = NULL;
+	void* sampleOffset;
+	Py_ssize_t sampleSize;
+	void* dict = NULL;
+	size_t zresult;
+	ZstdCompressionDict* result = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "nO!|IIIIiOIi:train_cover_dictionary",
+		kwlist, &capacity, &PyList_Type, &samples,
+		&k, &d, &notifications, &dictID, &level, &optimize, &steps, &threads)) {
+		return NULL;
+	}
+
+	if (threads < 0) {
+		threads = cpu_count();
+	}
+
+	memset(&params, 0, sizeof(params));
+	params.k = k;
+	params.d = d;
+	params.steps = steps;
+	params.nbThreads = threads;
+	params.notificationLevel = notifications;
+	params.dictID = dictID;
+	params.compressionLevel = level;
+
+	/* Figure out total size of input samples. */
+	samplesLen = PyList_Size(samples);
+	for (i = 0; i < samplesLen; i++) {
+		PyObject* sampleItem = PyList_GET_ITEM(samples, i);
+
+		if (!PyBytes_Check(sampleItem)) {
+			PyErr_SetString(PyExc_ValueError, "samples must be bytes");
+			return NULL;
+		}
+		samplesSize += PyBytes_GET_SIZE(sampleItem);
+	}
+
+	sampleBuffer = PyMem_Malloc(samplesSize);
+	if (!sampleBuffer) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	sampleSizes = PyMem_Malloc(samplesLen * sizeof(size_t));
+	if (!sampleSizes) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	sampleOffset = sampleBuffer;
+	for (i = 0; i < samplesLen; i++) {
+		PyObject* sampleItem = PyList_GET_ITEM(samples, i);
+		sampleSize = PyBytes_GET_SIZE(sampleItem);
+		sampleSizes[i] = sampleSize;
+		memcpy(sampleOffset, PyBytes_AS_STRING(sampleItem), sampleSize);
+		sampleOffset = (char*)sampleOffset + sampleSize;
+	}
+
+	dict = PyMem_Malloc(capacity);
+	if (!dict) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	if (optimize && PyObject_IsTrue(optimize)) {
+		zresult = COVER_optimizeTrainFromBuffer(dict, capacity,
+			sampleBuffer, sampleSizes, (unsigned)samplesLen, &params);
+	}
+	else {
+		zresult = COVER_trainFromBuffer(dict, capacity,
+			sampleBuffer, sampleSizes, (unsigned)samplesLen, params);
+	}
+	Py_END_ALLOW_THREADS
+
+	if (ZDICT_isError(zresult)) {
+		PyMem_Free(dict);
+		PyErr_Format(ZstdError, "cannot train dict: %s", ZDICT_getErrorName(zresult));
+		goto finally;
+	}
+
+	result = PyObject_New(ZstdCompressionDict, &ZstdCompressionDictType);
+	if (!result) {
+		PyMem_Free(dict);
+		goto finally;
+	}
+
+	result->dictData = dict;
+	result->dictSize = zresult;
+	result->d = params.d;
+	result->k = params.k;
+
+finally:
+	PyMem_Free(sampleBuffer);
+	PyMem_Free(sampleSizes);
+
+	return result;
+}
 
 PyDoc_STRVAR(ZstdCompressionDict__doc__,
 "ZstdCompressionDict(data) - Represents a computed compression dictionary\n"
@@ -133,10 +269,11 @@
 	self->dictSize = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:ZstdCompressionDict",
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:ZstdCompressionDict",
 #endif
+		&source, &sourceSize)) {
 		return -1;
 	}
 
@@ -179,6 +316,14 @@
 	{ NULL, NULL }
 };
 
+static PyMemberDef ZstdCompressionDict_members[] = {
+	{ "k", T_UINT, offsetof(ZstdCompressionDict, k), READONLY,
+	  "segment size" },
+	{ "d", T_UINT, offsetof(ZstdCompressionDict, d), READONLY,
+	  "dmer size" },
+	{ NULL }
+};
+
 static Py_ssize_t ZstdCompressionDict_length(ZstdCompressionDict* self) {
 	return self->dictSize;
 }
@@ -223,7 +368,7 @@
 	0,                              /* tp_iter */
 	0,                              /* tp_iternext */
 	ZstdCompressionDict_methods,    /* tp_methods */
-	0,                              /* tp_members */
+	ZstdCompressionDict_members,    /* tp_members */
 	0,                              /* tp_getset */
 	0,                              /* tp_base */
 	0,                              /* tp_dict */
--- a/contrib/python-zstandard/c-ext/compressionparams.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressionparams.c	Tue Apr 18 12:24:34 2017 -0400
@@ -25,7 +25,8 @@
 	ZSTD_compressionParameters params;
 	CompressionParametersObject* result;
 
-	if (!PyArg_ParseTuple(args, "i|Kn", &compressionLevel, &sourceSize, &dictSize)) {
+	if (!PyArg_ParseTuple(args, "i|Kn:get_compression_parameters",
+		&compressionLevel, &sourceSize, &dictSize)) {
 		return NULL;
 	}
 
@@ -47,12 +48,108 @@
 	return result;
 }
 
+static int CompressionParameters_init(CompressionParametersObject* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"window_log",
+		"chain_log",
+		"hash_log",
+		"search_log",
+		"search_length",
+		"target_length",
+		"strategy",
+		NULL
+	};
+
+	unsigned windowLog;
+	unsigned chainLog;
+	unsigned hashLog;
+	unsigned searchLog;
+	unsigned searchLength;
+	unsigned targetLength;
+	unsigned strategy;
+	ZSTD_compressionParameters params;
+	size_t zresult;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "IIIIIII:CompressionParameters",
+		kwlist, &windowLog, &chainLog, &hashLog, &searchLog, &searchLength,
+		&targetLength, &strategy)) {
+		return -1;
+	}
+
+	if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid window log value");
+		return -1;
+	}
+
+	if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid chain log value");
+		return -1;
+	}
+
+	if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid hash log value");
+		return -1;
+	}
+
+	if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search log value");
+		return -1;
+	}
+
+	if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid search length value");
+		return -1;
+	}
+
+	if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
+		PyErr_SetString(PyExc_ValueError, "invalid target length value");
+		return -1;
+	}
+
+	if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
+		PyErr_SetString(PyExc_ValueError, "invalid strategy value");
+		return -1;
+	}
+
+	self->windowLog = windowLog;
+	self->chainLog = chainLog;
+	self->hashLog = hashLog;
+	self->searchLog = searchLog;
+	self->searchLength = searchLength;
+	self->targetLength = targetLength;
+	self->strategy = strategy;
+
+	ztopy_compression_parameters(self, &params);
+	zresult = ZSTD_checkCParams(params);
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(PyExc_ValueError, "invalid compression parameters: %s",
+			ZSTD_getErrorName(zresult));
+		return -1;
+	}
+
+	return 0;
+}
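+
+/* Illustrative Python usage (sketch; values must satisfy the range checks
+   above):
+
+       params = zstd.CompressionParameters(window_log=10, chain_log=6,
+           hash_log=7, search_log=4, search_length=5, target_length=8,
+           strategy=zstd.STRATEGY_FAST)
+       size = params.estimated_compression_context_size()
+*/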
+
+PyDoc_STRVAR(CompressionParameters_estimated_compression_context_size__doc__,
+"Estimate the size in bytes of a compression context for compression parameters\n"
+);
+
+PyObject* CompressionParameters_estimated_compression_context_size(CompressionParametersObject* self) {
+	ZSTD_compressionParameters params;
+
+	ztopy_compression_parameters(self, &params);
+
+	return PyLong_FromSize_t(ZSTD_estimateCCtxSize(params));
+}
+
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args) {
 	CompressionParametersObject* params;
 	ZSTD_compressionParameters zparams;
 	PyObject* result;
 
-	if (!PyArg_ParseTuple(args, "O!", &CompressionParametersType, &params)) {
+	if (!PyArg_ParseTuple(args, "O!:estimate_compression_context_size",
+		&CompressionParametersType, &params)) {
 		return NULL;
 	}
 
@@ -64,113 +161,43 @@
 PyDoc_STRVAR(CompressionParameters__doc__,
 "CompressionParameters: low-level control over zstd compression");
 
-static PyObject* CompressionParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
-	CompressionParametersObject* self;
-	unsigned windowLog;
-	unsigned chainLog;
-	unsigned hashLog;
-	unsigned searchLog;
-	unsigned searchLength;
-	unsigned targetLength;
-	unsigned strategy;
-
-	if (!PyArg_ParseTuple(args, "IIIIIII", &windowLog, &chainLog, &hashLog, &searchLog,
-		&searchLength, &targetLength, &strategy)) {
-		return NULL;
-	}
-
-	if (windowLog < ZSTD_WINDOWLOG_MIN || windowLog > ZSTD_WINDOWLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid window log value");
-		return NULL;
-	}
-
-	if (chainLog < ZSTD_CHAINLOG_MIN || chainLog > ZSTD_CHAINLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid chain log value");
-		return NULL;
-	}
-
-	if (hashLog < ZSTD_HASHLOG_MIN || hashLog > ZSTD_HASHLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid hash log value");
-		return NULL;
-	}
-
-	if (searchLog < ZSTD_SEARCHLOG_MIN || searchLog > ZSTD_SEARCHLOG_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid search log value");
-		return NULL;
-	}
-
-	if (searchLength < ZSTD_SEARCHLENGTH_MIN || searchLength > ZSTD_SEARCHLENGTH_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid search length value");
-		return NULL;
-	}
-
-	if (targetLength < ZSTD_TARGETLENGTH_MIN || targetLength > ZSTD_TARGETLENGTH_MAX) {
-		PyErr_SetString(PyExc_ValueError, "invalid target length value");
-		return NULL;
-	}
-
-	if (strategy < ZSTD_fast || strategy > ZSTD_btopt) {
-		PyErr_SetString(PyExc_ValueError, "invalid strategy value");
-		return NULL;
-	}
-
-	self = (CompressionParametersObject*)subtype->tp_alloc(subtype, 1);
-	if (!self) {
-		return NULL;
-	}
-
-	self->windowLog = windowLog;
-	self->chainLog = chainLog;
-	self->hashLog = hashLog;
-	self->searchLog = searchLog;
-	self->searchLength = searchLength;
-	self->targetLength = targetLength;
-	self->strategy = strategy;
-
-	return (PyObject*)self;
-}
-
 static void CompressionParameters_dealloc(PyObject* self) {
 	PyObject_Del(self);
 }
 
-static Py_ssize_t CompressionParameters_length(PyObject* self) {
-	return 7;
-}
-
-static PyObject* CompressionParameters_item(PyObject* o, Py_ssize_t i) {
-	CompressionParametersObject* self = (CompressionParametersObject*)o;
+static PyMethodDef CompressionParameters_methods[] = {
+	{
+		"estimated_compression_context_size",
+		(PyCFunction)CompressionParameters_estimated_compression_context_size,
+		METH_NOARGS,
+		CompressionParameters_estimated_compression_context_size__doc__
+	},
+	{ NULL, NULL }
+};
 
-	switch (i) {
-	case 0:
-		return PyLong_FromLong(self->windowLog);
-	case 1:
-		return PyLong_FromLong(self->chainLog);
-	case 2:
-		return PyLong_FromLong(self->hashLog);
-	case 3:
-		return PyLong_FromLong(self->searchLog);
-	case 4:
-		return PyLong_FromLong(self->searchLength);
-	case 5:
-		return PyLong_FromLong(self->targetLength);
-	case 6:
-		return PyLong_FromLong(self->strategy);
-	default:
-		PyErr_SetString(PyExc_IndexError, "index out of range");
-		return NULL;
-	}
-}
-
-static PySequenceMethods CompressionParameters_sq = {
-	CompressionParameters_length, /* sq_length */
-	0,							  /* sq_concat */
-	0,                            /* sq_repeat */
-	CompressionParameters_item,   /* sq_item */
-	0,                            /* sq_ass_item */
-	0,                            /* sq_contains */
-	0,                            /* sq_inplace_concat */
-	0                             /* sq_inplace_repeat */
+static PyMemberDef CompressionParameters_members[] = {
+	{ "window_log", T_UINT,
+	  offsetof(CompressionParametersObject, windowLog), READONLY,
+	  "window log" },
+	{ "chain_log", T_UINT,
+	  offsetof(CompressionParametersObject, chainLog), READONLY,
+	  "chain log" },
+	{ "hash_log", T_UINT,
+	  offsetof(CompressionParametersObject, hashLog), READONLY,
+	  "hash log" },
+	{ "search_log", T_UINT,
+	  offsetof(CompressionParametersObject, searchLog), READONLY,
+	  "search log" },
+	{ "search_length", T_UINT,
+	  offsetof(CompressionParametersObject, searchLength), READONLY,
+	  "search length" },
+	{ "target_length", T_UINT,
+	  offsetof(CompressionParametersObject, targetLength), READONLY,
+	  "target length" },
+	{ "strategy", T_INT,
+	  offsetof(CompressionParametersObject, strategy), READONLY,
+	  "strategy" },
+	{ NULL }
 };
 
 PyTypeObject CompressionParametersType = {
@@ -185,7 +212,7 @@
 	0,                         /* tp_compare */
 	0,                         /* tp_repr */
 	0,                         /* tp_as_number */
-	&CompressionParameters_sq, /* tp_as_sequence */
+	0,                         /* tp_as_sequence */
 	0,                         /* tp_as_mapping */
 	0,                         /* tp_hash  */
 	0,                         /* tp_call */
@@ -193,7 +220,7 @@
 	0,                         /* tp_getattro */
 	0,                         /* tp_setattro */
 	0,                         /* tp_as_buffer */
-	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
 	CompressionParameters__doc__, /* tp_doc */
 	0,                         /* tp_traverse */
 	0,                         /* tp_clear */
@@ -201,17 +228,17 @@
 	0,                         /* tp_weaklistoffset */
 	0,                         /* tp_iter */
 	0,                         /* tp_iternext */
-	0,                         /* tp_methods */
-	0,                         /* tp_members */
+	CompressionParameters_methods, /* tp_methods */
+	CompressionParameters_members, /* tp_members */
 	0,                         /* tp_getset */
 	0,                         /* tp_base */
 	0,                         /* tp_dict */
 	0,                         /* tp_descr_get */
 	0,                         /* tp_descr_set */
 	0,                         /* tp_dictoffset */
-	0,                         /* tp_init */
+	(initproc)CompressionParameters_init, /* tp_init */
 	0,                         /* tp_alloc */
-	CompressionParameters_new, /* tp_new */
+	PyType_GenericNew,         /* tp_new */
 };
 
 void compressionparams_module_init(PyObject* mod) {
@@ -220,7 +247,7 @@
 		return;
 	}
 
-	Py_IncRef((PyObject*)&CompressionParametersType);
+	Py_INCREF(&CompressionParametersType);
 	PyModule_AddObject(mod, "CompressionParameters",
 		(PyObject*)&CompressionParametersType);
 }
--- a/contrib/python-zstandard/c-ext/compressionwriter.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressionwriter.c	Tue Apr 18 12:24:34 2017 -0400
@@ -18,11 +18,6 @@
 	Py_XDECREF(self->compressor);
 	Py_XDECREF(self->writer);
 
-	if (self->cstream) {
-		ZSTD_freeCStream(self->cstream);
-		self->cstream = NULL;
-	}
-
 	PyObject_Del(self);
 }
 
@@ -32,9 +27,15 @@
 		return NULL;
 	}
 
-	self->cstream = CStream_from_ZstdCompressor(self->compressor, self->sourceSize);
-	if (!self->cstream) {
-		return NULL;
+	if (self->compressor->mtcctx) {
+		if (init_mtcstream(self->compressor, self->sourceSize)) {
+			return NULL;
+		}
+	}
+	else {
+		if (0 != init_cstream(self->compressor, self->sourceSize)) {
+			return NULL;
+		}
 	}
 
 	self->entered = 1;
@@ -52,14 +53,14 @@
 	ZSTD_outBuffer output;
 	PyObject* res;
 
-	if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
+	if (!PyArg_ParseTuple(args, "OOO:__exit__", &exc_type, &exc_value, &exc_tb)) {
 		return NULL;
 	}
 
 	self->entered = 0;
 
-	if (self->cstream && exc_type == Py_None && exc_value == Py_None &&
-		exc_tb == Py_None) {
+	if ((self->compressor->cstream || self->compressor->mtcctx) && exc_type == Py_None
+		&& exc_value == Py_None && exc_tb == Py_None) {
 
 		output.dst = PyMem_Malloc(self->outSize);
 		if (!output.dst) {
@@ -69,7 +70,12 @@
 		output.pos = 0;
 
 		while (1) {
-			zresult = ZSTD_endStream(self->cstream, &output);
+			if (self->compressor->mtcctx) {
+				zresult = ZSTDMT_endStream(self->compressor->mtcctx, &output);
+			}
+			else {
+				zresult = ZSTD_endStream(self->compressor->cstream, &output);
+			}
 			if (ZSTD_isError(zresult)) {
 				PyErr_Format(ZstdError, "error ending compression stream: %s",
 					ZSTD_getErrorName(zresult));
@@ -95,21 +101,19 @@
 		}
 
 		PyMem_Free(output.dst);
-		ZSTD_freeCStream(self->cstream);
-		self->cstream = NULL;
 	}
 
 	Py_RETURN_FALSE;
 }
 
 static PyObject* ZstdCompressionWriter_memory_size(ZstdCompressionWriter* self) {
-	if (!self->cstream) {
+	if (!self->compressor->cstream) {
 		PyErr_SetString(ZstdError, "cannot determine size of an inactive compressor; "
 			"call when a context manager is active");
 		return NULL;
 	}
 
-	return PyLong_FromSize_t(ZSTD_sizeof_CStream(self->cstream));
+	return PyLong_FromSize_t(ZSTD_sizeof_CStream(self->compressor->cstream));
 }
 
 static PyObject* ZstdCompressionWriter_write(ZstdCompressionWriter* self, PyObject* args) {
@@ -119,11 +123,12 @@
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -146,7 +151,13 @@
 
 	while ((ssize_t)input.pos < sourceSize) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_compressStream(self->cstream, &output, &input);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_compressStream(self->compressor->mtcctx,
+				&output, &input);
+		}
+		else {
+			zresult = ZSTD_compressStream(self->compressor->cstream, &output, &input);
+		}
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -164,20 +175,21 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 		}
 		output.pos = 0;
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
+	return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyObject* ZstdCompressionWriter_flush(ZstdCompressionWriter* self, PyObject* args) {
 	size_t zresult;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 	if (!self->entered) {
 		PyErr_SetString(ZstdError, "flush must be called from an active context manager");
@@ -193,7 +205,12 @@
 
 	while (1) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_flushStream(self->cstream, &output);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_flushStream(self->compressor->mtcctx, &output);
+		}
+		else {
+			zresult = ZSTD_flushStream(self->compressor->cstream, &output);
+		}
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -215,14 +232,14 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 		}
 		output.pos = 0;
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
+	return PyLong_FromSsize_t(totalWrite);
 }
 
 static PyMethodDef ZstdCompressionWriter_methods[] = {
--- a/contrib/python-zstandard/c-ext/compressobj.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressobj.c	Tue Apr 18 12:24:34 2017 -0400
@@ -18,11 +18,6 @@
 	PyMem_Free(self->output.dst);
 	self->output.dst = NULL;
 
-	if (self->cstream) {
-		ZSTD_freeCStream(self->cstream);
-		self->cstream = NULL;
-	}
-
 	Py_XDECREF(self->compressor);
 
 	PyObject_Del(self);
@@ -42,9 +37,9 @@
 	}
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:compress", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:compress", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -55,7 +50,13 @@
 
 	while ((ssize_t)input.pos < sourceSize) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_compressStream(self->cstream, &self->output, &input);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_compressStream(self->compressor->mtcctx,
+				&self->output, &input);
+		}
+		else {
+			zresult = ZSTD_compressStream(self->compressor->cstream, &self->output, &input);
+		}
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -98,7 +99,7 @@
 	PyObject* result = NULL;
 	Py_ssize_t resultSize = 0;
 
-	if (!PyArg_ParseTuple(args, "|i", &flushMode)) {
+	if (!PyArg_ParseTuple(args, "|i:flush", &flushMode)) {
 		return NULL;
 	}
 
@@ -118,7 +119,12 @@
 		/* The output buffer is of size ZSTD_CStreamOutSize(), which is 
 		   guaranteed to hold a full block. */
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_flushStream(self->cstream, &self->output);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_flushStream(self->compressor->mtcctx, &self->output);
+		}
+		else {
+			zresult = ZSTD_flushStream(self->compressor->cstream, &self->output);
+		}
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -150,7 +156,12 @@
 	self->finished = 1;
 
 	while (1) {
-		zresult = ZSTD_endStream(self->cstream, &self->output);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_endStream(self->compressor->mtcctx, &self->output);
+		}
+		else {
+			zresult = ZSTD_endStream(self->compressor->cstream, &self->output);
+		}
 		if (ZSTD_isError(zresult)) {
 			PyErr_Format(ZstdError, "error ending compression stream: %s",
 				ZSTD_getErrorName(zresult));
@@ -182,9 +193,6 @@
 		}
 	}
 
-	ZSTD_freeCStream(self->cstream);
-	self->cstream = NULL;
-
 	if (result) {
 		return result;
 	}
--- a/contrib/python-zstandard/c-ext/compressor.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressor.c	Tue Apr 18 12:24:34 2017 -0400
@@ -7,16 +7,21 @@
 */
 
 #include "python-zstandard.h"
+#include "pool.h"
 
 extern PyObject* ZstdError;
 
-int populate_cdict(ZstdCompressor* compressor, void* dictData, size_t dictSize, ZSTD_parameters* zparams) {
+int populate_cdict(ZstdCompressor* compressor, ZSTD_parameters* zparams) {
 	ZSTD_customMem zmem;
-	assert(!compressor->cdict);
+
+	if (compressor->cdict || !compressor->dict || !compressor->dict->dictData) {
+		return 0;
+	}
+
 	Py_BEGIN_ALLOW_THREADS
 	memset(&zmem, 0, sizeof(zmem));
 	compressor->cdict = ZSTD_createCDict_advanced(compressor->dict->dictData,
-		compressor->dict->dictSize, *zparams, zmem);
+		compressor->dict->dictSize, 1, *zparams, zmem);
 	Py_END_ALLOW_THREADS
 
 	if (!compressor->cdict) {
@@ -28,22 +33,32 @@
 }
 
 /**
-* Initialize a zstd CStream from a ZstdCompressor instance.
-*
-* Returns a ZSTD_CStream on success or NULL on failure. If NULL, a Python
-* exception will be set.
-*/
-ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize) {
-	ZSTD_CStream* cstream;
+ * Ensure the ZSTD_CStream on a ZstdCompressor instance is initialized.
+ *
+ * Returns 0 on success. Other value on failure. Will set a Python exception
+ * on failure.
+ */
+int init_cstream(ZstdCompressor* compressor, unsigned long long sourceSize) {
 	ZSTD_parameters zparams;
 	void* dictData = NULL;
 	size_t dictSize = 0;
 	size_t zresult;
 
-	cstream = ZSTD_createCStream();
-	if (!cstream) {
-		PyErr_SetString(ZstdError, "cannot create CStream");
-		return NULL;
+	if (compressor->cstream) {
+		zresult = ZSTD_resetCStream(compressor->cstream, sourceSize);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "could not reset CStream: %s",
+				ZSTD_getErrorName(zresult));
+			return -1;
+		}
+
+		return 0;
+	}
+
+	compressor->cstream = ZSTD_createCStream();
+	if (!compressor->cstream) {
+		PyErr_SetString(ZstdError, "could not create CStream");
+		return -1;
 	}
 
 	if (compressor->dict) {
@@ -63,15 +78,51 @@
 
 	zparams.fParams = compressor->fparams;
 
-	zresult = ZSTD_initCStream_advanced(cstream, dictData, dictSize, zparams, sourceSize);
+	zresult = ZSTD_initCStream_advanced(compressor->cstream, dictData, dictSize,
+		zparams, sourceSize);
 
 	if (ZSTD_isError(zresult)) {
-		ZSTD_freeCStream(cstream);
+		ZSTD_freeCStream(compressor->cstream);
+		compressor->cstream = NULL;
 		PyErr_Format(ZstdError, "cannot init CStream: %s", ZSTD_getErrorName(zresult));
-		return NULL;
+		return -1;
 	}
 
-	return cstream;
+	return 0;
+}
+
+int init_mtcstream(ZstdCompressor* compressor, Py_ssize_t sourceSize) {
+	size_t zresult;
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	ZSTD_parameters zparams;
+
+	assert(compressor->mtcctx);
+
+	if (compressor->dict) {
+		dictData = compressor->dict->dictData;
+		dictSize = compressor->dict->dictSize;
+	}
+
+	memset(&zparams, 0, sizeof(zparams));
+	if (compressor->cparams) {
+		ztopy_compression_parameters(compressor->cparams, &zparams.cParams);
+	}
+	else {
+		zparams.cParams = ZSTD_getCParams(compressor->compressionLevel, sourceSize, dictSize);
+	}
+
+	zparams.fParams = compressor->fparams;
+
+	zresult = ZSTDMT_initCStream_advanced(compressor->mtcctx, dictData, dictSize,
+		zparams, sourceSize);
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "cannot init CStream: %s", ZSTD_getErrorName(zresult));
+		return -1;
+	}
+
+	return 0;
 }
 
 PyDoc_STRVAR(ZstdCompressor__doc__,
@@ -103,6 +154,11 @@
 "   Determines whether the dictionary ID will be written into the compressed\n"
 "   data. Defaults to True. Only adds content to the compressed data if\n"
 "   a dictionary is being used.\n"
+"threads\n"
+"   Number of threads to use to compress data concurrently. When set,\n"
+"   compression operations are performed on multiple threads. The default\n"
+"   value (0) disables multi-threaded compression. A value of ``-1`` means to\n"
+"   set the number of threads to the number of detected logical CPUs.\n"
 );
 
 static int ZstdCompressor_init(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
@@ -113,6 +169,7 @@
 		"write_checksum",
 		"write_content_size",
 		"write_dict_id",
+		"threads",
 		NULL
 	};
 
@@ -122,16 +179,12 @@
 	PyObject* writeChecksum = NULL;
 	PyObject* writeContentSize = NULL;
 	PyObject* writeDictID = NULL;
+	int threads = 0;
 
-	self->cctx = NULL;
-	self->dict = NULL;
-	self->cparams = NULL;
-	self->cdict = NULL;
-
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOO", kwlist,
-		&level, &ZstdCompressionDictType, &dict,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO!O!OOOi:ZstdCompressor",
+		kwlist,	&level, &ZstdCompressionDictType, &dict,
 		&CompressionParametersType, &params,
-		&writeChecksum, &writeContentSize, &writeDictID)) {
+		&writeChecksum, &writeContentSize, &writeDictID, &threads)) {
 		return -1;
 	}
 
@@ -146,12 +199,27 @@
 		return -1;
 	}
 
+	if (threads < 0) {
+		threads = cpu_count();
+	}
+
+	self->threads = threads;
+
 	/* We create a ZSTD_CCtx for reuse among multiple operations to reduce the
 	   overhead of each compression operation. */
-	self->cctx = ZSTD_createCCtx();
-	if (!self->cctx) {
-		PyErr_NoMemory();
-		return -1;
+	if (threads) {
+		self->mtcctx = ZSTDMT_createCCtx(threads);
+		if (!self->mtcctx) {
+			PyErr_NoMemory();
+			return -1;
+		}
+	}
+	else {
+		self->cctx = ZSTD_createCCtx();
+		if (!self->cctx) {
+			PyErr_NoMemory();
+			return -1;
+		}
 	}
 
 	self->compressionLevel = level;
@@ -182,6 +250,11 @@
 }
 
 static void ZstdCompressor_dealloc(ZstdCompressor* self) {
+	if (self->cstream) {
+		ZSTD_freeCStream(self->cstream);
+		self->cstream = NULL;
+	}
+
 	Py_XDECREF(self->cparams);
 	Py_XDECREF(self->dict);
 
@@ -195,6 +268,11 @@
 		self->cctx = NULL;
 	}
 
+	if (self->mtcctx) {
+		ZSTDMT_freeCCtx(self->mtcctx);
+		self->mtcctx = NULL;
+	}
+
 	PyObject_Del(self);
 }
 
@@ -229,7 +307,6 @@
 	Py_ssize_t sourceSize = 0;
 	size_t inSize = ZSTD_CStreamInSize();
 	size_t outSize = ZSTD_CStreamOutSize();
-	ZSTD_CStream* cstream;
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	Py_ssize_t totalRead = 0;
@@ -243,8 +320,8 @@
 	PyObject* totalReadPy;
 	PyObject* totalWritePy;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk", kwlist, &source, &dest, &sourceSize,
-		&inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|nkk:copy_stream", kwlist,
+		&source, &dest, &sourceSize, &inSize, &outSize)) {
 		return NULL;
 	}
 
@@ -261,10 +338,17 @@
 	/* Prevent free on uninitialized memory in finally. */
 	output.dst = NULL;
 
-	cstream = CStream_from_ZstdCompressor(self, sourceSize);
-	if (!cstream) {
-		res = NULL;
-		goto finally;
+	if (self->mtcctx) {
+		if (init_mtcstream(self, sourceSize)) {
+			res = NULL;
+			goto finally;
+		}
+	}
+	else {
+		if (0 != init_cstream(self, sourceSize)) {
+			res = NULL;
+			goto finally;
+		}
 	}
 
 	output.dst = PyMem_Malloc(outSize);
@@ -300,7 +384,12 @@
 
 		while (input.pos < input.size) {
 			Py_BEGIN_ALLOW_THREADS
-			zresult = ZSTD_compressStream(cstream, &output, &input);
+			if (self->mtcctx) {
+				zresult = ZSTDMT_compressStream(self->mtcctx, &output, &input);
+			}
+			else {
+				zresult = ZSTD_compressStream(self->cstream, &output, &input);
+			}
 			Py_END_ALLOW_THREADS
 
 			if (ZSTD_isError(zresult)) {
@@ -325,7 +414,12 @@
 
 	/* We've finished reading. Now flush the compressor stream. */
 	while (1) {
-		zresult = ZSTD_endStream(cstream, &output);
+		if (self->mtcctx) {
+			zresult = ZSTDMT_endStream(self->mtcctx, &output);
+		}
+		else {
+			zresult = ZSTD_endStream(self->cstream, &output);
+		}
 		if (ZSTD_isError(zresult)) {
 			PyErr_Format(ZstdError, "error ending compression stream: %s",
 				ZSTD_getErrorName(zresult));
@@ -350,24 +444,17 @@
 		}
 	}
 
-	ZSTD_freeCStream(cstream);
-	cstream = NULL;
-
 	totalReadPy = PyLong_FromSsize_t(totalRead);
 	totalWritePy = PyLong_FromSsize_t(totalWrite);
 	res = PyTuple_Pack(2, totalReadPy, totalWritePy);
-	Py_DecRef(totalReadPy);
-	Py_DecRef(totalWritePy);
+	Py_DECREF(totalReadPy);
+	Py_DECREF(totalWritePy);
 
 finally:
 	if (output.dst) {
 		PyMem_Free(output.dst);
 	}
 
-	if (cstream) {
-		ZSTD_freeCStream(cstream);
-	}
-
 	return res;
 }
 
@@ -402,14 +489,26 @@
 	ZSTD_parameters zparams;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O",
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|O:compress",
 #else
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O",
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|O:compress",
 #endif
 		kwlist, &source, &sourceSize, &allowEmpty)) {
 		return NULL;
 	}
 
+	if (self->threads && self->dict) {
+		PyErr_SetString(ZstdError,
+			"compress() cannot be used with both dictionaries and multi-threaded compression");
+		return NULL;
+	}
+
+	if (self->threads && self->cparams) {
+		PyErr_SetString(ZstdError,
+			"compress() cannot be used with both compression parameters and multi-threaded compression");
+		return NULL;
+	}
+
 	/* Limitation in zstd C API doesn't let decompression side distinguish
 	   between content size of 0 and unknown content size. This can make round
 	   tripping via Python difficult. Until this is fixed, require a flag
@@ -456,24 +555,28 @@
 	https://github.com/facebook/zstd/issues/358 contains more info. We could
 	potentially add an argument somewhere to control this behavior.
 	*/
-	if (dictData && !self->cdict) {
-		if (populate_cdict(self, dictData, dictSize, &zparams)) {
-			Py_DECREF(output);
-			return NULL;
-		}
+	if (0 != populate_cdict(self, &zparams)) {
+		Py_DECREF(output);
+		return NULL;
 	}
 
 	Py_BEGIN_ALLOW_THREADS
-	/* By avoiding ZSTD_compress(), we don't necessarily write out content
-	   size. This means the argument to ZstdCompressor to control frame
-	   parameters is honored. */
-	if (self->cdict) {
-		zresult = ZSTD_compress_usingCDict(self->cctx, dest, destSize,
-			source, sourceSize, self->cdict);
+	if (self->mtcctx) {
+		zresult = ZSTDMT_compressCCtx(self->mtcctx, dest, destSize,
+			source, sourceSize, self->compressionLevel);
 	}
 	else {
-		zresult = ZSTD_compress_advanced(self->cctx, dest, destSize,
-			source, sourceSize, dictData, dictSize, zparams);
+		/* By avoiding ZSTD_compress(), we don't necessarily write out content
+		   size. This means the argument to ZstdCompressor to control frame
+		   parameters is honored. */
+		if (self->cdict) {
+			zresult = ZSTD_compress_usingCDict(self->cctx, dest, destSize,
+				source, sourceSize, self->cdict);
+		}
+		else {
+			zresult = ZSTD_compress_advanced(self->cctx, dest, destSize,
+				source, sourceSize, dictData, dictSize, zparams);
+		}
 	}
 	Py_END_ALLOW_THREADS
 
@@ -507,19 +610,28 @@
 
 	Py_ssize_t inSize = 0;
 	size_t outSize = ZSTD_CStreamOutSize();
-	ZstdCompressionObj* result = PyObject_New(ZstdCompressionObj, &ZstdCompressionObjType);
+	ZstdCompressionObj* result = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n:compressobj", kwlist, &inSize)) {
+		return NULL;
+	}
+
+	result = (ZstdCompressionObj*)PyObject_CallObject((PyObject*)&ZstdCompressionObjType, NULL);
 	if (!result) {
 		return NULL;
 	}
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|n", kwlist, &inSize)) {
-		return NULL;
+	if (self->mtcctx) {
+		if (init_mtcstream(self, inSize)) {
+			Py_DECREF(result);
+			return NULL;
+		}
 	}
-
-	result->cstream = CStream_from_ZstdCompressor(self, inSize);
-	if (!result->cstream) {
-		Py_DECREF(result);
-		return NULL;
+	else {
+		if (0 != init_cstream(self, inSize)) {
+			Py_DECREF(result);
+			return NULL;
+		}
 	}
 
 	result->output.dst = PyMem_Malloc(outSize);
@@ -529,13 +641,9 @@
 		return NULL;
 	}
 	result->output.size = outSize;
-	result->output.pos = 0;
-
 	result->compressor = self;
 	Py_INCREF(result->compressor);
 
-	result->finished = 0;
-
 	return result;
 }
 
@@ -574,24 +682,15 @@
 	size_t outSize = ZSTD_CStreamOutSize();
 	ZstdCompressorIterator* result;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk", kwlist, &reader, &sourceSize,
-		&inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nkk:read_from", kwlist,
+		&reader, &sourceSize, &inSize, &outSize)) {
 		return NULL;
 	}
 
-	result = PyObject_New(ZstdCompressorIterator, &ZstdCompressorIteratorType);
+	result = (ZstdCompressorIterator*)PyObject_CallObject((PyObject*)&ZstdCompressorIteratorType, NULL);
 	if (!result) {
 		return NULL;
 	}
-
-	result->compressor = NULL;
-	result->reader = NULL;
-	result->buffer = NULL;
-	result->cstream = NULL;
-	result->input.src = NULL;
-	result->output.dst = NULL;
-	result->readResult = NULL;
-
 	if (PyObject_HasAttrString(reader, "read")) {
 		result->reader = reader;
 		Py_INCREF(result->reader);
@@ -608,7 +707,6 @@
 			goto except;
 		}
 
-		result->bufferOffset = 0;
 		sourceSize = result->buffer->len;
 	}
 	else {
@@ -621,9 +719,16 @@
 	Py_INCREF(result->compressor);
 
 	result->sourceSize = sourceSize;
-	result->cstream = CStream_from_ZstdCompressor(self, sourceSize);
-	if (!result->cstream) {
-		goto except;
+
+	if (self->mtcctx) {
+		if (init_mtcstream(self, sourceSize)) {
+			goto except;
+		}
+	}
+	else {
+		if (0 != init_cstream(self, sourceSize)) {
+			goto except;
+		}
 	}
 
 	result->inSize = inSize;
@@ -635,26 +740,12 @@
 		goto except;
 	}
 	result->output.size = outSize;
-	result->output.pos = 0;
-
-	result->input.src = NULL;
-	result->input.size = 0;
-	result->input.pos = 0;
-
-	result->finishedInput = 0;
-	result->finishedOutput = 0;
 
 	goto finally;
 
 except:
-	if (result->cstream) {
-		ZSTD_freeCStream(result->cstream);
-		result->cstream = NULL;
-	}
-
-	Py_DecRef((PyObject*)result->compressor);
-	Py_DecRef(result->reader);
-
+	Py_XDECREF(result->compressor);
+	Py_XDECREF(result->reader);
 	Py_DECREF(result);
 	result = NULL;
 
@@ -693,8 +784,8 @@
 	Py_ssize_t sourceSize = 0;
 	size_t outSize = ZSTD_CStreamOutSize();
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk", kwlist, &writer, &sourceSize,
-		&outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|nk:write_to", kwlist,
+		&writer, &sourceSize, &outSize)) {
 		return NULL;
 	}
 
@@ -703,7 +794,7 @@
 		return NULL;
 	}
 
-	result = PyObject_New(ZstdCompressionWriter, &ZstdCompressionWriterType);
+	result = (ZstdCompressionWriter*)PyObject_CallObject((PyObject*)&ZstdCompressionWriterType, NULL);
 	if (!result) {
 		return NULL;
 	}
@@ -715,11 +806,671 @@
 	Py_INCREF(result->writer);
 
 	result->sourceSize = sourceSize;
-
 	result->outSize = outSize;
 
-	result->entered = 0;
-	result->cstream = NULL;
+	return result;
+}
+
+typedef struct {
+	void* sourceData;
+	size_t sourceSize;
+} DataSource;
+
+typedef struct {
+	DataSource* sources;
+	Py_ssize_t sourcesSize;
+	unsigned long long totalSourceSize;
+} DataSources;
+
+typedef struct {
+	void* dest;
+	Py_ssize_t destSize;
+	BufferSegment* segments;
+	Py_ssize_t segmentsSize;
+} DestBuffer;
+
+typedef enum {
+	WorkerError_none = 0,
+	WorkerError_zstd = 1,
+	WorkerError_no_memory = 2,
+} WorkerError;
+
+/**
+ * Holds state for an individual worker performing multi_compress_to_buffer work.
+ */
+typedef struct {
+	/* Used for compression. */
+	ZSTD_CCtx* cctx;
+	ZSTD_CDict* cdict;
+	int cLevel;
+	CompressionParametersObject* cParams;
+	ZSTD_frameParameters fParams;
+
+	/* What to compress. */
+	DataSource* sources;
+	Py_ssize_t sourcesSize;
+	Py_ssize_t startOffset;
+	Py_ssize_t endOffset;
+	unsigned long long totalSourceSize;
+
+	/* Result storage. */
+	DestBuffer* destBuffers;
+	Py_ssize_t destCount;
+
+	/* Error tracking. */
+	WorkerError error;
+	size_t zresult;
+	Py_ssize_t errorOffset;
+} WorkerState;
+
+static void compress_worker(WorkerState* state) {
+	Py_ssize_t inputOffset = state->startOffset;
+	Py_ssize_t remainingItems = state->endOffset - state->startOffset + 1;
+	Py_ssize_t currentBufferStartOffset = state->startOffset;
+	size_t zresult;
+	ZSTD_parameters zparams;
+	void* newDest;
+	size_t allocationSize;
+	size_t boundSize;
+	Py_ssize_t destOffset = 0;
+	DataSource* sources = state->sources;
+	DestBuffer* destBuffer;
+
+	assert(!state->destBuffers);
+	assert(0 == state->destCount);
+
+	if (state->cParams) {
+		ztopy_compression_parameters(state->cParams, &zparams.cParams);
+	}
+
+	zparams.fParams = state->fParams;
+
+	/*
+	 * The total size of the compressed data is unknown until we actually
+	 * compress data. That means we can't pre-allocate the exact size we need.
+	 * 
+	 * There is a cost to every allocation and reallocation. So, it is in our
+	 * interest to minimize the number of allocations.
+	 *
+	 * There is also a cost to too few allocations. If allocations are too
+	 * large they may fail. If buffers are shared and all inputs become
+	 * irrelevant at different lifetimes, then a reference to one segment
+	 * in the buffer will keep the entire buffer alive. This leads to excessive
+	 * memory usage.
+	 *
+	 * Our current strategy is to assume a compression ratio of 16:1 and
+	 * allocate buffers of that size, rounded up to the nearest power of 2
+	 * (because computers like round numbers). That ratio is greater than what
+	 * most inputs achieve. This is by design: we don't want to over-allocate.
+	 * But we don't want to under-allocate and lead to too many buffers either.
+	 */
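+	/*
+	 * Worked example of the heuristic (hypothetical numbers): with
+	 * totalSourceSize = 1,000,000 bytes, the initial guess is
+	 * 1,000,000 >> 4 = 62,500 bytes, which roundpow2() rounds up to the
+	 * next power of 2, 65,536 bytes (64 KiB).
+	 */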
+
+	state->destCount = 1;
+
+	state->destBuffers = calloc(1, sizeof(DestBuffer));
+	if (NULL == state->destBuffers) {
+		state->error = WorkerError_no_memory;
+		return;
+	}
+
+	destBuffer = &state->destBuffers[state->destCount - 1];
+
+	/*
+	 * Rather than track bounds and grow the segments buffer, allocate space
+	 * to hold remaining items then truncate when we're done with it.
+	 */
+	destBuffer->segments = calloc(remainingItems, sizeof(BufferSegment));
+	if (NULL == destBuffer->segments) {
+		state->error = WorkerError_no_memory;
+		return;
+	}
+
+	destBuffer->segmentsSize = remainingItems;
+
+	allocationSize = roundpow2(state->totalSourceSize >> 4);
+
+	/* If the maximum size of the output is larger than that, round up. */
+	boundSize = ZSTD_compressBound(sources[inputOffset].sourceSize);
+
+	if (boundSize > allocationSize) {
+		allocationSize = roundpow2(boundSize);
+	}
+
+	destBuffer->dest = malloc(allocationSize);
+	if (NULL == destBuffer->dest) {
+		state->error = WorkerError_no_memory;
+		return;
+	}
+
+	destBuffer->destSize = allocationSize;
+
+	for (inputOffset = state->startOffset; inputOffset <= state->endOffset; inputOffset++) {
+		void* source = sources[inputOffset].sourceData;
+		size_t sourceSize = sources[inputOffset].sourceSize;
+		size_t destAvailable;
+		void* dest;
+
+		destAvailable = destBuffer->destSize - destOffset;
+		boundSize = ZSTD_compressBound(sourceSize);
+
+		/*
+		 * Not enough space in current buffer to hold largest compressed output.
+		 * So allocate and switch to a new output buffer.
+		 */
+		if (boundSize > destAvailable) {
+			/*
+			 * The downsizing of the existing buffer is optional. It should be cheap
+			 * (unlike growing). So we just do it.
+			 */
+			if (destAvailable) {
+				newDest = realloc(destBuffer->dest, destOffset);
+				if (NULL == newDest) {
+					state->error = WorkerError_no_memory;
+					return;
+				}
+
+				destBuffer->dest = newDest;
+				destBuffer->destSize = destOffset;
+			}
+
+			/* Truncate segments buffer. */
+			newDest = realloc(destBuffer->segments,
+				(inputOffset - currentBufferStartOffset + 1) * sizeof(BufferSegment));
+			if (NULL == newDest) {
+				state->error = WorkerError_no_memory;
+				return;
+			}
+
+			destBuffer->segments = newDest;
+			destBuffer->segmentsSize = inputOffset - currentBufferStartOffset;
+
+			/* Grow space for new struct. */
+			/* TODO consider over-allocating so we don't do this every time. */
+			newDest = realloc(state->destBuffers, (state->destCount + 1) * sizeof(DestBuffer));
+			if (NULL == newDest) {
+				state->error = WorkerError_no_memory;
+				return;
+			}
+
+			state->destBuffers = newDest;
+			state->destCount++;
+
+			destBuffer = &state->destBuffers[state->destCount - 1];
+
+			/* Don't take any chances with non-NULL pointers. */
+			memset(destBuffer, 0, sizeof(DestBuffer));
+
+			/**
+			 * We could dynamically update allocation size based on work done so far.
+			 * For now, keep it simple.
+			 */
+			allocationSize = roundpow2(state->totalSourceSize >> 4);
+
+			if (boundSize > allocationSize) {
+				allocationSize = roundpow2(boundSize);
+			}
+
+			destBuffer->dest = malloc(allocationSize);
+			if (NULL == destBuffer->dest) {
+				state->error = WorkerError_no_memory;
+				return;
+			}
+
+			destBuffer->destSize = allocationSize;
+			destAvailable = allocationSize;
+			destOffset = 0;
+
+			destBuffer->segments = calloc(remainingItems, sizeof(BufferSegment));
+			if (NULL == destBuffer->segments) {
+				state->error = WorkerError_no_memory;
+				return;
+			}
+
+			destBuffer->segmentsSize = remainingItems;
+			currentBufferStartOffset = inputOffset;
+		}
+
+		dest = (char*)destBuffer->dest + destOffset;
+
+		if (state->cdict) {
+			zresult = ZSTD_compress_usingCDict(state->cctx, dest, destAvailable,
+				source, sourceSize, state->cdict);
+		}
+		else {
+			if (!state->cParams) {
+				zparams.cParams = ZSTD_getCParams(state->cLevel, sourceSize, 0);
+			}
+
+			zresult = ZSTD_compress_advanced(state->cctx, dest, destAvailable,
+				source, sourceSize, NULL, 0, zparams);
+		}
+
+		if (ZSTD_isError(zresult)) {
+			state->error = WorkerError_zstd;
+			state->zresult = zresult;
+			state->errorOffset = inputOffset;
+			break;
+		}
+
+		destBuffer->segments[inputOffset - currentBufferStartOffset].offset = destOffset;
+		destBuffer->segments[inputOffset - currentBufferStartOffset].length = zresult;
+
+		destOffset += zresult;
+		remainingItems--;
+	}
+
+	if (destBuffer->destSize > destOffset) {
+		newDest = realloc(destBuffer->dest, destOffset);
+		if (NULL == newDest) {
+			state->error = WorkerError_no_memory;
+			return;
+		}
+
+		destBuffer->dest = newDest;
+		destBuffer->destSize = destOffset;
+	}
+}
+
+ZstdBufferWithSegmentsCollection* compress_from_datasources(ZstdCompressor* compressor,
+	DataSources* sources, unsigned int threadCount) {
+	ZSTD_parameters zparams;
+	unsigned long long bytesPerWorker;
+	POOL_ctx* pool = NULL;
+	WorkerState* workerStates = NULL;
+	Py_ssize_t i;
+	unsigned long long workerBytes = 0;
+	Py_ssize_t workerStartOffset = 0;
+	size_t currentThread = 0;
+	int errored = 0;
+	Py_ssize_t segmentsCount = 0;
+	Py_ssize_t segmentIndex;
+	PyObject* segmentsArg = NULL;
+	ZstdBufferWithSegments* buffer;
+	ZstdBufferWithSegmentsCollection* result = NULL;
+
+	assert(sources->sourcesSize > 0);
+	assert(sources->totalSourceSize > 0);
+	assert(threadCount >= 1);
+
+	/* More threads than inputs makes no sense. */
+	threadCount = sources->sourcesSize < threadCount ? (unsigned int)sources->sourcesSize
+													 : threadCount;
+
+	/* TODO lower thread count when input size is too small and threads would
+	   add overhead. */
+
+	/*
+	 * When dictionaries are used, parameters are derived from the size of the
+	 * first element.
+	 *
+	 * TODO come up with a better mechanism.
+	 */
+	memset(&zparams, 0, sizeof(zparams));
+	if (compressor->cparams) {
+		ztopy_compression_parameters(compressor->cparams, &zparams.cParams);
+	}
+	else {
+		zparams.cParams = ZSTD_getCParams(compressor->compressionLevel,
+			sources->sources[0].sourceSize,
+			compressor->dict ? compressor->dict->dictSize : 0);
+	}
+
+	zparams.fParams = compressor->fparams;
+
+	if (0 != populate_cdict(compressor, &zparams)) {
+		return NULL;
+	}
+
+	workerStates = PyMem_Malloc(threadCount * sizeof(WorkerState));
+	if (NULL == workerStates) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	memset(workerStates, 0, threadCount * sizeof(WorkerState));
+
+	if (threadCount > 1) {
+		pool = POOL_create(threadCount, 1);
+		if (NULL == pool) {
+			PyErr_SetString(ZstdError, "could not initialize zstd thread pool");
+			goto finally;
+		}
+	}
+
+	bytesPerWorker = sources->totalSourceSize / threadCount;
+
+	for (i = 0; i < threadCount; i++) {
+		workerStates[i].cctx = ZSTD_createCCtx();
+		if (!workerStates[i].cctx) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		workerStates[i].cdict = compressor->cdict;
+		workerStates[i].cLevel = compressor->compressionLevel;
+		workerStates[i].cParams = compressor->cparams;
+		workerStates[i].fParams = compressor->fparams;
+
+		workerStates[i].sources = sources->sources;
+		workerStates[i].sourcesSize = sources->sourcesSize;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	for (i = 0; i < sources->sourcesSize; i++) {
+		workerBytes += sources->sources[i].sourceSize;
+
+		/*
+		 * The last worker/thread needs to handle all remaining work. Don't
+		 * trigger it prematurely. Defer to the block outside of the loop
+		 * to run the last worker/thread. But do still process this loop
+		 * so workerBytes is correct.
+		 */
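+		/*
+		 * Illustrative dispatch (hypothetical numbers): 10 inputs totalling
+		 * 100 bytes across 4 threads gives bytesPerWorker = 25; a worker is
+		 * dispatched each time workerBytes reaches 25, and the final worker
+		 * takes whatever remains after the loop.
+		 */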
+		if (currentThread == threadCount - 1) {
+			continue;
+		}
+
+		if (workerBytes >= bytesPerWorker) {
+			assert(currentThread < threadCount);
+			workerStates[currentThread].totalSourceSize = workerBytes;
+			workerStates[currentThread].startOffset = workerStartOffset;
+			workerStates[currentThread].endOffset = i;
+
+			if (threadCount > 1) {
+				POOL_add(pool, (POOL_function)compress_worker, &workerStates[currentThread]);
+			}
+			else {
+				compress_worker(&workerStates[currentThread]);
+			}
+
+			currentThread++;
+			workerStartOffset = i + 1;
+			workerBytes = 0;
+		}
+	}
+
+	if (workerBytes) {
+		assert(currentThread < threadCount);
+		workerStates[currentThread].totalSourceSize = workerBytes;
+		workerStates[currentThread].startOffset = workerStartOffset;
+		workerStates[currentThread].endOffset = sources->sourcesSize - 1;
+
+		if (threadCount > 1) {
+			POOL_add(pool, (POOL_function)compress_worker, &workerStates[currentThread]);
+		}
+		else {
+			compress_worker(&workerStates[currentThread]);
+		}
+	}
+
+	if (threadCount > 1) {
+		POOL_free(pool);
+		pool = NULL;
+	}
+
+	Py_END_ALLOW_THREADS
+
+	for (i = 0; i < threadCount; i++) {
+		switch (workerStates[i].error) {
+		case WorkerError_no_memory:
+			PyErr_NoMemory();
+			errored = 1;
+			break;
+
+		case WorkerError_zstd:
+			PyErr_Format(ZstdError, "error compressing item %zd: %s",
+				workerStates[i].errorOffset, ZSTD_getErrorName(workerStates[i].zresult));
+			errored = 1;
+			break;
+		default:
+			;
+		}
+
+		if (errored) {
+			break;
+		}
+	}
+
+	if (errored) {
+		goto finally;
+	}
+
+	segmentsCount = 0;
+	for (i = 0; i < threadCount; i++) {
+		WorkerState* state = &workerStates[i];
+		segmentsCount += state->destCount;
+	}
+
+	segmentsArg = PyTuple_New(segmentsCount);
+	if (NULL == segmentsArg) {
+		goto finally;
+	}
+
+	segmentIndex = 0;
+
+	for (i = 0; i < threadCount; i++) {
+		Py_ssize_t j;
+		WorkerState* state = &workerStates[i];
+
+		for (j = 0; j < state->destCount; j++) {
+			DestBuffer* destBuffer = &state->destBuffers[j];
+			buffer = BufferWithSegments_FromMemory(destBuffer->dest, destBuffer->destSize,
+				destBuffer->segments, destBuffer->segmentsSize);
+
+			if (NULL == buffer) {
+				goto finally;
+			}
+
+			/* Tell instance to use free() instead of PyMem_Free(). */
+			buffer->useFree = 1;
+
+			/*
+			 * BufferWithSegments_FromMemory takes ownership of the backing memory.
+			 * Unset it here so it doesn't get freed below.
+			 */
+			destBuffer->dest = NULL;
+			destBuffer->segments = NULL;
+
+			PyTuple_SET_ITEM(segmentsArg, segmentIndex++, (PyObject*)buffer);
+		}
+	}
+
+	result = (ZstdBufferWithSegmentsCollection*)PyObject_CallObject(
+		(PyObject*)&ZstdBufferWithSegmentsCollectionType, segmentsArg);
+
+finally:
+	Py_CLEAR(segmentsArg);
+
+	if (pool) {
+		POOL_free(pool);
+	}
+
+	if (workerStates) {
+		Py_ssize_t j;
+
+		for (i = 0; i < threadCount; i++) {
+			WorkerState state = workerStates[i];
+
+			if (state.cctx) {
+				ZSTD_freeCCtx(state.cctx);
+			}
+
+			/* malloc() is used in worker thread. */
+
+			for (j = 0; j < state.destCount; j++) {
+				if (state.destBuffers) {
+					free(state.destBuffers[j].dest);
+					free(state.destBuffers[j].segments);
+				}
+			}
+
+			free(state.destBuffers);
+		}
+
+		PyMem_Free(workerStates);
+	}
+
+	return result;
+}
+
+PyDoc_STRVAR(ZstdCompressor_multi_compress_to_buffer__doc__,
+"Compress multiple pieces of data as a single operation\n"
+"\n"
+"Receives a ``BufferWithSegmentsCollection``, a ``BufferWithSegments``, or\n"
+"a list of bytes like objects holding data to compress.\n"
+"\n"
+"Returns a ``BufferWithSegmentsCollection`` holding compressed data.\n"
+"\n"
+"This function is optimized to perform multiple compression operations as\n"
+"as possible with as little overhead as possbile.\n"
+);
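+
+/*
+ * Hypothetical usage sketch (Python level; names assumed from the kwlist
+ * below, not part of this change):
+ *
+ *   cctx = zstd.ZstdCompressor()
+ *   collection = cctx.multi_compress_to_buffer(data=[b"foo", b"bar"], threads=2)
+ *   # collection is a BufferWithSegmentsCollection holding one compressed
+ *   # frame per input.
+ */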
+
+static ZstdBufferWithSegmentsCollection* ZstdCompressor_multi_compress_to_buffer(ZstdCompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"data",
+		"threads",
+		NULL
+	};
+
+	PyObject* data;
+	int threads = 0;
+	Py_buffer* dataBuffers = NULL;
+	DataSources sources;
+	Py_ssize_t i;
+	Py_ssize_t sourceCount = 0;
+	ZstdBufferWithSegmentsCollection* result = NULL;
+
+	if (self->mtcctx) {
+		PyErr_SetString(ZstdError,
+			"function cannot be called on ZstdCompressor configured for multi-threaded compression");
+		return NULL;
+	}
+
+	memset(&sources, 0, sizeof(sources));
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|i:multi_compress_to_buffer", kwlist,
+		&data, &threads)) {
+		return NULL;
+	}
+
+	if (threads < 0) {
+		threads = cpu_count();
+	}
+
+	if (threads < 2) {
+		threads = 1;
+	}
+
+	if (PyObject_TypeCheck(data, &ZstdBufferWithSegmentsType)) {
+		ZstdBufferWithSegments* buffer = (ZstdBufferWithSegments*)data;
+
+		sources.sources = PyMem_Malloc(buffer->segmentCount * sizeof(DataSource));
+		if (NULL == sources.sources) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		for (i = 0; i < buffer->segmentCount; i++) {
+			sources.sources[i].sourceData = (char*)buffer->data + buffer->segments[i].offset;
+			sources.sources[i].sourceSize = buffer->segments[i].length;
+			sources.totalSourceSize += buffer->segments[i].length;
+		}
+
+		sources.sourcesSize = buffer->segmentCount;
+	}
+	else if (PyObject_TypeCheck(data, &ZstdBufferWithSegmentsCollectionType)) {
+		Py_ssize_t j;
+		Py_ssize_t offset = 0;
+		ZstdBufferWithSegments* buffer;
+		ZstdBufferWithSegmentsCollection* collection = (ZstdBufferWithSegmentsCollection*)data;
+
+		sourceCount = BufferWithSegmentsCollection_length(collection);
+
+		sources.sources = PyMem_Malloc(sourceCount * sizeof(DataSource));
+		if (NULL == sources.sources) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		for (i = 0; i < collection->bufferCount; i++) {
+			buffer = collection->buffers[i];
+
+			for (j = 0; j < buffer->segmentCount; j++) {
+				sources.sources[offset].sourceData = (char*)buffer->data + buffer->segments[j].offset;
+				sources.sources[offset].sourceSize = buffer->segments[j].length;
+				sources.totalSourceSize += buffer->segments[j].length;
+
+				offset++;
+			}
+		}
+
+		sources.sourcesSize = sourceCount;
+	}
+	else if (PyList_Check(data)) {
+		sourceCount = PyList_GET_SIZE(data);
+
+		sources.sources = PyMem_Malloc(sourceCount * sizeof(DataSource));
+		if (NULL == sources.sources) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		/*
+		 * It isn't clear whether the address referred to by Py_buffer.buf
+		 * is still valid after PyBuffer_Release. So we hold a reference to all
+		 * Py_buffer instances for the duration of the operation.
+		 */
+		dataBuffers = PyMem_Malloc(sourceCount * sizeof(Py_buffer));
+		if (NULL == dataBuffers) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		memset(dataBuffers, 0, sourceCount * sizeof(Py_buffer));
+
+		for (i = 0; i < sourceCount; i++) {
+			if (0 != PyObject_GetBuffer(PyList_GET_ITEM(data, i),
+				&dataBuffers[i], PyBUF_CONTIG_RO)) {
+				PyErr_Clear();
+				PyErr_Format(PyExc_TypeError, "item %zd not a bytes like object", i);
+				goto finally;
+			}
+
+			sources.sources[i].sourceData = dataBuffers[i].buf;
+			sources.sources[i].sourceSize = dataBuffers[i].len;
+			sources.totalSourceSize += dataBuffers[i].len;
+		}
+
+		sources.sourcesSize = sourceCount;
+	}
+	else {
+		PyErr_SetString(PyExc_TypeError,
+			"argument must be a BufferWithSegments, a BufferWithSegmentsCollection, or a list of bytes like objects");
+		goto finally;
+	}
+
+	if (0 == sources.sourcesSize) {
+		PyErr_SetString(PyExc_ValueError, "no source elements found");
+		goto finally;
+	}
+
+	if (0 == sources.totalSourceSize) {
+		PyErr_SetString(PyExc_ValueError, "source elements are empty");
+		goto finally;
+	}
+
+	result = compress_from_datasources(self, &sources, threads);
+
+finally:
+	PyMem_Free(sources.sources);
+
+	if (dataBuffers) {
+		for (i = 0; i < sourceCount; i++) {
+			PyBuffer_Release(&dataBuffers[i]);
+		}
+
+		PyMem_Free(dataBuffers);
+	}
 
 	return result;
 }
@@ -735,6 +1486,8 @@
 	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_read_from__doc__ },
 	{ "write_to", (PyCFunction)ZstdCompressor_write_to,
 	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_write_to___doc__ },
+	{ "multi_compress_to_buffer", (PyCFunction)ZstdCompressor_multi_compress_to_buffer,
+	METH_VARARGS | METH_KEYWORDS, ZstdCompressor_multi_compress_to_buffer__doc__ },
 	{ NULL, NULL }
 };
 
--- a/contrib/python-zstandard/c-ext/compressoriterator.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/compressoriterator.c	Tue Apr 18 12:24:34 2017 -0400
@@ -27,11 +27,6 @@
 		self->buffer = NULL;
 	}
 
-	if (self->cstream) {
-		ZSTD_freeCStream(self->cstream);
-		self->cstream = NULL;
-	}
-
 	if (self->output.dst) {
 		PyMem_Free(self->output.dst);
 		self->output.dst = NULL;
@@ -63,7 +58,14 @@
 	/* If we have data left in the input, consume it. */
 	if (self->input.pos < self->input.size) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_compressStream(self->cstream, &self->output, &self->input);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_compressStream(self->compressor->mtcctx,
+				&self->output, &self->input);
+		}
+		else {
+			zresult = ZSTD_compressStream(self->compressor->cstream, &self->output,
+				&self->input);
+		}
 		Py_END_ALLOW_THREADS
 
 		/* Release the Python object holding the input buffer. */
@@ -128,7 +130,12 @@
 
 	/* EOF */
 	if (0 == readSize) {
-		zresult = ZSTD_endStream(self->cstream, &self->output);
+		if (self->compressor->mtcctx) {
+			zresult = ZSTDMT_endStream(self->compressor->mtcctx, &self->output);
+		}
+		else {
+			zresult = ZSTD_endStream(self->compressor->cstream, &self->output);
+		}
 		if (ZSTD_isError(zresult)) {
 			PyErr_Format(ZstdError, "error ending compression stream: %s",
 				ZSTD_getErrorName(zresult));
@@ -152,7 +159,13 @@
 	self->input.pos = 0;
 
 	Py_BEGIN_ALLOW_THREADS
-	zresult = ZSTD_compressStream(self->cstream, &self->output, &self->input);
+	if (self->compressor->mtcctx) {
+		zresult = ZSTDMT_compressStream(self->compressor->mtcctx, &self->output,
+			&self->input);
+	}
+	else {
+		zresult = ZSTD_compressStream(self->compressor->cstream, &self->output, &self->input);
+	}
 	Py_END_ALLOW_THREADS
 
 	/* The input buffer currently points to memory managed by Python
--- a/contrib/python-zstandard/c-ext/constants.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/constants.c	Tue Apr 18 12:24:34 2017 -0400
@@ -41,7 +41,7 @@
 	PyTuple_SetItem(zstdVersion, 0, PyLong_FromLong(ZSTD_VERSION_MAJOR));
 	PyTuple_SetItem(zstdVersion, 1, PyLong_FromLong(ZSTD_VERSION_MINOR));
 	PyTuple_SetItem(zstdVersion, 2, PyLong_FromLong(ZSTD_VERSION_RELEASE));
-	Py_IncRef(zstdVersion);
+	Py_INCREF(zstdVersion);
 	PyModule_AddObject(mod, "ZSTD_VERSION", zstdVersion);
 
 	frameHeader = PyBytes_FromStringAndSize(frame_header, sizeof(frame_header));
--- a/contrib/python-zstandard/c-ext/decompressionwriter.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/decompressionwriter.c	Tue Apr 18 12:24:34 2017 -0400
@@ -18,11 +18,6 @@
 	Py_XDECREF(self->decompressor);
 	Py_XDECREF(self->writer);
 
-	if (self->dstream) {
-		ZSTD_freeDStream(self->dstream);
-		self->dstream = NULL;
-	}
-
 	PyObject_Del(self);
 }
 
@@ -32,8 +27,7 @@
 		return NULL;
 	}
 
-	self->dstream = DStream_from_ZstdDecompressor(self->decompressor);
-	if (!self->dstream) {
+	if (0 != init_dstream(self->decompressor)) {
 		return NULL;
 	}
 
@@ -46,22 +40,17 @@
 static PyObject* ZstdDecompressionWriter_exit(ZstdDecompressionWriter* self, PyObject* args) {
 	self->entered = 0;
 
-	if (self->dstream) {
-		ZSTD_freeDStream(self->dstream);
-		self->dstream = NULL;
-	}
-
 	Py_RETURN_FALSE;
 }
 
 static PyObject* ZstdDecompressionWriter_memory_size(ZstdDecompressionWriter* self) {
-	if (!self->dstream) {
+	if (!self->decompressor->dstream) {
 		PyErr_SetString(ZstdError, "cannot determine size of inactive decompressor; "
 			"call when context manager is active");
 		return NULL;
 	}
 
-	return PyLong_FromSize_t(ZSTD_sizeof_DStream(self->dstream));
+	return PyLong_FromSize_t(ZSTD_sizeof_DStream(self->decompressor->dstream));
 }
 
 static PyObject* ZstdDecompressionWriter_write(ZstdDecompressionWriter* self, PyObject* args) {
@@ -71,11 +60,12 @@
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	PyObject* res;
+	Py_ssize_t totalWrite = 0;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "y#:write", &source, &sourceSize)) {
 #else
-	if (!PyArg_ParseTuple(args, "s#", &source, &sourceSize)) {
+	if (!PyArg_ParseTuple(args, "s#:write", &source, &sourceSize)) {
 #endif
 		return NULL;
 	}
@@ -85,6 +75,8 @@
 		return NULL;
 	}
 
+	assert(self->decompressor->dstream);
+
 	output.dst = PyMem_Malloc(self->outSize);
 	if (!output.dst) {
 		return PyErr_NoMemory();
@@ -98,7 +90,7 @@
 
 	while ((ssize_t)input.pos < sourceSize) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_decompressStream(self->dstream, &output, &input);
+		zresult = ZSTD_decompressStream(self->decompressor->dstream, &output, &input);
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -116,15 +108,15 @@
 #endif
 				output.dst, output.pos);
 			Py_XDECREF(res);
+			totalWrite += output.pos;
 			output.pos = 0;
 		}
 	}
 
 	PyMem_Free(output.dst);
 
-	/* TODO return bytes written */
-	Py_RETURN_NONE;
-	}
+	return PyLong_FromSsize_t(totalWrite);
+}
 
 static PyMethodDef ZstdDecompressionWriter_methods[] = {
 	{ "__enter__", (PyCFunction)ZstdDecompressionWriter_enter, METH_NOARGS,
--- a/contrib/python-zstandard/c-ext/decompressobj.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/decompressobj.c	Tue Apr 18 12:24:34 2017 -0400
@@ -15,11 +15,6 @@
 );
 
 static void DecompressionObj_dealloc(ZstdDecompressionObj* self) {
-	if (self->dstream) {
-		ZSTD_freeDStream(self->dstream);
-		self->dstream = NULL;
-	}
-
 	Py_XDECREF(self->decompressor);
 
 	PyObject_Del(self);
@@ -35,15 +30,18 @@
 	PyObject* result = NULL;
 	Py_ssize_t resultSize = 0;
 
+	/* Constructor should ensure stream is populated. */
+	assert(self->decompressor->dstream);
+
 	if (self->finished) {
 		PyErr_SetString(ZstdError, "cannot use a decompressobj multiple times");
 		return NULL;
 	}
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTuple(args, "y#",
+	if (!PyArg_ParseTuple(args, "y#:decompress",
 #else
-	if (!PyArg_ParseTuple(args, "s#",
+	if (!PyArg_ParseTuple(args, "s#:decompress",
 #endif
 		&source, &sourceSize)) {
 		return NULL;
@@ -64,7 +62,7 @@
 	/* Read input until exhausted. */
 	while (input.pos < input.size) {
 		Py_BEGIN_ALLOW_THREADS
-		zresult = ZSTD_decompressStream(self->dstream, &output, &input);
+		zresult = ZSTD_decompressStream(self->decompressor->dstream, &output, &input);
 		Py_END_ALLOW_THREADS
 
 		if (ZSTD_isError(zresult)) {
@@ -106,8 +104,7 @@
 	goto finally;
 
 except:
-	Py_DecRef(result);
-	result = NULL;
+	Py_CLEAR(result);
 
 finally:
 	PyMem_Free(output.dst);
--- a/contrib/python-zstandard/c-ext/decompressor.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/decompressor.c	Tue Apr 18 12:24:34 2017 -0400
@@ -7,19 +7,37 @@
 */
 
 #include "python-zstandard.h"
+#include "pool.h"
 
 extern PyObject* ZstdError;
 
-ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor) {
-	ZSTD_DStream* dstream;
+/**
+  * Ensure the ZSTD_DStream on a ZstdDecompressor is initialized and reset.
+  *
+  * This should be called before starting a decompression operation with a
+  * ZSTD_DStream on a ZstdDecompressor.
+  */
+int init_dstream(ZstdDecompressor* decompressor) {
 	void* dictData = NULL;
 	size_t dictSize = 0;
 	size_t zresult;
 
-	dstream = ZSTD_createDStream();
-	if (!dstream) {
+	/* Simple case of dstream already exists. Just reset it. */
+	if (decompressor->dstream) {
+		zresult = ZSTD_resetDStream(decompressor->dstream);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "could not reset DStream: %s",
+				ZSTD_getErrorName(zresult));
+			return -1;
+		}
+
+		return 0;
+	}
+
+	decompressor->dstream = ZSTD_createDStream();
+	if (!decompressor->dstream) {
 		PyErr_SetString(ZstdError, "could not create DStream");
-		return NULL;
+		return -1;
 	}
 
 	if (decompressor->dict) {
@@ -28,19 +46,23 @@
 	}
 
 	if (dictData) {
-		zresult = ZSTD_initDStream_usingDict(dstream, dictData, dictSize);
+		zresult = ZSTD_initDStream_usingDict(decompressor->dstream, dictData, dictSize);
 	}
 	else {
-		zresult = ZSTD_initDStream(dstream);
+		zresult = ZSTD_initDStream(decompressor->dstream);
 	}
 
 	if (ZSTD_isError(zresult)) {
+		/* Don't leave a reference to an invalid object. */
+		ZSTD_freeDStream(decompressor->dstream);
+		decompressor->dstream = NULL;
+
 		PyErr_Format(ZstdError, "could not initialize DStream: %s",
 			ZSTD_getErrorName(zresult));
-		return NULL;
+		return -1;
 	}
 
-	return dstream;
+	return 0;
 }
 
 PyDoc_STRVAR(Decompressor__doc__,
@@ -59,23 +81,19 @@
 
 	ZstdCompressionDict* dict = NULL;
 
-	self->refdctx = NULL;
+	self->dctx = NULL;
 	self->dict = NULL;
 	self->ddict = NULL;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!:ZstdDecompressor", kwlist,
 		&ZstdCompressionDictType, &dict)) {
 		return -1;
 	}
 
-	/* Instead of creating a ZSTD_DCtx for every decompression operation,
-	   we create an instance at object creation time and recycle it via
-	   ZSTD_copyDCTx() on each use. This means each use is a malloc+memcpy
-	   instead of a malloc+init. */
 	/* TODO lazily initialize the reference ZSTD_DCtx on first use since
 	   not all instances of ZstdDecompressor will use a ZSTD_DCtx. */
-	self->refdctx = ZSTD_createDCtx();
-	if (!self->refdctx) {
+	self->dctx = ZSTD_createDCtx();
+	if (!self->dctx) {
 		PyErr_NoMemory();
 		goto except;
 	}
@@ -88,26 +106,32 @@
 	return 0;
 
 except:
-	if (self->refdctx) {
-		ZSTD_freeDCtx(self->refdctx);
-		self->refdctx = NULL;
+	if (self->dctx) {
+		ZSTD_freeDCtx(self->dctx);
+		self->dctx = NULL;
 	}
 
 	return -1;
 }
 
 static void Decompressor_dealloc(ZstdDecompressor* self) {
-	if (self->refdctx) {
-		ZSTD_freeDCtx(self->refdctx);
-	}
-
-	Py_XDECREF(self->dict);
+	Py_CLEAR(self->dict);
 
 	if (self->ddict) {
 		ZSTD_freeDDict(self->ddict);
 		self->ddict = NULL;
 	}
 
+	if (self->dstream) {
+		ZSTD_freeDStream(self->dstream);
+		self->dstream = NULL;
+	}
+
+	if (self->dctx) {
+		ZSTD_freeDCtx(self->dctx);
+		self->dctx = NULL;
+	}
+
 	PyObject_Del(self);
 }
 
@@ -136,7 +160,6 @@
 	PyObject* dest;
 	size_t inSize = ZSTD_DStreamInSize();
 	size_t outSize = ZSTD_DStreamOutSize();
-	ZSTD_DStream* dstream;
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	Py_ssize_t totalRead = 0;
@@ -150,8 +173,8 @@
 	PyObject* totalReadPy;
 	PyObject* totalWritePy;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk", kwlist, &source,
-		&dest, &inSize, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|kk:copy_stream", kwlist,
+		&source, &dest, &inSize, &outSize)) {
 		return NULL;
 	}
 
@@ -168,8 +191,7 @@
 	/* Prevent free on uninitialized memory in finally. */
 	output.dst = NULL;
 
-	dstream = DStream_from_ZstdDecompressor(self);
-	if (!dstream) {
+	if (0 != init_dstream(self)) {
 		res = NULL;
 		goto finally;
 	}
@@ -207,7 +229,7 @@
 
 		while (input.pos < input.size) {
 			Py_BEGIN_ALLOW_THREADS
-			zresult = ZSTD_decompressStream(dstream, &output, &input);
+			zresult = ZSTD_decompressStream(self->dstream, &output, &input);
 			Py_END_ALLOW_THREADS
 
 			if (ZSTD_isError(zresult)) {
@@ -234,24 +256,17 @@
 
 	/* Source stream is exhausted. Finish up. */
 
-	ZSTD_freeDStream(dstream);
-	dstream = NULL;
-
 	totalReadPy = PyLong_FromSsize_t(totalRead);
 	totalWritePy = PyLong_FromSsize_t(totalWrite);
 	res = PyTuple_Pack(2, totalReadPy, totalWritePy);
-	Py_DecRef(totalReadPy);
-	Py_DecRef(totalWritePy);
+	Py_DECREF(totalReadPy);
+	Py_DECREF(totalWritePy);
 
-	finally:
+finally:
 	if (output.dst) {
 		PyMem_Free(output.dst);
 	}
 
-	if (dstream) {
-		ZSTD_freeDStream(dstream);
-	}
-
 	return res;
 }
 
@@ -291,28 +306,19 @@
 	unsigned long long decompressedSize;
 	size_t destCapacity;
 	PyObject* result = NULL;
-	ZSTD_DCtx* dctx = NULL;
 	void* dictData = NULL;
 	size_t dictSize = 0;
 	size_t zresult;
 
 #if PY_MAJOR_VERSION >= 3
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y#|n:decompress",
 #else
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n", kwlist,
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|n:decompress",
 #endif
-		&source, &sourceSize, &maxOutputSize)) {
+		kwlist, &source, &sourceSize, &maxOutputSize)) {
 		return NULL;
 	}
 
-	dctx = PyMem_Malloc(ZSTD_sizeof_DCtx(self->refdctx));
-	if (!dctx) {
-		PyErr_NoMemory();
-		return NULL;
-	}
-
-	ZSTD_copyDCtx(dctx, self->refdctx);
-
 	if (self->dict) {
 		dictData = self->dict->dictData;
 		dictSize = self->dict->dictSize;
@@ -320,12 +326,12 @@
 
 	if (dictData && !self->ddict) {
 		Py_BEGIN_ALLOW_THREADS
-		self->ddict = ZSTD_createDDict(dictData, dictSize);
+		self->ddict = ZSTD_createDDict_byReference(dictData, dictSize);
 		Py_END_ALLOW_THREADS
 
 		if (!self->ddict) {
 			PyErr_SetString(ZstdError, "could not create decompression dict");
-			goto except;
+			return NULL;
 		}
 	}
 
@@ -335,7 +341,7 @@
 		if (0 == maxOutputSize) {
 			PyErr_SetString(ZstdError, "input data invalid or missing content size "
 				"in frame header");
-			goto except;
+			return NULL;
 		}
 		else {
 			result = PyBytes_FromStringAndSize(NULL, maxOutputSize);
@@ -348,45 +354,39 @@
 	}
 
 	if (!result) {
-		goto except;
+		return NULL;
 	}
 
 	Py_BEGIN_ALLOW_THREADS
 	if (self->ddict) {
-		zresult = ZSTD_decompress_usingDDict(dctx, PyBytes_AsString(result), destCapacity,
+		zresult = ZSTD_decompress_usingDDict(self->dctx,
+			PyBytes_AsString(result), destCapacity,
 			source, sourceSize, self->ddict);
 	}
 	else {
-		zresult = ZSTD_decompressDCtx(dctx, PyBytes_AsString(result), destCapacity, source, sourceSize);
+		zresult = ZSTD_decompressDCtx(self->dctx,
+			PyBytes_AsString(result), destCapacity, source, sourceSize);
 	}
 	Py_END_ALLOW_THREADS
 
 	if (ZSTD_isError(zresult)) {
 		PyErr_Format(ZstdError, "decompression error: %s", ZSTD_getErrorName(zresult));
-		goto except;
+		Py_DECREF(result);
+		return NULL;
 	}
 	else if (decompressedSize && zresult != decompressedSize) {
 		PyErr_Format(ZstdError, "decompression error: decompressed %zu bytes; expected %llu",
 			zresult, decompressedSize);
-		goto except;
+		Py_DECREF(result);
+		return NULL;
 	}
 	else if (zresult < destCapacity) {
 		if (_PyBytes_Resize(&result, zresult)) {
-			goto except;
+			Py_DECREF(result);
+			return NULL;
 		}
 	}
 
-	goto finally;
-
-except:
-	Py_DecRef(result);
-	result = NULL;
-
-finally:
-	if (dctx) {
-		PyMem_FREE(dctx);
-	}
-
 	return result;
 }
 
@@ -401,22 +401,19 @@
 );
 
 static ZstdDecompressionObj* Decompressor_decompressobj(ZstdDecompressor* self) {
-	ZstdDecompressionObj* result = PyObject_New(ZstdDecompressionObj, &ZstdDecompressionObjType);
+	ZstdDecompressionObj* result = (ZstdDecompressionObj*)PyObject_CallObject((PyObject*)&ZstdDecompressionObjType, NULL);
 	if (!result) {
 		return NULL;
 	}
 
-	result->dstream = DStream_from_ZstdDecompressor(self);
-	if (!result->dstream) {
-		Py_DecRef((PyObject*)result);
+	if (0 != init_dstream(self)) {
+		Py_DECREF(result);
 		return NULL;
 	}
 
 	result->decompressor = self;
 	Py_INCREF(result->decompressor);
 
-	result->finished = 0;
-
 	return result;
 }
 
@@ -455,8 +452,8 @@
 	ZstdDecompressorIterator* result;
 	size_t skipBytes = 0;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk", kwlist, &reader,
-		&inSize, &outSize, &skipBytes)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|kkk:read_from", kwlist,
+		&reader, &inSize, &outSize, &skipBytes)) {
 		return NULL;
 	}
 
@@ -466,18 +463,11 @@
 		return NULL;
 	}
 
-	result = PyObject_New(ZstdDecompressorIterator, &ZstdDecompressorIteratorType);
+	result = (ZstdDecompressorIterator*)PyObject_CallObject((PyObject*)&ZstdDecompressorIteratorType, NULL);
 	if (!result) {
 		return NULL;
 	}
 
-	result->decompressor = NULL;
-	result->reader = NULL;
-	result->buffer = NULL;
-	result->dstream = NULL;
-	result->input.src = NULL;
-	result->output.dst = NULL;
-
 	if (PyObject_HasAttrString(reader, "read")) {
 		result->reader = reader;
 		Py_INCREF(result->reader);
@@ -494,8 +484,6 @@
 		if (0 != PyObject_GetBuffer(reader, result->buffer, PyBUF_CONTIG_RO)) {
 			goto except;
 		}
-
-		result->bufferOffset = 0;
 	}
 	else {
 		PyErr_SetString(PyExc_ValueError,
@@ -510,8 +498,7 @@
 	result->outSize = outSize;
 	result->skipBytes = skipBytes;
 
-	result->dstream = DStream_from_ZstdDecompressor(self);
-	if (!result->dstream) {
+	if (0 != init_dstream(self)) {
 		goto except;
 	}
 
@@ -520,33 +507,18 @@
 		PyErr_NoMemory();
 		goto except;
 	}
-	result->input.size = 0;
-	result->input.pos = 0;
-
-	result->output.dst = NULL;
-	result->output.size = 0;
-	result->output.pos = 0;
-
-	result->readCount = 0;
-	result->finishedInput = 0;
-	result->finishedOutput = 0;
 
 	goto finally;
 
 except:
-	if (result->reader) {
-		Py_DECREF(result->reader);
-		result->reader = NULL;
-	}
+	Py_CLEAR(result->reader);
 
 	if (result->buffer) {
 		PyBuffer_Release(result->buffer);
-		Py_DECREF(result->buffer);
-		result->buffer = NULL;
+		Py_CLEAR(result->buffer);
 	}
 
-	Py_DECREF(result);
-	result = NULL;
+	Py_CLEAR(result);
 
 finally:
 
@@ -577,7 +549,8 @@
 	size_t outSize = ZSTD_DStreamOutSize();
 	ZstdDecompressionWriter* result;
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k", kwlist, &writer, &outSize)) {
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|k:write_to", kwlist,
+		&writer, &outSize)) {
 		return NULL;
 	}
 
@@ -586,7 +559,7 @@
 		return NULL;
 	}
 
-	result = PyObject_New(ZstdDecompressionWriter, &ZstdDecompressionWriterType);
+	result = (ZstdDecompressionWriter*)PyObject_CallObject((PyObject*)&ZstdDecompressionWriterType, NULL);
 	if (!result) {
 		return NULL;
 	}
@@ -599,8 +572,939 @@
 
 	result->outSize = outSize;
 
-	result->entered = 0;
-	result->dstream = NULL;
+	return result;
+}
+
+PyDoc_STRVAR(Decompressor_decompress_content_dict_chain__doc__,
+"Decompress a series of chunks using the content dictionary chaining technique\n"
+);
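+
+/*
+ * Hypothetical usage sketch (Python level, not part of this change): the
+ * first frame is a standalone zstd frame; each subsequent frame was
+ * compressed using the previous fulltext as a dictionary.
+ *
+ *   dctx = zstd.ZstdDecompressor()
+ *   fulltext = dctx.decompress_content_dict_chain(frames=[frame0, frame1])
+ */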
+
+static PyObject* Decompressor_decompress_content_dict_chain(PyObject* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"frames",
+		NULL
+	};
+
+	PyObject* chunks;
+	Py_ssize_t chunksLen;
+	Py_ssize_t chunkIndex;
+	char parity = 0;
+	PyObject* chunk;
+	char* chunkData;
+	Py_ssize_t chunkSize;
+	ZSTD_DCtx* dctx = NULL;
+	size_t zresult;
+	ZSTD_frameParams frameParams;
+	void* buffer1 = NULL;
+	size_t buffer1Size = 0;
+	size_t buffer1ContentSize = 0;
+	void* buffer2 = NULL;
+	size_t buffer2Size = 0;
+	size_t buffer2ContentSize = 0;
+	void* destBuffer = NULL;
+	PyObject* result = NULL;
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!:decompress_content_dict_chain",
+		kwlist, &PyList_Type, &chunks)) {
+		return NULL;
+	}
+
+	chunksLen = PyList_Size(chunks);
+	if (!chunksLen) {
+		PyErr_SetString(PyExc_ValueError, "empty input chain");
+		return NULL;
+	}
+
+	/* The first chunk should not be using a dictionary. We handle it specially. */
+	chunk = PyList_GetItem(chunks, 0);
+	if (!PyBytes_Check(chunk)) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 must be bytes");
+		return NULL;
+	}
+
+	/* We require that all chunks be zstd frames and that they have content size set. */
+	PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+	zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+	if (ZSTD_isError(zresult)) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 is not a valid zstd frame");
+		return NULL;
+	}
+	else if (zresult) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 is too small to contain a zstd frame");
+		return NULL;
+	}
+
+	if (0 == frameParams.frameContentSize) {
+		PyErr_SetString(PyExc_ValueError, "chunk 0 missing content size in frame");
+		return NULL;
+	}
+
+	dctx = ZSTD_createDCtx();
+	if (!dctx) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	buffer1Size = frameParams.frameContentSize;
+	buffer1 = PyMem_Malloc(buffer1Size);
+	if (!buffer1) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_decompressDCtx(dctx, buffer1, buffer1Size, chunkData, chunkSize);
+	Py_END_ALLOW_THREADS
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "could not decompress chunk 0: %s", ZSTD_getErrorName(zresult));
+		goto finally;
+	}
+
+	buffer1ContentSize = zresult;
+
+	/* Special case of a simple chain. */
+	if (1 == chunksLen) {
+		result = PyBytes_FromStringAndSize(buffer1, buffer1ContentSize);
+		goto finally;
+	}
+
+	/* This should ideally look at the next chunk. But this is slightly simpler. */
+	buffer2Size = frameParams.frameContentSize;
+	buffer2 = PyMem_Malloc(buffer2Size);
+	if (!buffer2) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	/* For each subsequent chunk, use the previous fulltext as a content dictionary.
+	   Our strategy is to have 2 buffers. One holds the previous fulltext (to be
+	   used as a content dictionary) and the other holds the new fulltext. The
+	   buffers grow when needed but never decrease in size. This limits the
+	   memory allocator overhead.
+	*/
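+	/*
+	 * Example of the ping-pong (indexes per the loop below): chunk 1
+	 * decompresses into buffer2 using buffer1 as its dictionary; chunk 2
+	 * decompresses into buffer1 using buffer2; and so on, with parity
+	 * selecting which buffer holds the final fulltext.
+	 */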
+	for (chunkIndex = 1; chunkIndex < chunksLen; chunkIndex++) {
+		chunk = PyList_GetItem(chunks, chunkIndex);
+		if (!PyBytes_Check(chunk)) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd must be bytes", chunkIndex);
+			goto finally;
+		}
+
+		PyBytes_AsStringAndSize(chunk, &chunkData, &chunkSize);
+		zresult = ZSTD_getFrameParams(&frameParams, (void*)chunkData, chunkSize);
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd is not a valid zstd frame", chunkIndex);
+			goto finally;
+		}
+		else if (zresult) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd is too small to contain a zstd frame", chunkIndex);
+			goto finally;
+		}
+
+		if (0 == frameParams.frameContentSize) {
+			PyErr_Format(PyExc_ValueError, "chunk %zd missing content size in frame", chunkIndex);
+			goto finally;
+		}
+
+		parity = chunkIndex % 2;
+
+		/* This could definitely be abstracted to reduce code duplication. */
+		if (parity) {
+			/* Resize destination buffer to hold larger content. */
+			if (buffer2Size < frameParams.frameContentSize) {
+				buffer2Size = frameParams.frameContentSize;
+				destBuffer = PyMem_Realloc(buffer2, buffer2Size);
+				if (!destBuffer) {
+					PyErr_NoMemory();
+					goto finally;
+				}
+				buffer2 = destBuffer;
+			}
+
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_decompress_usingDict(dctx, buffer2, buffer2Size,
+				chunkData, chunkSize, buffer1, buffer1ContentSize);
+			Py_END_ALLOW_THREADS
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+					chunkIndex, ZSTD_getErrorName(zresult));
+				goto finally;
+			}
+			buffer2ContentSize = zresult;
+		}
+		else {
+			if (buffer1Size < frameParams.frameContentSize) {
+				buffer1Size = frameParams.frameContentSize;
+				destBuffer = PyMem_Realloc(buffer1, buffer1Size);
+				if (!destBuffer) {
+					PyErr_NoMemory();
+					goto finally;
+				}
+				buffer1 = destBuffer;
+			}
+
+			Py_BEGIN_ALLOW_THREADS
+			zresult = ZSTD_decompress_usingDict(dctx, buffer1, buffer1Size,
+				chunkData, chunkSize, buffer2, buffer2ContentSize);
+			Py_END_ALLOW_THREADS
+			if (ZSTD_isError(zresult)) {
+				PyErr_Format(ZstdError, "could not decompress chunk %zd: %s",
+					chunkIndex, ZSTD_getErrorName(zresult));
+				goto finally;
+			}
+			buffer1ContentSize = zresult;
+		}
+	}
+
+	result = PyBytes_FromStringAndSize(parity ? buffer2 : buffer1,
+		parity ? buffer2ContentSize : buffer1ContentSize);
+
+finally:
+	if (buffer2) {
+		PyMem_Free(buffer2);
+	}
+	if (buffer1) {
+		PyMem_Free(buffer1);
+	}
+
+	if (dctx) {
+		ZSTD_freeDCtx(dctx);
+	}
+
+	return result;
+}
+
+typedef struct {
+	void* sourceData;
+	size_t sourceSize;
+	unsigned long long destSize;
+} FramePointer;
+
+typedef struct {
+	FramePointer* frames;
+	Py_ssize_t framesSize;
+	unsigned long long compressedSize;
+} FrameSources;
+
+typedef struct {
+	void* dest;
+	Py_ssize_t destSize;
+	BufferSegment* segments;
+	Py_ssize_t segmentsSize;
+} DestBuffer;
+
+typedef enum {
+	WorkerError_none = 0,
+	WorkerError_zstd = 1,
+	WorkerError_memory = 2,
+	WorkerError_sizeMismatch = 3,
+	WorkerError_unknownSize = 4,
+} WorkerError;
+
+typedef struct {
+	/* Source records and length */
+	FramePointer* framePointers;
+	/* Which records to process. */
+	Py_ssize_t startOffset;
+	Py_ssize_t endOffset;
+	unsigned long long totalSourceSize;
+
+	/* Compression state and settings. */
+	ZSTD_DCtx* dctx;
+	ZSTD_DDict* ddict;
+	int requireOutputSizes;
+
+	/* Output storage. */
+	DestBuffer* destBuffers;
+	Py_ssize_t destCount;
+
+	/* Item that error occurred on. */
+	Py_ssize_t errorOffset;
+	/* If an error occurred. */
+	WorkerError error;
+	/* Result from zstd decompression operation. */
+	size_t zresult;
+} WorkerState;
+
+static void decompress_worker(WorkerState* state) {
+	size_t allocationSize;
+	DestBuffer* destBuffer;
+	Py_ssize_t frameIndex;
+	Py_ssize_t localOffset = 0;
+	Py_ssize_t currentBufferStartIndex = state->startOffset;
+	Py_ssize_t remainingItems = state->endOffset - state->startOffset + 1;
+	void* tmpBuf;
+	Py_ssize_t destOffset = 0;
+	FramePointer* framePointers = state->framePointers;
+	size_t zresult;
+	unsigned long long totalOutputSize = 0;
+
+	assert(NULL == state->destBuffers);
+	assert(0 == state->destCount);
+	assert(state->endOffset - state->startOffset >= 0);
+
+	/*
+	 * We need to allocate a buffer to hold decompressed data. How we do this
+	 * depends on what we know about the output. The following scenarios are
+	 * possible:
+	 *
+	 * 1. All structs defining frames declare the output size.
+	 * 2. The decompressed size is embedded within the zstd frame.
+	 * 3. The decompressed size is not stored anywhere.
+	 *
+	 * For now, we only support #1 and #2.
+	 */
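+	/*
+	 * For example (scenario 2), a frame written with write_content_size=True
+	 * embeds its decompressed size in the frame header, so
+	 * ZSTD_getDecompressedSize() below can recover it; a frame without a
+	 * stored size yields 0 and, when requireOutputSizes is set, results in
+	 * WorkerError_unknownSize.
+	 */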
+
+	/* Resolve output segments. */
+	for (frameIndex = state->startOffset; frameIndex <= state->endOffset; frameIndex++) {
+		FramePointer* fp = &framePointers[frameIndex];
+
+		if (0 == fp->destSize) {
+			fp->destSize = ZSTD_getDecompressedSize(fp->sourceData, fp->sourceSize);
+			if (0 == fp->destSize && state->requireOutputSizes) {
+				state->error = WorkerError_unknownSize;
+				state->errorOffset = frameIndex;
+				return;
+			}
+		}
+
+		totalOutputSize += fp->destSize;
+	}
+
+	state->destBuffers = calloc(1, sizeof(DestBuffer));
+	if (NULL == state->destBuffers) {
+		state->error = WorkerError_memory;
+		return;
+	}
+
+	state->destCount = 1;
+
+	destBuffer = &state->destBuffers[state->destCount - 1];
+
+	assert(framePointers[state->startOffset].destSize > 0); /* For now. */
+
+	allocationSize = roundpow2(state->totalSourceSize);
+
+	if (framePointers[state->startOffset].destSize > allocationSize) {
+		allocationSize = roundpow2(framePointers[state->startOffset].destSize);
+	}
+
+	destBuffer->dest = malloc(allocationSize);
+	if (NULL == destBuffer->dest) {
+		state->error = WorkerError_memory;
+		return;
+	}
+
+	destBuffer->destSize = allocationSize;
+
+	destBuffer->segments = calloc(remainingItems, sizeof(BufferSegment));
+	if (NULL == destBuffer->segments) {
+		/* Caller will free state->dest as part of cleanup. */
+		state->error = WorkerError_memory;
+		return;
+	}
+
+	destBuffer->segmentsSize = remainingItems;
+
+	for (frameIndex = state->startOffset; frameIndex <= state->endOffset; frameIndex++) {
+		const void* source = framePointers[frameIndex].sourceData;
+		const size_t sourceSize = framePointers[frameIndex].sourceSize;
+		void* dest;
+		const size_t decompressedSize = framePointers[frameIndex].destSize;
+		size_t destAvailable = destBuffer->destSize - destOffset;
+
+		assert(decompressedSize > 0); /* For now. */
+
+		/*
+		 * Not enough space in the current buffer. Finish the current one,
+		 * then allocate and switch to a new one.
+		 */
+		if (decompressedSize > destAvailable) {
+			/*
+			 * Shrinking the destination buffer is optional. But it should be cheap,
+			 * so we just do it.
+			 */
+			if (destAvailable) {
+				tmpBuf = realloc(destBuffer->dest, destOffset);
+				if (NULL == tmpBuf) {
+					state->error = WorkerError_memory;
+					return;
+				}
+
+				destBuffer->dest = tmpBuf;
+				destBuffer->destSize = destOffset;
+			}
+
+			/* Truncate segments buffer. */
+			tmpBuf = realloc(destBuffer->segments,
+				(frameIndex - currentBufferStartIndex) * sizeof(BufferSegment));
+			if (NULL == tmpBuf) {
+				state->error = WorkerError_memory;
+				return;
+			}
+
+			destBuffer->segments = tmpBuf;
+			destBuffer->segmentsSize = frameIndex - currentBufferStartIndex;
+
+			/* Grow space for new DestBuffer. */
+			tmpBuf = realloc(state->destBuffers, (state->destCount + 1) * sizeof(DestBuffer));
+			if (NULL == tmpBuf) {
+				state->error = WorkerError_memory;
+				return;
+			}
+
+			state->destBuffers = tmpBuf;
+			state->destCount++;
+
+			destBuffer = &state->destBuffers[state->destCount - 1];
+
+			/* Don't take any chances with non-NULL pointers. */
+			memset(destBuffer, 0, sizeof(DestBuffer));
+
+			allocationSize = roundpow2(state->totalSourceSize);
+
+			if (decompressedSize > allocationSize) {
+				allocationSize = roundpow2(decompressedSize);
+			}
+
+			destBuffer->dest = malloc(allocationSize);
+			if (NULL == destBuffer->dest) {
+				state->error = WorkerError_memory;
+				return;
+			}
+
+			destBuffer->destSize = allocationSize;
+			destAvailable = allocationSize;
+			destOffset = 0;
+			localOffset = 0;
+
+			destBuffer->segments = calloc(remainingItems, sizeof(BufferSegment));
+			if (NULL == destBuffer->segments) {
+				state->error = WorkerError_memory;
+				return;
+			}
+
+			destBuffer->segmentsSize = remainingItems;
+			currentBufferStartIndex = frameIndex;
+		}
+
+		dest = (char*)destBuffer->dest + destOffset;
+
+		if (state->ddict) {
+			zresult = ZSTD_decompress_usingDDict(state->dctx, dest, decompressedSize,
+				source, sourceSize, state->ddict);
+		}
+		else {
+			zresult = ZSTD_decompressDCtx(state->dctx, dest, decompressedSize,
+				source, sourceSize);
+		}
+
+		if (ZSTD_isError(zresult)) {
+			state->error = WorkerError_zstd;
+			state->zresult = zresult;
+			state->errorOffset = frameIndex;
+			return;
+		}
+		else if (zresult != decompressedSize) {
+			state->error = WorkerError_sizeMismatch;
+			state->zresult = zresult;
+			state->errorOffset = frameIndex;
+			return;
+		}
+
+		destBuffer->segments[localOffset].offset = destOffset;
+		destBuffer->segments[localOffset].length = decompressedSize;
+		destOffset += zresult;
+		localOffset++;
+		remainingItems--;
+	}
+
+	if (destBuffer->destSize > destOffset) {
+		tmpBuf = realloc(destBuffer->dest, destOffset);
+		if (NULL == tmpBuf) {
+			state->error = WorkerError_memory;
+			return;
+		}
+
+		destBuffer->dest = tmpBuf;
+		destBuffer->destSize = destOffset;
+	}
+}
+
+ZstdBufferWithSegmentsCollection* decompress_from_framesources(ZstdDecompressor* decompressor, FrameSources* frames,
+	unsigned int threadCount) {
+	void* dictData = NULL;
+	size_t dictSize = 0;
+	Py_ssize_t i = 0;
+	int errored = 0;
+	Py_ssize_t segmentsCount;
+	ZstdBufferWithSegments* bws = NULL;
+	PyObject* resultArg = NULL;
+	Py_ssize_t resultIndex;
+	ZstdBufferWithSegmentsCollection* result = NULL;
+	FramePointer* framePointers = frames->frames;
+	unsigned long long workerBytes = 0;
+	int currentThread = 0;
+	Py_ssize_t workerStartOffset = 0;
+	POOL_ctx* pool = NULL;
+	WorkerState* workerStates = NULL;
+	unsigned long long bytesPerWorker;
+
+	/* Caller should normalize 0 and negative values to 1 or larger. */
+	assert(threadCount >= 1);
+
+	/* Using more threads than inputs makes no sense under any conditions. */
+	threadCount = frames->framesSize < threadCount ? (unsigned int)frames->framesSize
+												   : threadCount;
+
+	/* TODO lower thread count if input size is too small and threads would just
+	   add overhead. */
+
+	if (decompressor->dict) {
+		dictData = decompressor->dict->dictData;
+		dictSize = decompressor->dict->dictSize;
+	}
+
+	if (dictData && !decompressor->ddict) {
+		Py_BEGIN_ALLOW_THREADS
+		decompressor->ddict = ZSTD_createDDict_byReference(dictData, dictSize);
+		Py_END_ALLOW_THREADS
+
+		if (!decompressor->ddict) {
+			PyErr_SetString(ZstdError, "could not create decompression dict");
+			return NULL;
+		}
+	}
+
+	/* If threadCount==1, we don't start a thread pool. But we do leverage the
+	   same API for dispatching work. */
+	workerStates = PyMem_Malloc(threadCount * sizeof(WorkerState));
+	if (NULL == workerStates) {
+		PyErr_NoMemory();
+		goto finally;
+	}
+
+	memset(workerStates, 0, threadCount * sizeof(WorkerState));
+
+	if (threadCount > 1) {
+		pool = POOL_create(threadCount, 1);
+		if (NULL == pool) {
+			PyErr_SetString(ZstdError, "could not initialize zstd thread pool");
+			goto finally;
+		}
+	}
+
+	bytesPerWorker = frames->compressedSize / threadCount;
+
+	for (i = 0; i < threadCount; i++) {
+		workerStates[i].dctx = ZSTD_createDCtx();
+		if (NULL == workerStates[i].dctx) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		ZSTD_copyDCtx(workerStates[i].dctx, decompressor->dctx);
+
+		workerStates[i].ddict = decompressor->ddict;
+		workerStates[i].framePointers = framePointers;
+		workerStates[i].requireOutputSizes = 1;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	/* There are many ways to split work among workers.
+
+	   For now, we take a simple approach of splitting work so each worker
+	   gets roughly the same number of input bytes. This starves workers more
+	   than running N > threadCount smaller jobs would. But it avoids complications
+	   around state tracking, which could involve extra locking.
+	*/
+	for (i = 0; i < frames->framesSize; i++) {
+		workerBytes += frames->frames[i].sourceSize;
+
+		/*
+		 * The last worker/thread needs to handle all remaining work. Don't
+		 * trigger it prematurely. Defer to the block outside of the loop.
+		 * (But still run this loop body so workerBytes stays correct.)
+		 */
+		if (currentThread == threadCount - 1) {
+			continue;
+		}
+
+		if (workerBytes >= bytesPerWorker) {
+			workerStates[currentThread].startOffset = workerStartOffset;
+			workerStates[currentThread].endOffset = i;
+			workerStates[currentThread].totalSourceSize = workerBytes;
+
+			if (threadCount > 1) {
+				POOL_add(pool, (POOL_function)decompress_worker, &workerStates[currentThread]);
+			}
+			else {
+				decompress_worker(&workerStates[currentThread]);
+			}
+			currentThread++;
+			workerStartOffset = i + 1;
+			workerBytes = 0;
+		}
+	}
+
+	if (workerBytes) {
+		workerStates[currentThread].startOffset = workerStartOffset;
+		workerStates[currentThread].endOffset = frames->framesSize - 1;
+		workerStates[currentThread].totalSourceSize = workerBytes;
+
+		if (threadCount > 1) {
+			POOL_add(pool, (POOL_function)decompress_worker, &workerStates[currentThread]);
+		}
+		else {
+			decompress_worker(&workerStates[currentThread]);
+		}
+	}
+
+	if (threadCount > 1) {
+		POOL_free(pool);
+		pool = NULL;
+	}
+	Py_END_ALLOW_THREADS
+
+	for (i = 0; i < threadCount; i++) {
+		switch (workerStates[i].error) {
+		case WorkerError_none:
+			break;
+
+		case WorkerError_zstd:
+			PyErr_Format(ZstdError, "error decompressing item %zd: %s",
+				workerStates[i].errorOffset, ZSTD_getErrorName(workerStates[i].zresult));
+			errored = 1;
+			break;
+
+		case WorkerError_memory:
+			PyErr_NoMemory();
+			errored = 1;
+			break;
+
+		case WorkerError_sizeMismatch:
+			PyErr_Format(ZstdError, "error decompressing item %zd: decompressed %zu bytes; expected %llu",
+				workerStates[i].errorOffset, workerStates[i].zresult,
+				framePointers[workerStates[i].errorOffset].destSize);
+			errored = 1;
+			break;
+
+		case WorkerError_unknownSize:
+			PyErr_Format(PyExc_ValueError, "could not determine decompressed size of item %zd",
+				workerStates[i].errorOffset);
+			errored = 1;
+			break;
+
+		default:
+			PyErr_Format(ZstdError, "unhandled error type: %d; this is a bug",
+				workerStates[i].error);
+			errored = 1;
+			break;
+		}
+
+		if (errored) {
+			break;
+		}
+	}
+
+	if (errored) {
+		goto finally;
+	}
+
+	segmentsCount = 0;
+	for (i = 0; i < threadCount; i++) {
+		segmentsCount += workerStates[i].destCount;
+	}
+
+	resultArg = PyTuple_New(segmentsCount);
+	if (NULL == resultArg) {
+		goto finally;
+	}
+
+	resultIndex = 0;
+
+	for (i = 0; i < threadCount; i++) {
+		Py_ssize_t bufferIndex;
+		WorkerState* state = &workerStates[i];
+
+		for (bufferIndex = 0; bufferIndex < state->destCount; bufferIndex++) {
+			DestBuffer* destBuffer = &state->destBuffers[bufferIndex];
+
+			bws = BufferWithSegments_FromMemory(destBuffer->dest, destBuffer->destSize,
+				destBuffer->segments, destBuffer->segmentsSize);
+			if (NULL == bws) {
+				goto finally;
+			}
+
+			/*
+			 * Memory for the buffer and segments was allocated with malloc() in the
+			 * worker and ownership transfers to the BufferWithSegments instance. So
+			 * tell the instance to use free() and NULL the references in the state
+			 * struct so they aren't freed below.
+			 */
+			bws->useFree = 1;
+			destBuffer->dest = NULL;
+			destBuffer->segments = NULL;
+
+			PyTuple_SET_ITEM(resultArg, resultIndex++, (PyObject*)bws);
+		}
+	}
+
+	result = (ZstdBufferWithSegmentsCollection*)PyObject_CallObject(
+		(PyObject*)&ZstdBufferWithSegmentsCollectionType, resultArg);
+
+finally:
+	Py_CLEAR(resultArg);
+
+	if (workerStates) {
+		for (i = 0; i < threadCount; i++) {
+			Py_ssize_t bufferIndex;
+			WorkerState* state = &workerStates[i];
+
+			if (state->dctx) {
+				ZSTD_freeDCtx(state->dctx);
+			}
+
+			for (bufferIndex = 0; bufferIndex < state->destCount; bufferIndex++) {
+				if (state->destBuffers) {
+					/*
+					 * These will be NULL if memory was transferred to a
+					 * BufferWithSegments. Otherwise they are left over after an
+					 * error occurred.
+					 */
+					free(state->destBuffers[bufferIndex].dest);
+					free(state->destBuffers[bufferIndex].segments);
+				}
+			}
+
+			free(state->destBuffers);
+		}
+
+		PyMem_Free(workerStates);
+	}
+
+	POOL_free(pool);
+
+	return result;
+}
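
For intuition, the byte-based work splitting implemented above can be sketched
in a few lines of Python. This is illustrative only (the function and variable
names are hypothetical, not part of the patch):

    def partition(frame_sizes, thread_count):
        # Mirror the C loop: accumulate input bytes until a worker's share
        # (total / thread_count) is reached, then start the next group. The
        # last worker absorbs all remaining frames.
        per_worker = sum(frame_sizes) // thread_count
        groups, start, acc = [], 0, 0
        for i, size in enumerate(frame_sizes):
            acc += size
            if len(groups) == thread_count - 1:
                continue
            if acc >= per_worker:
                groups.append((start, i))
                start, acc = i + 1, 0
        if acc:
            groups.append((start, len(frame_sizes) - 1))
        return groups

    assert partition([4, 4, 4, 4], 2) == [(0, 1), (2, 3)]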
+
+PyDoc_STRVAR(Decompressor_multi_decompress_to_buffer__doc__,
+"Decompress multiple frames to output buffers\n"
+"\n"
+"Receives a ``BufferWithSegments``, a ``BufferWithSegmentsCollection`` or a\n"
+"list of bytes-like objects. Each item in the passed collection should be a\n"
+"compressed zstd frame.\n"
+"\n"
+"Unless ``decompressed_sizes`` is specified, the content size *must* be\n"
+"written into the zstd frame header. If ``decompressed_sizes`` is specified,\n"
+"it is an object conforming to the buffer protocol that represents an array\n"
+"of 64-bit unsigned integers in the machine's native format. Specifying\n"
+"``decompressed_sizes`` avoids a pre-scan of each frame to determine its\n"
+"output size.\n"
+"\n"
+"Returns a ``BufferWithSegmentsCollection`` containing the decompressed\n"
+"data. All decompressed data is allocated in a single memory buffer. The\n"
+"``BufferWithSegments`` instance tracks which objects are at which offsets\n"
+"and their respective lengths.\n"
+"\n"
+"The ``threads`` argument controls how many threads to use for operations.\n"
+"Negative values will use the same number of threads as logical CPUs on the\n"
+"machine.\n"
+);
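
As a usage sketch of the documented API (the inputs here assume frames written
with content sizes; names and values are illustrative):

    import struct
    import zstd

    cctx = zstd.ZstdCompressor(write_content_size=True)
    frames = [cctx.compress(b'foo' * 64), cctx.compress(b'bar' * 64)]

    dctx = zstd.ZstdDecompressor()
    result = dctx.multi_decompress_to_buffer(frames, threads=-1)
    assert result[0].tobytes() == b'foo' * 64

    # Alternatively, pass the known output sizes as native 64-bit unsigned
    # integers to skip the frame-header pre-scan described above.
    sizes = struct.pack('=QQ', 192, 192)
    result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)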
+
+static ZstdBufferWithSegmentsCollection* Decompressor_multi_decompress_to_buffer(ZstdDecompressor* self, PyObject* args, PyObject* kwargs) {
+	static char* kwlist[] = {
+		"frames",
+		"decompressed_sizes",
+		"threads",
+		NULL
+	};
+
+	PyObject* frames;
+	Py_buffer frameSizes;
+	int threads = 0;
+	Py_ssize_t frameCount;
+	Py_buffer* frameBuffers = NULL;
+	FramePointer* framePointers = NULL;
+	unsigned long long* frameSizesP = NULL;
+	unsigned long long totalInputSize = 0;
+	FrameSources frameSources;
+	ZstdBufferWithSegmentsCollection* result = NULL;
+	Py_ssize_t i;
+
+	memset(&frameSizes, 0, sizeof(frameSizes));
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|y*i:multi_decompress_to_buffer",
+#else
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|s*i:multi_decompress_to_buffer",
+#endif
+		kwlist, &frames, &frameSizes, &threads)) {
+		return NULL;
+	}
+
+	if (frameSizes.buf) {
+		if (!PyBuffer_IsContiguous(&frameSizes, 'C') || frameSizes.ndim > 1) {
+			PyErr_SetString(PyExc_ValueError, "decompressed_sizes buffer should be contiguous and have a single dimension");
+			goto finally;
+		}
+
+		frameSizesP = (unsigned long long*)frameSizes.buf;
+	}
+
+	if (threads < 0) {
+		threads = cpu_count();
+	}
+
+	if (threads < 2) {
+		threads = 1;
+	}
+
+	if (PyObject_TypeCheck(frames, &ZstdBufferWithSegmentsType)) {
+		ZstdBufferWithSegments* buffer = (ZstdBufferWithSegments*)frames;
+		frameCount = buffer->segmentCount;
+
+		if (frameSizes.buf && frameSizes.len != frameCount * (Py_ssize_t)sizeof(unsigned long long)) {
+			PyErr_Format(PyExc_ValueError, "decompressed_sizes size mismatch; expected %zd, got %zd",
+				frameCount * sizeof(unsigned long long), frameSizes.len);
+			goto finally;
+		}
+
+		framePointers = PyMem_Malloc(frameCount * sizeof(FramePointer));
+		if (!framePointers) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		for (i = 0; i < frameCount; i++) {
+			void* sourceData;
+			unsigned long long sourceSize;
+			unsigned long long decompressedSize = 0;
+
+			if (buffer->segments[i].offset + buffer->segments[i].length > buffer->dataSize) {
+				PyErr_Format(PyExc_ValueError, "item %zd has offset outside memory area", i);
+				goto finally;
+			}
+
+			sourceData = (char*)buffer->data + buffer->segments[i].offset;
+			sourceSize = buffer->segments[i].length;
+			totalInputSize += sourceSize;
+
+			if (frameSizesP) {
+				decompressedSize = frameSizesP[i];
+			}
+
+			framePointers[i].sourceData = sourceData;
+			framePointers[i].sourceSize = sourceSize;
+			framePointers[i].destSize = decompressedSize;
+		}
+	}
+	else if (PyObject_TypeCheck(frames, &ZstdBufferWithSegmentsCollectionType)) {
+		Py_ssize_t offset = 0;
+		ZstdBufferWithSegments* buffer;
+		ZstdBufferWithSegmentsCollection* collection = (ZstdBufferWithSegmentsCollection*)frames;
+
+		frameCount = BufferWithSegmentsCollection_length(collection);
+
+		if (frameSizes.buf && frameSizes.len != frameCount * (Py_ssize_t)sizeof(unsigned long long)) {
+			PyErr_Format(PyExc_ValueError,
+				"decompressed_sizes size mismatch; expected %zd; got %zd",
+				frameCount * sizeof(unsigned long long), frameSizes.len);
+			goto finally;
+		}
+
+		framePointers = PyMem_Malloc(frameCount * sizeof(FramePointer));
+		if (NULL == framePointers) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		/* Iterate the data structure directly because it is faster. */
+		for (i = 0; i < collection->bufferCount; i++) {
+			Py_ssize_t segmentIndex;
+			buffer = collection->buffers[i];
+
+			for (segmentIndex = 0; segmentIndex < buffer->segmentCount; segmentIndex++) {
+				if (buffer->segments[segmentIndex].offset + buffer->segments[segmentIndex].length > buffer->dataSize) {
+					PyErr_Format(PyExc_ValueError, "item %zd has offset outside memory area",
+						offset);
+					goto finally;
+				}
+
+				totalInputSize += buffer->segments[segmentIndex].length;
+
+				framePointers[offset].sourceData = (char*)buffer->data + buffer->segments[segmentIndex].offset;
+				framePointers[offset].sourceSize = buffer->segments[segmentIndex].length;
+				framePointers[offset].destSize = frameSizesP ? frameSizesP[offset] : 0;
+
+				offset++;
+			}
+		}
+	}
+	else if (PyList_Check(frames)) {
+		frameCount = PyList_GET_SIZE(frames);
+
+		if (frameSizes.buf && frameSizes.len != frameCount * (Py_ssize_t)sizeof(unsigned long long)) {
+			PyErr_Format(PyExc_ValueError, "decompressed_sizes size mismatch; expected %zd, got %zd",
+				frameCount * sizeof(unsigned long long), frameSizes.len);
+			goto finally;
+		}
+
+		framePointers = PyMem_Malloc(frameCount * sizeof(FramePointer));
+		if (!framePointers) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		/*
+		 * It is not clear whether Py_buffer.buf is still valid after
+		 * PyBuffer_Release. So, we hold a reference to all Py_buffer instances
+		 * for the duration of the operation.
+		 */
+		frameBuffers = PyMem_Malloc(frameCount * sizeof(Py_buffer));
+		if (NULL == frameBuffers) {
+			PyErr_NoMemory();
+			goto finally;
+		}
+
+		memset(frameBuffers, 0, frameCount * sizeof(Py_buffer));
+
+		/* Do a pass to assemble info about our input buffers and output sizes. */
+		for (i = 0; i < frameCount; i++) {
+			if (0 != PyObject_GetBuffer(PyList_GET_ITEM(frames, i),
+				&frameBuffers[i], PyBUF_CONTIG_RO)) {
+				PyErr_Clear();
+				PyErr_Format(PyExc_TypeError, "item %zd not a bytes like object", i);
+				goto finally;
+			}
+
+			totalInputSize += frameBuffers[i].len;
+
+			framePointers[i].sourceData = frameBuffers[i].buf;
+			framePointers[i].sourceSize = frameBuffers[i].len;
+			framePointers[i].destSize = frameSizesP ? frameSizesP[i] : 0;
+		}
+	}
+	else {
+		PyErr_SetString(PyExc_TypeError, "argument must be list or BufferWithSegments");
+		goto finally;
+	}
+
+	/* We now have an array with info about our inputs and outputs. Feed it into
+	   our generic decompression function. */
+	frameSources.frames = framePointers;
+	frameSources.framesSize = frameCount;
+	frameSources.compressedSize = totalInputSize;
+
+	result = decompress_from_framesources(self, &frameSources, threads);
+
+finally:
+	if (frameSizes.buf) {
+		PyBuffer_Release(&frameSizes);
+	}
+	PyMem_Free(framePointers);
+
+	if (frameBuffers) {
+		for (i = 0; i < frameCount; i++) {
+			PyBuffer_Release(&frameBuffers[i]);
+		}
+
+		PyMem_Free(frameBuffers);
+	}
 
 	return result;
 }
@@ -616,6 +1520,10 @@
 	Decompressor_read_from__doc__ },
 	{ "write_to", (PyCFunction)Decompressor_write_to, METH_VARARGS | METH_KEYWORDS,
 	Decompressor_write_to__doc__ },
+	{ "decompress_content_dict_chain", (PyCFunction)Decompressor_decompress_content_dict_chain,
+	  METH_VARARGS | METH_KEYWORDS, Decompressor_decompress_content_dict_chain__doc__ },
+	{ "multi_decompress_to_buffer", (PyCFunction)Decompressor_multi_decompress_to_buffer,
+	  METH_VARARGS | METH_KEYWORDS, Decompressor_multi_decompress_to_buffer__doc__ },
 	{ NULL, NULL }
 };
 
--- a/contrib/python-zstandard/c-ext/decompressoriterator.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/decompressoriterator.c	Tue Apr 18 12:24:34 2017 -0400
@@ -26,11 +26,6 @@
 		self->buffer = NULL;
 	}
 
-	if (self->dstream) {
-		ZSTD_freeDStream(self->dstream);
-		self->dstream = NULL;
-	}
-
 	if (self->input.src) {
 		PyMem_Free((void*)self->input.src);
 		self->input.src = NULL;
@@ -50,6 +45,8 @@
 	DecompressorIteratorResult result;
 	size_t oldInputPos = self->input.pos;
 
+	assert(self->decompressor->dstream);
+
 	result.chunk = NULL;
 
 	chunk = PyBytes_FromStringAndSize(NULL, self->outSize);
@@ -63,7 +60,7 @@
 	self->output.pos = 0;
 
 	Py_BEGIN_ALLOW_THREADS
-	zresult = ZSTD_decompressStream(self->dstream, &self->output, &self->input);
+	zresult = ZSTD_decompressStream(self->decompressor->dstream, &self->output, &self->input);
 	Py_END_ALLOW_THREADS
 
 	/* We're done with the pointer. Nullify to prevent anyone from getting a
@@ -160,7 +157,7 @@
 					PyErr_SetString(PyExc_ValueError,
 						"skip_bytes larger than first input chunk; "
 						"this scenario is currently unsupported");
-					Py_DecRef(readResult);
+					Py_XDECREF(readResult);
 					return NULL;
 				}
 
@@ -179,7 +176,7 @@
 		else if (!self->readCount) {
 			self->finishedInput = 1;
 			self->finishedOutput = 1;
-			Py_DecRef(readResult);
+			Py_XDECREF(readResult);
 			PyErr_SetString(PyExc_StopIteration, "empty input");
 			return NULL;
 		}
@@ -188,7 +185,7 @@
 		}
 
 		/* We've copied the data into memory we manage. Discard the Python object. */
-		Py_DecRef(readResult);
+		Py_XDECREF(readResult);
 	}
 
 	result = read_decompressor_iterator(self);
--- a/contrib/python-zstandard/c-ext/dictparams.c	Tue Apr 18 11:22:42 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-/**
-* Copyright (c) 2016-present, Gregory Szorc
-* All rights reserved.
-*
-* This software may be modified and distributed under the terms
-* of the BSD license. See the LICENSE file for details.
-*/
-
-#include "python-zstandard.h"
-
-PyDoc_STRVAR(DictParameters__doc__,
-"DictParameters: low-level control over dictionary generation");
-
-static PyObject* DictParameters_new(PyTypeObject* subtype, PyObject* args, PyObject* kwargs) {
-	DictParametersObject* self;
-	unsigned selectivityLevel;
-	int compressionLevel;
-	unsigned notificationLevel;
-	unsigned dictID;
-
-	if (!PyArg_ParseTuple(args, "IiII", &selectivityLevel, &compressionLevel,
-		&notificationLevel, &dictID)) {
-		return NULL;
-	}
-
-	self = (DictParametersObject*)subtype->tp_alloc(subtype, 1);
-	if (!self) {
-		return NULL;
-	}
-
-	self->selectivityLevel = selectivityLevel;
-	self->compressionLevel = compressionLevel;
-	self->notificationLevel = notificationLevel;
-	self->dictID = dictID;
-
-	return (PyObject*)self;
-}
-
-static void DictParameters_dealloc(PyObject* self) {
-	PyObject_Del(self);
-}
-
-static Py_ssize_t DictParameters_length(PyObject* self) {
-	return 4;
-}
-
-static PyObject* DictParameters_item(PyObject* o, Py_ssize_t i) {
-	DictParametersObject* self = (DictParametersObject*)o;
-
-	switch (i) {
-	case 0:
-		return PyLong_FromLong(self->selectivityLevel);
-	case 1:
-		return PyLong_FromLong(self->compressionLevel);
-	case 2:
-		return PyLong_FromLong(self->notificationLevel);
-	case 3:
-		return PyLong_FromLong(self->dictID);
-	default:
-		PyErr_SetString(PyExc_IndexError, "index out of range");
-		return NULL;
-	}
-}
-
-static PySequenceMethods DictParameters_sq = {
-	DictParameters_length, /* sq_length */
-	0,	                   /* sq_concat */
-	0,                     /* sq_repeat */
-	DictParameters_item,   /* sq_item */
-	0,                     /* sq_ass_item */
-	0,                     /* sq_contains */
-	0,                     /* sq_inplace_concat */
-	0                      /* sq_inplace_repeat */
-};
-
-PyTypeObject DictParametersType = {
-	PyVarObject_HEAD_INIT(NULL, 0)
-	"DictParameters", /* tp_name */
-	sizeof(DictParametersObject), /* tp_basicsize */
-	0,                         /* tp_itemsize */
-	(destructor)DictParameters_dealloc, /* tp_dealloc */
-	0,                         /* tp_print */
-	0,                         /* tp_getattr */
-	0,                         /* tp_setattr */
-	0,                         /* tp_compare */
-	0,                         /* tp_repr */
-	0,                         /* tp_as_number */
-	&DictParameters_sq,        /* tp_as_sequence */
-	0,                         /* tp_as_mapping */
-	0,                         /* tp_hash  */
-	0,                         /* tp_call */
-	0,                         /* tp_str */
-	0,                         /* tp_getattro */
-	0,                         /* tp_setattro */
-	0,                         /* tp_as_buffer */
-	Py_TPFLAGS_DEFAULT,        /* tp_flags */
-	DictParameters__doc__,     /* tp_doc */
-	0,                         /* tp_traverse */
-	0,                         /* tp_clear */
-	0,                         /* tp_richcompare */
-	0,                         /* tp_weaklistoffset */
-	0,                         /* tp_iter */
-	0,                         /* tp_iternext */
-	0,                         /* tp_methods */
-	0,                         /* tp_members */
-	0,                         /* tp_getset */
-	0,                         /* tp_base */
-	0,                         /* tp_dict */
-	0,                         /* tp_descr_get */
-	0,                         /* tp_descr_set */
-	0,                         /* tp_dictoffset */
-	0,                         /* tp_init */
-	0,                         /* tp_alloc */
-	DictParameters_new,        /* tp_new */
-};
-
-void dictparams_module_init(PyObject* mod) {
-	Py_TYPE(&DictParametersType) = &PyType_Type;
-	if (PyType_Ready(&DictParametersType) < 0) {
-		return;
-	}
-
-	Py_IncRef((PyObject*)&DictParametersType);
-	PyModule_AddObject(mod, "DictParameters", (PyObject*)&DictParametersType);
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/c-ext/frameparams.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,132 @@
+/**
+* Copyright (c) 2017-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(FrameParameters__doc__,
+	"FrameParameters: information about a zstd frame");
+
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args) {
+	const char* source;
+	Py_ssize_t sourceSize;
+	ZSTD_frameParams params;
+	FrameParametersObject* result = NULL;
+	size_t zresult;
+
+#if PY_MAJOR_VERSION >= 3
+	if (!PyArg_ParseTuple(args, "y#:get_frame_parameters",
+#else
+	if (!PyArg_ParseTuple(args, "s#:get_frame_parameters",
+#endif
+		&source, &sourceSize)) {
+		return NULL;
+	}
+
+	/* Needed for Python 2 to reject unicode */
+	if (!PyBytes_Check(PyTuple_GET_ITEM(args, 0))) {
+		PyErr_SetString(PyExc_TypeError, "argument must be bytes");
+		return NULL;
+	}
+
+	zresult = ZSTD_getFrameParams(&params, (void*)source, sourceSize);
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "cannot get frame parameters: %s", ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	if (zresult) {
+		PyErr_Format(ZstdError, "not enough data for frame parameters; need %zu bytes", zresult);
+		return NULL;
+	}
+
+	result = PyObject_New(FrameParametersObject, &FrameParametersType);
+	if (!result) {
+		return NULL;
+	}
+
+	result->frameContentSize = params.frameContentSize;
+	result->windowSize = params.windowSize;
+	result->dictID = params.dictID;
+	result->checksumFlag = params.checksumFlag ? 1 : 0;
+
+	return result;
+}
+
+static void FrameParameters_dealloc(PyObject* self) {
+	PyObject_Del(self);
+}
+
+static PyMemberDef FrameParameters_members[] = {
+	{ "content_size", T_ULONGLONG,
+	  offsetof(FrameParametersObject, frameContentSize), READONLY,
+	  "frame content size" },
+	{ "window_size", T_UINT,
+	  offsetof(FrameParametersObject, windowSize), READONLY,
+	  "window size" },
+	{ "dict_id", T_UINT,
+	  offsetof(FrameParametersObject, dictID), READONLY,
+	  "dictionary ID" },
+	{ "has_checksum", T_BOOL,
+	  offsetof(FrameParametersObject, checksumFlag), READONLY,
+	  "checksum flag" },
+	{ NULL }
+};
+
+PyTypeObject FrameParametersType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"FrameParameters",          /* tp_name */
+	sizeof(FrameParametersObject), /* tp_basicsize */
+	0,                         /* tp_itemsize */
+	(destructor)FrameParameters_dealloc, /* tp_dealloc */
+	0,                         /* tp_print */
+	0,                         /* tp_getattr */
+	0,                         /* tp_setattr */
+	0,                         /* tp_compare */
+	0,                         /* tp_repr */
+	0,                         /* tp_as_number */
+	0,                         /* tp_as_sequence */
+	0,                         /* tp_as_mapping */
+	0,                         /* tp_hash  */
+	0,                         /* tp_call */
+	0,                         /* tp_str */
+	0,                         /* tp_getattro */
+	0,                         /* tp_setattro */
+	0,                         /* tp_as_buffer */
+	Py_TPFLAGS_DEFAULT,        /* tp_flags */
+	FrameParameters__doc__,    /* tp_doc */
+	0,                         /* tp_traverse */
+	0,                         /* tp_clear */
+	0,                         /* tp_richcompare */
+	0,                         /* tp_weaklistoffset */
+	0,                         /* tp_iter */
+	0,                         /* tp_iternext */
+	0,                         /* tp_methods */
+	FrameParameters_members,   /* tp_members */
+	0,                         /* tp_getset */
+	0,                         /* tp_base */
+	0,                         /* tp_dict */
+	0,                         /* tp_descr_get */
+	0,                         /* tp_descr_set */
+	0,                         /* tp_dictoffset */
+	0,                         /* tp_init */
+	0,                         /* tp_alloc */
+	0,                         /* tp_new */
+};
+
+void frameparams_module_init(PyObject* mod) {
+	Py_TYPE(&FrameParametersType) = &PyType_Type;
+	if (PyType_Ready(&FrameParametersType) < 0) {
+		return;
+	}
+
+	Py_INCREF(&FrameParametersType);
+	PyModule_AddObject(mod, "FrameParameters", (PyObject*)&FrameParametersType);
+}
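
For reference, a small sketch of how these frame parameters surface at the
Python level (attribute names match the members defined above; the compressor
keyword arguments belong to the same library):

    import zstd

    cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
    params = zstd.get_frame_parameters(cctx.compress(b'data'))

    assert params.content_size == 4   # length of b'data'
    assert params.dict_id == 0        # no dictionary was used
    assert params.has_checksum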
--- a/contrib/python-zstandard/c-ext/python-zstandard.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/c-ext/python-zstandard.h	Tue Apr 18 12:24:34 2017 -0400
@@ -8,20 +8,27 @@
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
+#include "structmember.h"
 
 #define ZSTD_STATIC_LINKING_ONLY
 #define ZDICT_STATIC_LINKING_ONLY
 #include "mem.h"
 #include "zstd.h"
 #include "zdict.h"
+#include "zstdmt_compress.h"
 
-#define PYTHON_ZSTANDARD_VERSION "0.6.0"
+#define PYTHON_ZSTANDARD_VERSION "0.8.1"
 
 typedef enum {
 	compressorobj_flush_finish,
 	compressorobj_flush_block,
 } CompressorObj_Flush;
 
+/*
+   Represents a CompressionParameters type.
+
+   This type is basically a wrapper around ZSTD_compressionParameters.
+*/
 typedef struct {
 	PyObject_HEAD
 	unsigned windowLog;
@@ -35,34 +42,70 @@
 
 extern PyTypeObject CompressionParametersType;
 
+/*
+   Represents a FrameParameters type.
+
+   This type is basically a wrapper around ZSTD_frameParams.
+*/
 typedef struct {
 	PyObject_HEAD
-	unsigned selectivityLevel;
-	int compressionLevel;
-	unsigned notificationLevel;
+	unsigned long long frameContentSize;
+	unsigned windowSize;
 	unsigned dictID;
-} DictParametersObject;
+	char checksumFlag;
+} FrameParametersObject;
+
+extern PyTypeObject FrameParametersType;
 
-extern PyTypeObject DictParametersType;
+/*
+   Represents a ZstdCompressionDict type.
 
+   Instances hold data used for a zstd compression dictionary.
+*/
 typedef struct {
 	PyObject_HEAD
 
+	/* Pointer to dictionary data. Owned by self. */
 	void* dictData;
+	/* Size of dictionary data. */
 	size_t dictSize;
+	/* k parameter for cover dictionaries. Only populated by train_cover_dict(). */
+	unsigned k;
+	/* d parameter for cover dictionaries. Only populated by train_cover_dict(). */
+	unsigned d;
 } ZstdCompressionDict;
 
 extern PyTypeObject ZstdCompressionDictType;
 
+/*
+   Represents a ZstdCompressor type.
+*/
 typedef struct {
 	PyObject_HEAD
 
+	/* Configured compression level. Should always be set. */
 	int compressionLevel;
+	/* Number of threads to use for operations. */
+	unsigned int threads;
+	/* Pointer to compression dictionary to use. NULL if not using dictionary
+	   compression. */
 	ZstdCompressionDict* dict;
+	/* Compression context to use. Populated during object construction. NULL
+	   if using multi-threaded compression. */
 	ZSTD_CCtx* cctx;
+	/* Multi-threaded compression context to use. Populated during object
+	   construction. NULL if not using multi-threaded compression. */
+	ZSTDMT_CCtx* mtcctx;
+	/* Digested compression dictionary. NULL initially. Populated on first use. */
 	ZSTD_CDict* cdict;
+	/* Low-level compression parameter control. NULL unless passed to
+	   constructor. Takes precedence over `compressionLevel` if defined. */
 	CompressionParametersObject* cparams;
+	/* Controls zstd frame options. */
 	ZSTD_frameParameters fparams;
+	/* Holds state for streaming compression. Shared across all invocations.
+	   Populated on first use. */
+	ZSTD_CStream* cstream;
 } ZstdCompressor;
 
 extern PyTypeObject ZstdCompressorType;
@@ -71,7 +114,6 @@
 	PyObject_HEAD
 
 	ZstdCompressor* compressor;
-	ZSTD_CStream* cstream;
 	ZSTD_outBuffer output;
 	int finished;
 } ZstdCompressionObj;
@@ -85,7 +127,6 @@
 	PyObject* writer;
 	Py_ssize_t sourceSize;
 	size_t outSize;
-	ZSTD_CStream* cstream;
 	int entered;
 } ZstdCompressionWriter;
 
@@ -102,7 +143,6 @@
 	size_t inSize;
 	size_t outSize;
 
-	ZSTD_CStream* cstream;
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	int finishedOutput;
@@ -115,10 +155,11 @@
 typedef struct {
 	PyObject_HEAD
 
-	ZSTD_DCtx* refdctx;
+	ZSTD_DCtx* dctx;
 
 	ZstdCompressionDict* dict;
 	ZSTD_DDict* ddict;
+	ZSTD_DStream* dstream;
 } ZstdDecompressor;
 
 extern PyTypeObject ZstdDecompressorType;
@@ -127,7 +168,6 @@
 	PyObject_HEAD
 
 	ZstdDecompressor* decompressor;
-	ZSTD_DStream* dstream;
 	int finished;
 } ZstdDecompressionObj;
 
@@ -139,7 +179,6 @@
 	ZstdDecompressor* decompressor;
 	PyObject* writer;
 	size_t outSize;
-	ZSTD_DStream* dstream;
 	int entered;
 } ZstdDecompressionWriter;
 
@@ -155,7 +194,6 @@
 	size_t inSize;
 	size_t outSize;
 	size_t skipBytes;
-	ZSTD_DStream* dstream;
 	ZSTD_inBuffer input;
 	ZSTD_outBuffer output;
 	Py_ssize_t readCount;
@@ -170,9 +208,78 @@
 	PyObject* chunk;
 } DecompressorIteratorResult;
 
+typedef struct {
+	unsigned long long offset;
+	unsigned long long length;
+} BufferSegment;
+
+typedef struct {
+	PyObject_HEAD
+
+	PyObject* parent;
+	BufferSegment* segments;
+	Py_ssize_t segmentCount;
+} ZstdBufferSegments;
+
+extern PyTypeObject ZstdBufferSegmentsType;
+
+typedef struct {
+	PyObject_HEAD
+
+	PyObject* parent;
+	void* data;
+	Py_ssize_t dataSize;
+	unsigned long long offset;
+} ZstdBufferSegment;
+
+extern PyTypeObject ZstdBufferSegmentType;
+
+typedef struct {
+	PyObject_HEAD
+
+	Py_buffer parent;
+	void* data;
+	unsigned long long dataSize;
+	BufferSegment* segments;
+	Py_ssize_t segmentCount;
+	int useFree;
+} ZstdBufferWithSegments;
+
+extern PyTypeObject ZstdBufferWithSegmentsType;
+
+/**
+ * An ordered collection of BufferWithSegments exposed as a squashed collection.
+ *
+ * This type provides a virtual view spanning multiple BufferWithSegments
+ * instances. It allows multiple instances to be "chained" together and
+ * exposed as a single collection. e.g. if there are 2 buffers holding
+ * 10 segments each, then o[14] will access the 5th segment in the 2nd buffer.
+ */
+typedef struct {
+	PyObject_HEAD
+
+	/* An array of buffers that should be exposed through this instance. */
+	ZstdBufferWithSegments** buffers;
+	/* Number of elements in buffers array. */
+	Py_ssize_t bufferCount;
+	/* Array of cumulative element counts, one entry per buffer. The 0th
+	   entry is the number of elements in the 0th buffer; the 1st entry is
+	   the sum of elements in the 0th and 1st buffers; and so on. */
+	Py_ssize_t* firstElements;
+} ZstdBufferWithSegmentsCollection;
+
+extern PyTypeObject ZstdBufferWithSegmentsCollectionType;
+
 void ztopy_compression_parameters(CompressionParametersObject* params, ZSTD_compressionParameters* zparams);
 CompressionParametersObject* get_compression_parameters(PyObject* self, PyObject* args);
+FrameParametersObject* get_frame_parameters(PyObject* self, PyObject* args);
 PyObject* estimate_compression_context_size(PyObject* self, PyObject* args);
-ZSTD_CStream* CStream_from_ZstdCompressor(ZstdCompressor* compressor, Py_ssize_t sourceSize);
-ZSTD_DStream* DStream_from_ZstdDecompressor(ZstdDecompressor* decompressor);
+int init_cstream(ZstdCompressor* compressor, unsigned long long sourceSize);
+int init_mtcstream(ZstdCompressor* compressor, Py_ssize_t sourceSize);
+int init_dstream(ZstdDecompressor* decompressor);
 ZstdCompressionDict* train_dictionary(PyObject* self, PyObject* args, PyObject* kwargs);
+ZstdCompressionDict* train_cover_dictionary(PyObject* self, PyObject* args, PyObject* kwargs);
+ZstdBufferWithSegments* BufferWithSegments_FromMemory(void* data, unsigned long long dataSize, BufferSegment* segments, Py_ssize_t segmentsSize);
+Py_ssize_t BufferWithSegmentsCollection_length(ZstdBufferWithSegmentsCollection*);
+int cpu_count(void);
+size_t roundpow2(size_t);
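
The o[14] example in the ZstdBufferWithSegmentsCollection comment above can be
made concrete with a short sketch of the lookup that firstElements enables
(illustrative Python, not the C implementation):

    def resolve(first_elements, index):
        # first_elements[i] = total number of segments in buffers 0..i.
        for buffer_index, cumulative in enumerate(first_elements):
            if index < cumulative:
                prior = first_elements[buffer_index - 1] if buffer_index else 0
                return buffer_index, index - prior
        raise IndexError(index)

    # Two buffers of 10 segments each: global index 14 resolves to the
    # segment at offset 4 (the 5th segment) of the 2nd buffer.
    assert resolve([10, 20], 14) == (1, 4)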
--- a/contrib/python-zstandard/make_cffi.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/make_cffi.py	Tue Apr 18 12:24:34 2017 -0400
@@ -9,6 +9,7 @@
 import cffi
 import distutils.ccompiler
 import os
+import re
 import subprocess
 import tempfile
 
@@ -19,17 +20,28 @@
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
+    'common/pool.c',
+    'common/threading.c',
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
     'compress/huf_compress.c',
     'compress/zstd_compress.c',
+    'compress/zstdmt_compress.c',
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
+    'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
 
+# Headers whose preprocessed output will be fed into cdef().
+HEADERS = [os.path.join(HERE, 'zstd', *p) for p in (
+    ('zstd.h',),
+    ('compress', 'zstdmt_compress.h'),
+    ('dictBuilder', 'zdict.h'),
+)]
+
 INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
     'zstd',
     'zstd/common',
@@ -53,56 +65,123 @@
     args.extend([
         '-E',
         '-DZSTD_STATIC_LINKING_ONLY',
+        '-DZDICT_STATIC_LINKING_ONLY',
     ])
 elif compiler.compiler_type == 'msvc':
     args = [compiler.cc]
     args.extend([
         '/EP',
         '/DZSTD_STATIC_LINKING_ONLY',
+        '/DZDICT_STATIC_LINKING_ONLY',
     ])
 else:
     raise Exception('unsupported compiler type: %s' % compiler.compiler_type)
 
-# zstd.h includes <stddef.h>, which is also included by cffi's boilerplate.
-# This can lead to duplicate declarations. So we strip this include from the
-# preprocessor invocation.
-
-with open(os.path.join(HERE, 'zstd', 'zstd.h'), 'rb') as fh:
-    lines = [l for l in fh if not l.startswith(b'#include <stddef.h>')]
-
-fd, input_file = tempfile.mkstemp(suffix='.h')
-os.write(fd, b''.join(lines))
-os.close(fd)
+def preprocess(path):
+    with open(path, 'rb') as fh:
+        lines = []
+        for l in fh:
+            # zstd.h includes <stddef.h>, which is also included by cffi's
+            # boilerplate. This can lead to duplicate declarations. So we strip
+            # this include from the preprocessor invocation.
+            #
+            # The same thing happens for includes of zstd.h, so give them the same
+            # treatment.
+            #
+            # We define ZSTD_STATIC_LINKING_ONLY, which is redundant with the inline
+            # #define in zstdmt_compress.h and results in a compiler warning. So drop
+            # the inline #define.
+            if l.startswith((b'#include <stddef.h>',
+                             b'#include "zstd.h"',
+                             b'#define ZSTD_STATIC_LINKING_ONLY')):
+                continue
 
-args.append(input_file)
+            # ZSTDLIB_API may not be defined if we dropped zstd.h. It isn't
+            # important so just filter it out.
+            if l.startswith(b'ZSTDLIB_API'):
+                l = l[len(b'ZSTDLIB_API '):]
+
+            lines.append(l)
+
+    fd, input_file = tempfile.mkstemp(suffix='.h')
+    os.write(fd, b''.join(lines))
+    os.close(fd)
 
-try:
-    process = subprocess.Popen(args, stdout=subprocess.PIPE)
-    output = process.communicate()[0]
-    ret = process.poll()
-    if ret:
-        raise Exception('preprocessor exited with error')
-finally:
-    os.unlink(input_file)
+    try:
+        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE)
+        output = process.communicate()[0]
+        ret = process.poll()
+        if ret:
+            raise Exception('preprocessor exited with error')
 
-def normalize_output():
+        return output
+    finally:
+        os.unlink(input_file)
+
+
+def normalize_output(output):
     lines = []
     for line in output.splitlines():
         # CFFI's parser doesn't like __attribute__ on UNIX compilers.
         if line.startswith(b'__attribute__ ((visibility ("default"))) '):
             line = line[len(b'__attribute__ ((visibility ("default"))) '):]
 
+        if line.startswith(b'__attribute__((deprecated('):
+            continue
+        elif b'__declspec(deprecated(' in line:
+            continue
+
         lines.append(line)
 
     return b'\n'.join(lines)
 
+
 ffi = cffi.FFI()
+# *_DISABLE_DEPRECATE_WARNINGS prevents the compiler from emitting a warning
+# when cffi uses the function. Since we statically link against zstd, even
+# if we use the deprecated functions it shouldn't be a huge problem.
 ffi.set_source('_zstd_cffi', '''
+#include "mem.h"
 #define ZSTD_STATIC_LINKING_ONLY
 #include "zstd.h"
+#define ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_DISABLE_DEPRECATE_WARNINGS
+#include "zdict.h"
+#include "zstdmt_compress.h"
 ''', sources=SOURCES, include_dirs=INCLUDE_DIRS)
 
-ffi.cdef(normalize_output().decode('latin1'))
+DEFINE = re.compile(b'^\\#define ([a-zA-Z0-9_]+) ')
+
+sources = []
+
+# Feed normalized preprocessor output for headers into the cdef parser.
+for header in HEADERS:
+    preprocessed = preprocess(header)
+    sources.append(normalize_output(preprocessed))
+
+    # #define's are effectively erased as part of going through the preprocessor.
+    # So perform a manual pass to re-add those to the cdef source.
+    with open(header, 'rb') as fh:
+        for line in fh:
+            line = line.strip()
+            m = DEFINE.match(line)
+            if not m:
+                continue
+
+            if m.group(1) == b'ZSTD_STATIC_LINKING_ONLY':
+                continue
+
+            # The parser doesn't like some constants with complex values.
+            if m.group(1) in (b'ZSTD_LIB_VERSION', b'ZSTD_VERSION_STRING'):
+                continue
+
+            # The ... is magic syntax telling the cdef parser to resolve the
+            # value at compile time.
+            sources.append(m.group(0) + b' ...')
+
+cdeflines = b'\n'.join(sources).splitlines()
+cdeflines = [l for l in cdeflines if l.strip()]
+ffi.cdef(b'\n'.join(cdeflines).decode('latin1'))
 
 if __name__ == '__main__':
     ffi.compile()
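
The '#define NAME ...' strings fed to cdef() above use standard cffi syntax:
the literal '...' tells cffi to determine the constant's value at compile
time. A minimal standalone illustration (module and constant names are
hypothetical):

    import cffi

    ffi = cffi.FFI()
    ffi.set_source('_example', '#define EXAMPLE_MAX 1024')
    ffi.cdef('#define EXAMPLE_MAX ...')

    if __name__ == '__main__':
        ffi.compile()
        from _example import lib
        assert lib.EXAMPLE_MAX == 1024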
--- a/contrib/python-zstandard/setup.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/setup.py	Tue Apr 18 12:24:34 2017 -0400
@@ -25,10 +25,15 @@
 # facilitate reuse in other projects.
 extensions = [setup_zstd.get_c_extension(SUPPORT_LEGACY, 'zstd')]
 
+install_requires = []
+
 if cffi:
     import make_cffi
     extensions.append(make_cffi.ffi.distutils_extension())
 
+    # Need cffi>=1.8 for a change in ffi.from_buffer() behavior.
+    install_requires.append('cffi>=1.8')
+
 version = None
 
 with open('c-ext/python-zstandard.h', 'r') as fh:
@@ -62,8 +67,10 @@
         'Programming Language :: Python :: 3.3',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
     ],
     keywords='zstandard zstd compression',
     ext_modules=extensions,
     test_suite='tests',
+    install_requires=install_requires,
 )
--- a/contrib/python-zstandard/setup_zstd.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/setup_zstd.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,18 +12,23 @@
     'common/entropy_common.c',
     'common/error_private.c',
     'common/fse_decompress.c',
+    'common/pool.c',
+    'common/threading.c',
     'common/xxhash.c',
     'common/zstd_common.c',
     'compress/fse_compress.c',
     'compress/huf_compress.c',
     'compress/zstd_compress.c',
+    'compress/zstdmt_compress.c',
     'decompress/huf_decompress.c',
     'decompress/zstd_decompress.c',
+    'dictBuilder/cover.c',
     'dictBuilder/divsufsort.c',
     'dictBuilder/zdict.c',
 )]
 
 zstd_sources_legacy = ['zstd/%s' % p for p in (
+    'deprecated/zbuff_common.c',
     'deprecated/zbuff_compress.c',
     'deprecated/zbuff_decompress.c',
     'legacy/zstd_v01.c',
@@ -51,6 +56,7 @@
 
 ext_sources = [
     'zstd.c',
+    'c-ext/bufferutil.c',
     'c-ext/compressiondict.c',
     'c-ext/compressobj.c',
     'c-ext/compressor.c',
@@ -62,7 +68,7 @@
     'c-ext/decompressor.c',
     'c-ext/decompressoriterator.c',
     'c-ext/decompressionwriter.c',
-    'c-ext/dictparams.c',
+    'c-ext/frameparams.c',
 ]
 
 zstd_depends = [
@@ -84,8 +90,13 @@
 
     depends = [os.path.join(root, p) for p in zstd_depends]
 
+    extra_args = ['-DZSTD_MULTITHREAD']
+
+    if support_legacy:
+        extra_args.append('-DZSTD_LEGACY_SUPPORT=1')
+
     # TODO compile with optimizations.
     return Extension(name, sources,
                      include_dirs=include_dirs,
                      depends=depends,
-                     extra_compile_args=["-DZSTD_LEGACY_SUPPORT=1"] if support_legacy else [])
+                     extra_compile_args=extra_args)
--- a/contrib/python-zstandard/tests/common.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/common.py	Tue Apr 18 12:24:34 2017 -0400
@@ -1,4 +1,51 @@
+import inspect
 import io
+import os
+import types
+
+
+def make_cffi(cls):
+    """Decorator to add CFFI versions of each test method."""
+
+    try:
+        import zstd_cffi
+    except ImportError:
+        return cls
+
+    # If CFFI version is available, dynamically construct test methods
+    # that use it.
+
+    for attr in dir(cls):
+        fn = getattr(cls, attr)
+        if not inspect.ismethod(fn) and not inspect.isfunction(fn):
+            continue
+
+        if not fn.__name__.startswith('test_'):
+            continue
+
+        name = '%s_cffi' % fn.__name__
+
+        # Replace the "zstd" symbol with the CFFI module instance. Then copy
+        # the function object and install it in a new attribute.
+        if isinstance(fn, types.FunctionType):
+            globs = dict(fn.__globals__)
+            globs['zstd'] = zstd_cffi
+            new_fn = types.FunctionType(fn.__code__, globs, name,
+                                        fn.__defaults__, fn.__closure__)
+            new_method = new_fn
+        else:
+            globs = dict(fn.__func__.func_globals)
+            globs['zstd'] = zstd_cffi
+            new_fn = types.FunctionType(fn.__func__.func_code, globs, name,
+                                        fn.__func__.func_defaults,
+                                        fn.__func__.func_closure)
+            new_method = types.UnboundMethodType(new_fn, fn.im_self,
+                                                 fn.im_class)
+
+        setattr(cls, name, new_method)
+
+    return cls
+
 
 class OpCountingBytesIO(io.BytesIO):
     def __init__(self, *args, **kwargs):
@@ -13,3 +60,29 @@
     def write(self, data):
         self._write_count += 1
         return super(OpCountingBytesIO, self).write(data)
+
+
+_source_files = []
+
+
+def random_input_data():
+    """Obtain the raw content of source files.
+
+    This is used for generating "random" data to feed into fuzzing, since it is
+    faster than random content generation.
+    """
+    if _source_files:
+        return _source_files
+
+    for root, dirs, files in os.walk(os.path.dirname(__file__)):
+        dirs[:] = list(sorted(dirs))
+        for f in sorted(files):
+            try:
+                with open(os.path.join(root, f), 'rb') as fh:
+                    data = fh.read()
+                    if data:
+                        _source_files.append(data)
+            except (IOError, OSError):
+                pass
+
+    return _source_files
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,112 @@
+import struct
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+import zstd
+
+ss = struct.Struct('=QQ')
+
+
+class TestBufferWithSegments(unittest.TestCase):
+    def test_arguments(self):
+        with self.assertRaises(TypeError):
+            zstd.BufferWithSegments()
+
+        with self.assertRaises(TypeError):
+            zstd.BufferWithSegments(b'foo')
+
+        # Segments data should be a multiple of 16.
+        with self.assertRaisesRegexp(ValueError, 'segments array size is not a multiple of 16'):
+            zstd.BufferWithSegments(b'foo', b'\x00\x00')
+
+    def test_invalid_offset(self):
+        with self.assertRaisesRegexp(ValueError, 'offset within segments array references memory'):
+            zstd.BufferWithSegments(b'foo', ss.pack(0, 4))
+
+    def test_invalid_getitem(self):
+        b = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+
+        with self.assertRaisesRegexp(IndexError, 'offset must be non-negative'):
+            test = b[-10]
+
+        with self.assertRaisesRegexp(IndexError, 'offset must be less than 1'):
+            test = b[1]
+
+        with self.assertRaisesRegexp(IndexError, 'offset must be less than 1'):
+            test = b[2]
+
+    def test_single(self):
+        b = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+        self.assertEqual(len(b), 1)
+        self.assertEqual(b.size, 3)
+        self.assertEqual(b.tobytes(), b'foo')
+
+        self.assertEqual(len(b[0]), 3)
+        self.assertEqual(b[0].offset, 0)
+        self.assertEqual(b[0].tobytes(), b'foo')
+
+    def test_multiple(self):
+        b = zstd.BufferWithSegments(b'foofooxfooxy', b''.join([ss.pack(0, 3),
+                                                               ss.pack(3, 4),
+                                                               ss.pack(7, 5)]))
+        self.assertEqual(len(b), 3)
+        self.assertEqual(b.size, 12)
+        self.assertEqual(b.tobytes(), b'foofooxfooxy')
+
+        self.assertEqual(b[0].tobytes(), b'foo')
+        self.assertEqual(b[1].tobytes(), b'foox')
+        self.assertEqual(b[2].tobytes(), b'fooxy')
+
+
+class TestBufferWithSegmentsCollection(unittest.TestCase):
+    def test_empty_constructor(self):
+        with self.assertRaisesRegexp(ValueError, 'must pass at least 1 argument'):
+            zstd.BufferWithSegmentsCollection()
+
+    def test_argument_validation(self):
+        with self.assertRaisesRegexp(TypeError, 'arguments must be BufferWithSegments'):
+            zstd.BufferWithSegmentsCollection(None)
+
+        with self.assertRaisesRegexp(TypeError, 'arguments must be BufferWithSegments'):
+            zstd.BufferWithSegmentsCollection(zstd.BufferWithSegments(b'foo', ss.pack(0, 3)),
+                                              None)
+
+        with self.assertRaisesRegexp(ValueError, 'ZstdBufferWithSegments cannot be empty'):
+            zstd.BufferWithSegmentsCollection(zstd.BufferWithSegments(b'', b''))
+
+    def test_length(self):
+        b1 = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+        b2 = zstd.BufferWithSegments(b'barbaz', b''.join([ss.pack(0, 3),
+                                                          ss.pack(3, 3)]))
+
+        c = zstd.BufferWithSegmentsCollection(b1)
+        self.assertEqual(len(c), 1)
+        self.assertEqual(c.size(), 3)
+
+        c = zstd.BufferWithSegmentsCollection(b2)
+        self.assertEqual(len(c), 2)
+        self.assertEqual(c.size(), 6)
+
+        c = zstd.BufferWithSegmentsCollection(b1, b2)
+        self.assertEqual(len(c), 3)
+        self.assertEqual(c.size(), 9)
+
+    def test_getitem(self):
+        b1 = zstd.BufferWithSegments(b'foo', ss.pack(0, 3))
+        b2 = zstd.BufferWithSegments(b'barbaz', b''.join([ss.pack(0, 3),
+                                                          ss.pack(3, 3)]))
+
+        c = zstd.BufferWithSegmentsCollection(b1, b2)
+
+        with self.assertRaisesRegexp(IndexError, 'offset must be less than 3'):
+            c[3]
+
+        with self.assertRaisesRegexp(IndexError, 'offset must be less than 3'):
+            c[4]
+
+        self.assertEqual(c[0].tobytes(), b'foo')
+        self.assertEqual(c[1].tobytes(), b'bar')
+        self.assertEqual(c[2].tobytes(), b'baz')
--- a/contrib/python-zstandard/tests/test_cffi.py	Tue Apr 18 11:22:42 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-import io
-
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-import zstd
-
-try:
-    import zstd_cffi
-except ImportError:
-    raise unittest.SkipTest('cffi version of zstd not available')
-
-
-class TestCFFIWriteToToCDecompressor(unittest.TestCase):
-    def test_simple(self):
-        orig = io.BytesIO()
-        orig.write(b'foo')
-        orig.write(b'bar')
-        orig.write(b'foobar' * 16384)
-
-        dest = io.BytesIO()
-        cctx = zstd_cffi.ZstdCompressor()
-        with cctx.write_to(dest) as compressor:
-            compressor.write(orig.getvalue())
-
-        uncompressed = io.BytesIO()
-        dctx = zstd.ZstdDecompressor()
-        with dctx.write_to(uncompressed) as decompressor:
-            decompressor.write(dest.getvalue())
-
-        self.assertEqual(uncompressed.getvalue(), orig.getvalue())
-
-
--- a/contrib/python-zstandard/tests/test_compressor.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_compressor.py	Tue Apr 18 12:24:34 2017 -0400
@@ -10,7 +10,10 @@
 
 import zstd
 
-from .common import OpCountingBytesIO
+from .common import (
+    make_cffi,
+    OpCountingBytesIO,
+)
 
 
 if sys.version_info[0] >= 3:
@@ -19,6 +22,13 @@
     next = lambda it: it.next()
 
 
+def multithreaded_chunk_size(level, source_size=0):
+    params = zstd.get_compression_parameters(level, source_size)
+
+    return 1 << (params.window_log + 2)
+
+
+@make_cffi
 class TestCompressor(unittest.TestCase):
     def test_level_bounds(self):
         with self.assertRaises(ValueError):
@@ -28,18 +38,35 @@
             zstd.ZstdCompressor(level=23)
 
 
+@make_cffi
 class TestCompressor_compress(unittest.TestCase):
-    def test_compress_empty(self):
-        cctx = zstd.ZstdCompressor(level=1)
-        cctx.compress(b'')
+    def test_multithreaded_unsupported(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'bar' * 64)
+
+        d = zstd.train_dictionary(8192, samples)
 
-        cctx = zstd.ZstdCompressor(level=22)
-        cctx.compress(b'')
+        cctx = zstd.ZstdCompressor(dict_data=d, threads=2)
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'compress\(\) cannot be used with both dictionaries and multi-threaded compression'):
+            cctx.compress(b'foo')
+
+        params = zstd.get_compression_parameters(3)
+        cctx = zstd.ZstdCompressor(compression_params=params, threads=2)
+        with self.assertRaisesRegexp(zstd.ZstdError, 'compress\(\) cannot be used with both compression parameters and multi-threaded compression'):
+            cctx.compress(b'foo')
 
     def test_compress_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
-        self.assertEqual(cctx.compress(b''),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        result = cctx.compress(b'')
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 524288)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
         # TODO should be temporary until https://github.com/facebook/zstd/issues/506
         # is fixed.
@@ -59,6 +86,13 @@
         self.assertEqual(len(result), 999)
         self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
 
+        # This matches the test for read_from() below.
+        cctx = zstd.ZstdCompressor(level=1)
+        result = cctx.compress(b'f' * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b'o')
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00'
+                                 b'\x10\x66\x66\x01\x00\xfb\xff\x39\xc0'
+                                 b'\x02\x09\x00\x00\x6f')
+
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
         no_checksum = cctx.compress(b'foobar')
@@ -67,6 +101,12 @@
 
         self.assertEqual(len(with_checksum), len(no_checksum) + 4)
 
+        no_params = zstd.get_frame_parameters(no_checksum)
+        with_params = zstd.get_frame_parameters(with_checksum)
+
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
     def test_write_content_size(self):
         cctx = zstd.ZstdCompressor(level=1)
         no_size = cctx.compress(b'foobar' * 256)
@@ -75,6 +115,11 @@
 
         self.assertEqual(len(with_size), len(no_size) + 1)
 
+        no_params = zstd.get_frame_parameters(no_size)
+        with_params = zstd.get_frame_parameters(with_size)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+
     def test_no_dict_id(self):
         samples = []
         for i in range(128):
@@ -92,6 +137,11 @@
 
         self.assertEqual(len(with_dict_id), len(no_dict_id) + 4)
 
+        no_params = zstd.get_frame_parameters(no_dict_id)
+        with_params = zstd.get_frame_parameters(with_dict_id)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 1584102229)
+
     def test_compress_dict_multiple(self):
         samples = []
         for i in range(128):
@@ -106,7 +156,23 @@
         for i in range(32):
             cctx.compress(b'foo bar foobar foo bar foobar')
 
+    def test_multithreaded(self):
+        chunk_size = multithreaded_chunk_size(1)
+        source = b''.join([b'x' * chunk_size, b'y' * chunk_size])
 
+        cctx = zstd.ZstdCompressor(level=1, threads=2)
+        compressed = cctx.compress(source)
+
+        params = zstd.get_frame_parameters(compressed)
+        self.assertEqual(params.content_size, chunk_size * 2)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        dctx = zstd.ZstdDecompressor()
+        self.assertEqual(dctx.decompress(compressed), source)
+
+
+@make_cffi
 class TestCompressor_compressobj(unittest.TestCase):
     def test_compressobj_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
@@ -127,6 +193,12 @@
         self.assertEqual(len(result), 999)
         self.assertEqual(result[0:4], b'\x28\xb5\x2f\xfd')
 
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
     def test_write_checksum(self):
         cctx = zstd.ZstdCompressor(level=1)
         cobj = cctx.compressobj()
@@ -135,6 +207,15 @@
         cobj = cctx.compressobj()
         with_checksum = cobj.compress(b'foobar') + cobj.flush()
 
+        no_params = zstd.get_frame_parameters(no_checksum)
+        with_params = zstd.get_frame_parameters(with_checksum)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
         self.assertEqual(len(with_checksum), len(no_checksum) + 4)
 
     def test_write_content_size(self):
@@ -145,6 +226,15 @@
         cobj = cctx.compressobj(size=len(b'foobar' * 256))
         with_size = cobj.compress(b'foobar' * 256) + cobj.flush()
 
+        no_params = zstd.get_frame_parameters(no_size)
+        with_params = zstd.get_frame_parameters(with_size)
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
+
         self.assertEqual(len(with_size), len(no_size) + 1)
 
     def test_compress_after_finished(self):
@@ -186,7 +276,32 @@
         header = trailing[0:3]
         self.assertEqual(header, b'\x01\x00\x00')
 
+    def test_multithreaded(self):
+        source = io.BytesIO()
+        source.write(b'a' * 1048576)
+        source.write(b'b' * 1048576)
+        source.write(b'c' * 1048576)
+        source.seek(0)
 
+        cctx = zstd.ZstdCompressor(level=1, threads=2)
+        cobj = cctx.compressobj()
+
+        chunks = []
+        while True:
+            d = source.read(8192)
+            if not d:
+                break
+
+            chunks.append(cobj.compress(d))
+
+        chunks.append(cobj.flush())
+
+        compressed = b''.join(chunks)
+
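+        # The exact frame size is stable for this input, level, and bundled zstd version.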
+        self.assertEqual(len(compressed), 295)
+
+
+@make_cffi
 class TestCompressor_copy_stream(unittest.TestCase):
     def test_no_read(self):
         source = object()
@@ -229,6 +344,12 @@
         self.assertEqual(r, 255 * 16384)
         self.assertEqual(w, 999)
 
+        params = zstd.get_frame_parameters(dest.getvalue())
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
     def test_write_checksum(self):
         source = io.BytesIO(b'foobar')
         no_checksum = io.BytesIO()
@@ -244,6 +365,15 @@
         self.assertEqual(len(with_checksum.getvalue()),
                          len(no_checksum.getvalue()) + 4)
 
+        no_params = zstd.get_frame_parameters(no_checksum.getvalue())
+        with_params = zstd.get_frame_parameters(with_checksum.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
+
     def test_write_content_size(self):
         source = io.BytesIO(b'foobar' * 256)
         no_size = io.BytesIO()
@@ -268,6 +398,15 @@
         self.assertEqual(len(with_size.getvalue()),
                          len(no_size.getvalue()) + 1)
 
+        no_params = zstd.get_frame_parameters(no_size.getvalue())
+        with_params = zstd.get_frame_parameters(with_size.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
+
     def test_read_write_size(self):
         source = OpCountingBytesIO(b'foobarfoobar')
         dest = OpCountingBytesIO()
@@ -279,6 +418,36 @@
         self.assertEqual(source._read_count, len(source.getvalue()) + 1)
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
+    def test_multithreaded(self):
+        source = io.BytesIO()
+        source.write(b'a' * 1048576)
+        source.write(b'b' * 1048576)
+        source.write(b'c' * 1048576)
+        source.seek(0)
+
+        dest = io.BytesIO()
+        cctx = zstd.ZstdCompressor(threads=2)
+        r, w = cctx.copy_stream(source, dest)
+        self.assertEqual(r, 3145728)
+        self.assertEqual(w, 295)
+
+        params = zstd.get_frame_parameters(dest.getvalue())
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        # Writing content size and checksum works.
+        cctx = zstd.ZstdCompressor(threads=2, write_content_size=True,
+                                   write_checksum=True)
+        dest = io.BytesIO()
+        source.seek(0)
+        cctx.copy_stream(source, dest, size=len(source.getvalue()))
+
+        params = zstd.get_frame_parameters(dest.getvalue())
+        self.assertEqual(params.content_size, 3145728)
+        self.assertEqual(params.dict_id, 0)
+        self.assertTrue(params.has_checksum)
+
 
 def compress(data, level):
     buffer = io.BytesIO()
@@ -288,18 +457,25 @@
     return buffer.getvalue()
 
 
+@make_cffi
 class TestCompressor_write_to(unittest.TestCase):
     def test_empty(self):
-        self.assertEqual(compress(b'', 1),
-                         b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+        result = compress(b'', 1)
+        self.assertEqual(result, b'\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00')
+
+        params = zstd.get_frame_parameters(result)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 524288)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
     def test_multiple_compress(self):
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=5)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'x' * 8192)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'x' * 8192), 0)
 
         result = buffer.getvalue()
         self.assertEqual(result,
@@ -318,11 +494,23 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=9, dict_data=d)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foo' * 16384)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foo' * 16384), 634)
 
         compressed = buffer.getvalue()
+
+        params = zstd.get_frame_parameters(compressed)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, d.dict_id())
+        self.assertFalse(params.has_checksum)
+
+        self.assertEqual(compressed[0:32],
+                         b'\x28\xb5\x2f\xfd\x03\x00\x55\x7b\x6b\x5e\x54\x00'
+                         b'\x00\x00\x02\xfc\xf4\xa5\xba\x23\x3f\x85\xb3\x54'
+                         b'\x00\x00\x18\x6f\x6f\x66\x01\x00')
+
         h = hashlib.sha1(compressed).hexdigest()
         self.assertEqual(h, '1c5bcd25181bcd8c1a73ea8773323e0056129f92')
 
@@ -332,11 +520,18 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(compression_params=params)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foobar' * 16384)
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foobar' * 16384), 0)
 
         compressed = buffer.getvalue()
+
+        params = zstd.get_frame_parameters(compressed)
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1048576)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
         h = hashlib.sha1(compressed).hexdigest()
         self.assertEqual(h, '1ae31f270ed7de14235221a604b31ecd517ebd99')
 
@@ -344,12 +539,21 @@
         no_checksum = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.write_to(no_checksum) as compressor:
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foobar'), 0)
 
         with_checksum = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         with cctx.write_to(with_checksum) as compressor:
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foobar'), 0)
+
+        no_params = zstd.get_frame_parameters(no_checksum.getvalue())
+        with_params = zstd.get_frame_parameters(with_checksum.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertTrue(with_params.has_checksum)
 
         self.assertEqual(len(with_checksum.getvalue()),
                          len(no_checksum.getvalue()) + 4)
@@ -358,12 +562,12 @@
         no_size = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1)
         with cctx.write_to(no_size) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
 
         with_size = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
         with cctx.write_to(with_size) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
 
         # Source size is not known in streaming mode, so header not
         # written.
@@ -373,7 +577,16 @@
         # Declaring size will write the header.
         with_size = io.BytesIO()
         with cctx.write_to(with_size, size=len(b'foobar' * 256)) as compressor:
-            compressor.write(b'foobar' * 256)
+            self.assertEqual(compressor.write(b'foobar' * 256), 0)
+
+        no_params = zstd.get_frame_parameters(no_size.getvalue())
+        with_params = zstd.get_frame_parameters(with_size.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 1536)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, 0)
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
 
         self.assertEqual(len(with_size.getvalue()),
                          len(no_size.getvalue()) + 1)
@@ -390,12 +603,21 @@
         with_dict_id = io.BytesIO()
         cctx = zstd.ZstdCompressor(level=1, dict_data=d)
         with cctx.write_to(with_dict_id) as compressor:
-            compressor.write(b'foobarfoobar')
+            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
 
         cctx = zstd.ZstdCompressor(level=1, dict_data=d, write_dict_id=False)
         no_dict_id = io.BytesIO()
         with cctx.write_to(no_dict_id) as compressor:
-            compressor.write(b'foobarfoobar')
+            self.assertEqual(compressor.write(b'foobarfoobar'), 0)
+
+        no_params = zstd.get_frame_parameters(no_dict_id.getvalue())
+        with_params = zstd.get_frame_parameters(with_dict_id.getvalue())
+        self.assertEqual(no_params.content_size, 0)
+        self.assertEqual(with_params.content_size, 0)
+        self.assertEqual(no_params.dict_id, 0)
+        self.assertEqual(with_params.dict_id, d.dict_id())
+        self.assertFalse(no_params.has_checksum)
+        self.assertFalse(with_params.has_checksum)
 
         self.assertEqual(len(with_dict_id.getvalue()),
                          len(no_dict_id.getvalue()) + 4)
@@ -412,9 +634,9 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest, write_size=1) as compressor:
-            compressor.write(b'foo')
-            compressor.write(b'bar')
-            compressor.write(b'foobar')
+            self.assertEqual(compressor.write(b'foo'), 0)
+            self.assertEqual(compressor.write(b'bar'), 0)
+            self.assertEqual(compressor.write(b'foobar'), 0)
 
         self.assertEqual(len(dest.getvalue()), dest._write_count)
 
@@ -422,15 +644,15 @@
         cctx = zstd.ZstdCompressor(level=3)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest) as compressor:
-            compressor.write(b'foo')
+            self.assertEqual(compressor.write(b'foo'), 0)
             self.assertEqual(dest._write_count, 0)
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 12)
             self.assertEqual(dest._write_count, 1)
-            compressor.write(b'bar')
+            self.assertEqual(compressor.write(b'bar'), 0)
             self.assertEqual(dest._write_count, 1)
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 6)
             self.assertEqual(dest._write_count, 2)
-            compressor.write(b'baz')
+            self.assertEqual(compressor.write(b'baz'), 0)
 
         self.assertEqual(dest._write_count, 3)
 
@@ -438,10 +660,10 @@
         cctx = zstd.ZstdCompressor(level=3, write_checksum=True)
         dest = OpCountingBytesIO()
         with cctx.write_to(dest) as compressor:
-            compressor.write(b'foobar' * 8192)
+            self.assertEqual(compressor.write(b'foobar' * 8192), 0)
             count = dest._write_count
             offset = dest.tell()
-            compressor.flush()
+            self.assertEqual(compressor.flush(), 23)
             self.assertGreater(dest._write_count, count)
             self.assertGreater(dest.tell(), offset)
             offset = dest.tell()
@@ -455,19 +677,33 @@
         header = trailing[0:3]
         self.assertEqual(header, b'\x01\x00\x00')
 
+    def test_multithreaded(self):
+        dest = io.BytesIO()
+        cctx = zstd.ZstdCompressor(threads=2)
+        with cctx.write_to(dest) as compressor:
+            compressor.write(b'a' * 1048576)
+            compressor.write(b'b' * 1048576)
+            compressor.write(b'c' * 1048576)
 
+        self.assertEqual(len(dest.getvalue()), 295)
+
+
+@make_cffi
 class TestCompressor_read_from(unittest.TestCase):
     def test_type_validation(self):
         cctx = zstd.ZstdCompressor()
 
         # Object with read() works.
-        cctx.read_from(io.BytesIO())
+        for chunk in cctx.read_from(io.BytesIO()):
+            pass
 
         # Buffer protocol works.
-        cctx.read_from(b'foobar')
+        for chunk in cctx.read_from(b'foobar'):
+            pass
 
         with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
-            cctx.read_from(True)
+            for chunk in cctx.read_from(True):
+                pass
 
     def test_read_empty(self):
         cctx = zstd.ZstdCompressor(level=1)
@@ -521,6 +757,12 @@
         # We should get the same output as the one-shot compression mechanism.
         self.assertEqual(b''.join(chunks), cctx.compress(source.getvalue()))
 
+        params = zstd.get_frame_parameters(b''.join(chunks))
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
         # Now check the buffer protocol.
         it = cctx.read_from(source.getvalue())
         chunks = list(it)
@@ -534,3 +776,130 @@
             self.assertEqual(len(chunk), 1)
 
         self.assertEqual(source._read_count, len(source.getvalue()) + 1)
+
+    def test_multithreaded(self):
+        source = io.BytesIO()
+        source.write(b'a' * 1048576)
+        source.write(b'b' * 1048576)
+        source.write(b'c' * 1048576)
+        source.seek(0)
+
+        cctx = zstd.ZstdCompressor(threads=2)
+
+        compressed = b''.join(cctx.read_from(source))
+        self.assertEqual(len(compressed), 295)
+
+
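+# Note: no @make_cffi decorator; the multi-buffer APIs are not yet
+# implemented by the cffi backend.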
+class TestCompressor_multi_compress_to_buffer(unittest.TestCase):
+    def test_multithreaded_unsupported(self):
+        cctx = zstd.ZstdCompressor(threads=2)
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'function cannot be called on ZstdCompressor configured for multi-threaded compression'):
+            cctx.multi_compress_to_buffer([b'foo'])
+
+    def test_invalid_inputs(self):
+        cctx = zstd.ZstdCompressor()
+
+        with self.assertRaises(TypeError):
+            cctx.multi_compress_to_buffer(True)
+
+        with self.assertRaises(TypeError):
+            cctx.multi_compress_to_buffer((1, 2))
+
+        with self.assertRaisesRegexp(TypeError, 'item 0 not a bytes like object'):
+            cctx.multi_compress_to_buffer([u'foo'])
+
+    def test_empty_input(self):
+        cctx = zstd.ZstdCompressor()
+
+        with self.assertRaisesRegexp(ValueError, 'no source elements found'):
+            cctx.multi_compress_to_buffer([])
+
+        with self.assertRaisesRegexp(ValueError, 'source elements are empty'):
+            cctx.multi_compress_to_buffer([b'', b'', b''])
+
+    def test_list_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
+
+        original = [b'foo' * 12, b'bar' * 6]
+        frames = [cctx.compress(c) for c in original]
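+        # multi_compress_to_buffer packs all output frames into one collection.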
+        b = cctx.multi_compress_to_buffer(original)
+
+        self.assertIsInstance(b, zstd.BufferWithSegmentsCollection)
+
+        self.assertEqual(len(b), 2)
+        self.assertEqual(b.size(), 44)
+
+        self.assertEqual(b[0].tobytes(), frames[0])
+        self.assertEqual(b[1].tobytes(), frames[1])
+
+    def test_buffer_with_segments_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
+
+        original = [b'foo' * 4, b'bar' * 6]
+        frames = [cctx.compress(c) for c in original]
+
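+        # Segments are (offset, length) pairs of native-endian uint64s.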
+        offsets = struct.pack('=QQQQ', 0, len(original[0]),
+                                       len(original[0]), len(original[1]))
+        segments = zstd.BufferWithSegments(b''.join(original), offsets)
+
+        result = cctx.multi_compress_to_buffer(segments)
+
+        self.assertEqual(len(result), 2)
+        self.assertEqual(result.size(), 47)
+
+        self.assertEqual(result[0].tobytes(), frames[0])
+        self.assertEqual(result[1].tobytes(), frames[1])
+
+    def test_buffer_with_segments_collection_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
+
+        original = [
+            b'foo1',
+            b'foo2' * 2,
+            b'foo3' * 3,
+            b'foo4' * 4,
+            b'foo5' * 5,
+        ]
+
+        frames = [cctx.compress(c) for c in original]
+
+        b = b''.join([original[0], original[1]])
+        b1 = zstd.BufferWithSegments(b, struct.pack('=QQQQ',
+                                                    0, len(original[0]),
+                                                    len(original[0]), len(original[1])))
+        b = b''.join([original[2], original[3], original[4]])
+        b2 = zstd.BufferWithSegments(b, struct.pack('=QQQQQQ',
+                                                    0, len(original[2]),
+                                                    len(original[2]), len(original[3]),
+                                                    len(original[2]) + len(original[3]), len(original[4])))
+
+        c = zstd.BufferWithSegmentsCollection(b1, b2)
+
+        result = cctx.multi_compress_to_buffer(c)
+
+        self.assertEqual(len(result), len(frames))
+
+        for i, frame in enumerate(frames):
+            self.assertEqual(result[i].tobytes(), frame)
+
+    def test_multiple_threads(self):
+        # Passing threads to the ZstdCompressor constructor would engage the
+        # multi-threaded ZSTD APIs, which emit different frames. The threads
+        # argument to multi_compress_to_buffer only parallelizes across
+        # inputs, so results should match the single-threaded reference.
+        refcctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
+        reference = [refcctx.compress(b'x' * 64), refcctx.compress(b'y' * 64)]
+
+        cctx = zstd.ZstdCompressor(write_content_size=True, write_checksum=True)
+
+        frames = []
+        frames.extend(b'x' * 64 for i in range(256))
+        frames.extend(b'y' * 64 for i in range(256))
+
+        result = cctx.multi_compress_to_buffer(frames, threads=-1)
+
+        self.assertEqual(len(result), 512)
+        for i in range(512):
+            if i < 256:
+                self.assertEqual(result[i].tobytes(), reference[0])
+            else:
+                self.assertEqual(result[i].tobytes(), reference[1])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,143 @@
+import io
+import os
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    import hypothesis
+    import hypothesis.strategies as strategies
+except ImportError:
+    raise unittest.SkipTest('hypothesis not available')
+
+import zstd
+
+from . common import (
+    make_cffi,
+    random_input_data,
+)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestCompressor_write_to_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      write_size=strategies.integers(min_value=1, max_value=1048576))
+    def test_write_size_variance(self, original, level, write_size):
+        refctx = zstd.ZstdCompressor(level=level)
+        ref_frame = refctx.compress(original)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        b = io.BytesIO()
+        with cctx.write_to(b, size=len(original), write_size=write_size) as compressor:
+            compressor.write(original)
+
+        self.assertEqual(b.getvalue(), ref_frame)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestCompressor_copy_stream_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      read_size=strategies.integers(min_value=1, max_value=1048576),
+                      write_size=strategies.integers(min_value=1, max_value=1048576))
+    def test_read_write_size_variance(self, original, level, read_size, write_size):
+        refctx = zstd.ZstdCompressor(level=level)
+        ref_frame = refctx.compress(original)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        source = io.BytesIO(original)
+        dest = io.BytesIO()
+
+        cctx.copy_stream(source, dest, size=len(original), read_size=read_size,
+                         write_size=write_size)
+
+        self.assertEqual(dest.getvalue(), ref_frame)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestCompressor_compressobj_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      chunk_sizes=strategies.streaming(
+                          strategies.integers(min_value=1, max_value=4096)))
+    def test_random_input_sizes(self, original, level, chunk_sizes):
+        chunk_sizes = iter(chunk_sizes)
+
+        refctx = zstd.ZstdCompressor(level=level)
+        ref_frame = refctx.compress(original)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        cobj = cctx.compressobj(size=len(original))
+
+        chunks = []
+        i = 0
+        while True:
+            chunk_size = next(chunk_sizes)
+            source = original[i:i + chunk_size]
+            if not source:
+                break
+
+            chunks.append(cobj.compress(source))
+            i += chunk_size
+
+        chunks.append(cobj.flush())
+
+        self.assertEqual(b''.join(chunks), ref_frame)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestCompressor_read_from_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      read_size=strategies.integers(min_value=1, max_value=4096),
+                      write_size=strategies.integers(min_value=1, max_value=4096))
+    def test_read_write_size_variance(self, original, level, read_size, write_size):
+        refcctx = zstd.ZstdCompressor(level=level)
+        ref_frame = refcctx.compress(original)
+
+        source = io.BytesIO(original)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        chunks = list(cctx.read_from(source, size=len(original), read_size=read_size,
+                                     write_size=write_size))
+
+        self.assertEqual(b''.join(chunks), ref_frame)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+class TestCompressor_multi_compress_to_buffer_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.lists(strategies.sampled_from(random_input_data()),
+                                                min_size=1, max_size=1024),
+                      threads=strategies.integers(min_value=1, max_value=8),
+                      use_dict=strategies.booleans())
+    def test_data_equivalence(self, original, threads, use_dict):
+        kwargs = {}
+
+        # Use a content dictionary because it is cheap to create.
+        if use_dict:
+            kwargs['dict_data'] = zstd.ZstdCompressionDict(original[0])
+
+        cctx = zstd.ZstdCompressor(level=1,
+                                   write_content_size=True,
+                                   write_checksum=True,
+                                   **kwargs)
+
+        result = cctx.multi_compress_to_buffer(original, threads=-1)
+
+        self.assertEqual(len(result), len(original))
+
+        # The frame produced via the batch APIs may not be bit identical to that
+        # produced by compress() because compression parameters are adjusted
+        # from the first input in batch mode. So the only thing we can do is
+        # verify the decompressed data matches the input.
+        dctx = zstd.ZstdDecompressor(**kwargs)
+
+        for i, frame in enumerate(result):
+            self.assertEqual(dctx.decompress(frame), original[i])
--- a/contrib/python-zstandard/tests/test_data_structures.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Tue Apr 18 12:24:34 2017 -0400
@@ -1,18 +1,16 @@
-import io
-
 try:
     import unittest2 as unittest
 except ImportError:
     import unittest
 
-try:
-    import hypothesis
-    import hypothesis.strategies as strategies
-except ImportError:
-    hypothesis = None
-
 import zstd
 
+from . common import (
+    make_cffi,
+)
+
+
+@make_cffi
 class TestCompressionParameters(unittest.TestCase):
     def test_init_bad_arg_type(self):
         with self.assertRaises(TypeError):
@@ -26,7 +24,7 @@
                                    zstd.CHAINLOG_MIN,
                                    zstd.HASHLOG_MIN,
                                    zstd.SEARCHLOG_MIN,
-                                   zstd.SEARCHLENGTH_MIN,
+                                   zstd.SEARCHLENGTH_MIN + 1,
                                    zstd.TARGETLENGTH_MIN,
                                    zstd.STRATEGY_FAST)
 
@@ -34,7 +32,7 @@
                                    zstd.CHAINLOG_MAX,
                                    zstd.HASHLOG_MAX,
                                    zstd.SEARCHLOG_MAX,
-                                   zstd.SEARCHLENGTH_MAX,
+                                   zstd.SEARCHLENGTH_MAX - 1,
                                    zstd.TARGETLENGTH_MAX,
                                    zstd.STRATEGY_BTOPT)
 
@@ -42,66 +40,84 @@
         p = zstd.get_compression_parameters(1)
         self.assertIsInstance(p, zstd.CompressionParameters)
 
-        self.assertEqual(p[0], 19)
+        self.assertEqual(p.window_log, 19)
+
+    def test_members(self):
+        p = zstd.CompressionParameters(10, 6, 7, 4, 5, 8, 1)
+        self.assertEqual(p.window_log, 10)
+        self.assertEqual(p.chain_log, 6)
+        self.assertEqual(p.hash_log, 7)
+        self.assertEqual(p.search_log, 4)
+        self.assertEqual(p.search_length, 5)
+        self.assertEqual(p.target_length, 8)
+        self.assertEqual(p.strategy, 1)
+
+    def test_estimated_compression_context_size(self):
+        p = zstd.CompressionParameters(20, 16, 17, 1, 5, 16, zstd.STRATEGY_DFAST)
+
+        # 32-bit has slightly different values from 64-bit.
+        self.assertAlmostEqual(p.estimated_compression_context_size(), 1287076,
+                               delta=110)
+
 
-if hypothesis:
-    s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN,
-                                      max_value=zstd.WINDOWLOG_MAX)
-    s_chainlog = strategies.integers(min_value=zstd.CHAINLOG_MIN,
-                                     max_value=zstd.CHAINLOG_MAX)
-    s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN,
-                                    max_value=zstd.HASHLOG_MAX)
-    s_searchlog = strategies.integers(min_value=zstd.SEARCHLOG_MIN,
-                                      max_value=zstd.SEARCHLOG_MAX)
-    s_searchlength = strategies.integers(min_value=zstd.SEARCHLENGTH_MIN,
-                                         max_value=zstd.SEARCHLENGTH_MAX)
-    s_targetlength = strategies.integers(min_value=zstd.TARGETLENGTH_MIN,
-                                         max_value=zstd.TARGETLENGTH_MAX)
-    s_strategy = strategies.sampled_from((zstd.STRATEGY_FAST,
-                                          zstd.STRATEGY_DFAST,
-                                          zstd.STRATEGY_GREEDY,
-                                          zstd.STRATEGY_LAZY,
-                                          zstd.STRATEGY_LAZY2,
-                                          zstd.STRATEGY_BTLAZY2,
-                                          zstd.STRATEGY_BTOPT))
+@make_cffi
+class TestFrameParameters(unittest.TestCase):
+    def test_invalid_type(self):
+        with self.assertRaises(TypeError):
+            zstd.get_frame_parameters(None)
+
+        with self.assertRaises(TypeError):
+            zstd.get_frame_parameters(u'foobarbaz')
+
+    def test_invalid_input_sizes(self):
+        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
+            zstd.get_frame_parameters(b'')
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'not enough data for frame'):
+            zstd.get_frame_parameters(zstd.FRAME_HEADER)
+
+    def test_invalid_frame(self):
+        with self.assertRaisesRegexp(zstd.ZstdError, 'Unknown frame descriptor'):
+            zstd.get_frame_parameters(b'foobarbaz')
 
-    class TestCompressionParametersHypothesis(unittest.TestCase):
-        @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
-                          s_searchlength, s_targetlength, s_strategy)
-        def test_valid_init(self, windowlog, chainlog, hashlog, searchlog,
-                            searchlength, targetlength, strategy):
-            p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
-                                           searchlog, searchlength,
-                                           targetlength, strategy)
-            self.assertEqual(tuple(p),
-                             (windowlog, chainlog, hashlog, searchlog,
-                              searchlength, targetlength, strategy))
+    def test_attributes(self):
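+        # The byte after the 4-byte frame magic is the frame header descriptor.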
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x00')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
+
+        # Lowest 2 bits give the size of the dictionary ID field. Here it is 1 byte.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x01\x00\xff')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 255)
+        self.assertFalse(params.has_checksum)
+
+        # Bit 2 (the third-lowest bit) indicates whether a checksum is present.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x04\x00')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertTrue(params.has_checksum)
 
-            # Verify we can instantiate a compressor with the supplied values.
-            # ZSTD_checkCParams moves the goal posts on us from what's advertised
-            # in the constants. So move along with them.
-            if searchlength == zstd.SEARCHLENGTH_MIN and strategy in (zstd.STRATEGY_FAST, zstd.STRATEGY_GREEDY):
-                searchlength += 1
-                p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
-                                searchlog, searchlength,
-                                targetlength, strategy)
-            elif searchlength == zstd.SEARCHLENGTH_MAX and strategy != zstd.STRATEGY_FAST:
-                searchlength -= 1
-                p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
-                                searchlog, searchlength,
-                                targetlength, strategy)
+        # Upper 2 bits select the width of the content size field. A 2-byte
+        # field stores (size - 256), so 0x00ff decodes to 511.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x40\x00\xff\x00')
+        self.assertEqual(params.content_size, 511)
+        self.assertEqual(params.window_size, 1024)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
-            cctx = zstd.ZstdCompressor(compression_params=p)
-            with cctx.write_to(io.BytesIO()):
-                pass
+        # The window descriptor is the 2nd byte after the frame magic.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x00\x40')
+        self.assertEqual(params.content_size, 0)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 0)
+        self.assertFalse(params.has_checksum)
 
-        @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
-                          s_searchlength, s_targetlength, s_strategy)
-        def test_estimate_compression_context_size(self, windowlog, chainlog,
-                                                   hashlog, searchlog,
-                                                   searchlength, targetlength,
-                                                   strategy):
-            p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
-                                searchlog, searchlength,
-                                targetlength, strategy)
-            size = zstd.estimate_compression_context_size(p)
+        # Set multiple things.
+        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b'\x45\x40\x0f\x10\x00')
+        self.assertEqual(params.content_size, 272)
+        self.assertEqual(params.window_size, 262144)
+        self.assertEqual(params.dict_id, 15)
+        self.assertTrue(params.has_checksum)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,79 @@
+import io
+import os
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    import hypothesis
+    import hypothesis.strategies as strategies
+except ImportError:
+    raise unittest.SkipTest('hypothesis not available')
+
+import zstd
+
+from .common import (
+    make_cffi,
+)
+
+
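+# Strategies covering the full advertised range of each compression parameter.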
+s_windowlog = strategies.integers(min_value=zstd.WINDOWLOG_MIN,
+                                  max_value=zstd.WINDOWLOG_MAX)
+s_chainlog = strategies.integers(min_value=zstd.CHAINLOG_MIN,
+                                 max_value=zstd.CHAINLOG_MAX)
+s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN,
+                                max_value=zstd.HASHLOG_MAX)
+s_searchlog = strategies.integers(min_value=zstd.SEARCHLOG_MIN,
+                                  max_value=zstd.SEARCHLOG_MAX)
+s_searchlength = strategies.integers(min_value=zstd.SEARCHLENGTH_MIN,
+                                     max_value=zstd.SEARCHLENGTH_MAX)
+s_targetlength = strategies.integers(min_value=zstd.TARGETLENGTH_MIN,
+                                     max_value=zstd.TARGETLENGTH_MAX)
+s_strategy = strategies.sampled_from((zstd.STRATEGY_FAST,
+                                      zstd.STRATEGY_DFAST,
+                                      zstd.STRATEGY_GREEDY,
+                                      zstd.STRATEGY_LAZY,
+                                      zstd.STRATEGY_LAZY2,
+                                      zstd.STRATEGY_BTLAZY2,
+                                      zstd.STRATEGY_BTOPT))
+
+
+@make_cffi
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+class TestCompressionParametersHypothesis(unittest.TestCase):
+    @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
+                      s_searchlength, s_targetlength, s_strategy)
+    def test_valid_init(self, windowlog, chainlog, hashlog, searchlog,
+                        searchlength, targetlength, strategy):
+        # ZSTD_checkCParams moves the goal posts on us from what's advertised
+        # in the constants. So move along with them.
+        if searchlength == zstd.SEARCHLENGTH_MIN and strategy in (zstd.STRATEGY_FAST, zstd.STRATEGY_GREEDY):
+            searchlength += 1
+        elif searchlength == zstd.SEARCHLENGTH_MAX and strategy != zstd.STRATEGY_FAST:
+            searchlength -= 1
+
+        p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                        searchlog, searchlength,
+                                        targetlength, strategy)
+
+        cctx = zstd.ZstdCompressor(compression_params=p)
+        with cctx.write_to(io.BytesIO()):
+            pass
+
+    @hypothesis.given(s_windowlog, s_chainlog, s_hashlog, s_searchlog,
+                      s_searchlength, s_targetlength, s_strategy)
+    def test_estimate_compression_context_size(self, windowlog, chainlog,
+                                                hashlog, searchlog,
+                                                searchlength, targetlength,
+                                                strategy):
+        if searchlength == zstd.SEARCHLENGTH_MIN and strategy in (zstd.STRATEGY_FAST, zstd.STRATEGY_GREEDY):
+            searchlength += 1
+        elif searchlength == zstd.SEARCHLENGTH_MAX and strategy != zstd.STRATEGY_FAST:
+            searchlength -= 1
+
+        p = zstd.CompressionParameters(windowlog, chainlog, hashlog,
+                                       searchlog, searchlength,
+                                       targetlength, strategy)
+        size = zstd.estimate_compression_context_size(p)
--- a/contrib/python-zstandard/tests/test_decompressor.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Tue Apr 18 12:24:34 2017 -0400
@@ -10,7 +10,10 @@
 
 import zstd
 
-from .common import OpCountingBytesIO
+from .common import (
+    make_cffi,
+    OpCountingBytesIO,
+)
 
 
 if sys.version_info[0] >= 3:
@@ -19,6 +22,7 @@
     next = lambda it: it.next()
 
 
+@make_cffi
 class TestDecompressor_decompress(unittest.TestCase):
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
@@ -45,7 +49,7 @@
         compressed = cctx.compress(b'foobar')
 
         dctx = zstd.ZstdDecompressor()
-        decompressed  = dctx.decompress(compressed)
+        decompressed = dctx.decompress(compressed)
         self.assertEqual(decompressed, b'foobar')
 
     def test_max_output_size(self):
@@ -119,6 +123,7 @@
             self.assertEqual(decompressed, sources[i])
 
 
+@make_cffi
 class TestDecompressor_copy_stream(unittest.TestCase):
     def test_no_read(self):
         source = object()
@@ -180,6 +185,7 @@
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
+@make_cffi
 class TestDecompressor_decompressobj(unittest.TestCase):
     def test_simple(self):
         data = zstd.ZstdCompressor(level=1).compress(b'foobar')
@@ -207,6 +213,7 @@
     return buffer.getvalue()
 
 
+@make_cffi
 class TestDecompressor_write_to(unittest.TestCase):
     def test_empty_roundtrip(self):
         cctx = zstd.ZstdCompressor()
@@ -256,14 +263,14 @@
         buffer = io.BytesIO()
         cctx = zstd.ZstdCompressor(dict_data=d)
         with cctx.write_to(buffer) as compressor:
-            compressor.write(orig)
+            self.assertEqual(compressor.write(orig), 1544)
 
         compressed = buffer.getvalue()
         buffer = io.BytesIO()
 
         dctx = zstd.ZstdDecompressor(dict_data=d)
         with dctx.write_to(buffer) as decompressor:
-            decompressor.write(compressed)
+            self.assertEqual(decompressor.write(compressed), len(orig))
 
         self.assertEqual(buffer.getvalue(), orig)
 
@@ -286,11 +293,11 @@
                     c = s.pack(c)
                 decompressor.write(c)
 
-
         self.assertEqual(dest.getvalue(), b'foobarfoobar')
         self.assertEqual(dest._write_count, len(dest.getvalue()))
 
 
+@make_cffi
 class TestDecompressor_read_from(unittest.TestCase):
     def test_type_validation(self):
         dctx = zstd.ZstdDecompressor()
@@ -302,7 +309,7 @@
         dctx.read_from(b'foobar')
 
         with self.assertRaisesRegexp(ValueError, 'must pass an object with a read'):
-            dctx.read_from(True)
+            b''.join(dctx.read_from(True))
 
     def test_empty_input(self):
         dctx = zstd.ZstdDecompressor()
@@ -351,7 +358,7 @@
         dctx = zstd.ZstdDecompressor()
 
         with self.assertRaisesRegexp(ValueError, 'skip_bytes must be smaller than read_size'):
-            dctx.read_from(b'', skip_bytes=1, read_size=1)
+            b''.join(dctx.read_from(b'', skip_bytes=1, read_size=1))
 
         with self.assertRaisesRegexp(ValueError, 'skip_bytes larger than first input chunk'):
             b''.join(dctx.read_from(b'foobar', skip_bytes=10))
@@ -476,3 +483,259 @@
             self.assertEqual(len(chunk), 1)
 
         self.assertEqual(source._read_count, len(source.getvalue()))
+
+
+@make_cffi
+class TestDecompressor_content_dict_chain(unittest.TestCase):
+    def test_bad_inputs_simple(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaises(TypeError):
+            dctx.decompress_content_dict_chain(b'foo')
+
+        with self.assertRaises(TypeError):
+            dctx.decompress_content_dict_chain((b'foo', b'bar'))
+
+        with self.assertRaisesRegexp(ValueError, 'empty input chain'):
+            dctx.decompress_content_dict_chain([])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
+            dctx.decompress_content_dict_chain([u'foo'])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 must be bytes'):
+            dctx.decompress_content_dict_chain([True])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 is too small to contain a zstd frame'):
+            dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 is not a valid zstd frame'):
+            dctx.decompress_content_dict_chain([b'foo' * 8])
+
+        no_size = zstd.ZstdCompressor().compress(b'foo' * 64)
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 0 missing content size in frame'):
+            dctx.decompress_content_dict_chain([no_size])
+
+        # Corrupt first frame.
+        frame = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64)
+        frame = frame[0:12] + frame[15:]
+        with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 0'):
+            dctx.decompress_content_dict_chain([frame])
+
+    def test_bad_subsequent_input(self):
+        initial = zstd.ZstdCompressor(write_content_size=True).compress(b'foo' * 64)
+
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
+            dctx.decompress_content_dict_chain([initial, u'foo'])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 must be bytes'):
+            dctx.decompress_content_dict_chain([initial, None])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 is too small to contain a zstd frame'):
+            dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 is not a valid zstd frame'):
+            dctx.decompress_content_dict_chain([initial, b'foo' * 8])
+
+        no_size = zstd.ZstdCompressor().compress(b'foo' * 64)
+
+        with self.assertRaisesRegexp(ValueError, 'chunk 1 missing content size in frame'):
+            dctx.decompress_content_dict_chain([initial, no_size])
+
+        # Corrupt second frame.
+        cctx = zstd.ZstdCompressor(write_content_size=True, dict_data=zstd.ZstdCompressionDict(b'foo' * 64))
+        frame = cctx.compress(b'bar' * 64)
+        frame = frame[0:12] + frame[15:]
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'could not decompress chunk 1'):
+            dctx.decompress_content_dict_chain([initial, frame])
+
+    def test_simple(self):
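+        # Each chunk is compressed with the previous plaintext chunk as its
+        # dictionary; decompressing a chain yields the final chunk's plaintext.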
+        original = [
+            b'foo' * 64,
+            b'foobar' * 64,
+            b'baz' * 64,
+            b'foobaz' * 64,
+            b'foobarbaz' * 64,
+        ]
+
+        chunks = []
+        chunks.append(zstd.ZstdCompressor(write_content_size=True).compress(original[0]))
+        for i, chunk in enumerate(original[1:]):
+            d = zstd.ZstdCompressionDict(original[i])
+            cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True)
+            chunks.append(cctx.compress(chunk))
+
+        for i in range(1, len(original)):
+            chain = chunks[0:i]
+            expected = original[i - 1]
+            dctx = zstd.ZstdDecompressor()
+            decompressed = dctx.decompress_content_dict_chain(chain)
+            self.assertEqual(decompressed, expected)
+
+
+# TODO enable for CFFI
+class TestDecompressor_multi_decompress_to_buffer(unittest.TestCase):
+    def test_invalid_inputs(self):
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaises(TypeError):
+            dctx.multi_decompress_to_buffer(True)
+
+        with self.assertRaises(TypeError):
+            dctx.multi_decompress_to_buffer((1, 2))
+
+        with self.assertRaisesRegexp(TypeError, 'item 0 not a bytes like object'):
+            dctx.multi_decompress_to_buffer([u'foo'])
+
+        with self.assertRaisesRegexp(ValueError, 'could not determine decompressed size of item 0'):
+            dctx.multi_decompress_to_buffer([b'foobarbaz'])
+
+    def test_list_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+
+        original = [b'foo' * 4, b'bar' * 6]
+        frames = [cctx.compress(d) for d in original]
+
+        dctx = zstd.ZstdDecompressor()
+        result = dctx.multi_decompress_to_buffer(frames)
+
+        self.assertEqual(len(result), len(frames))
+        self.assertEqual(result.size(), sum(map(len, original)))
+
+        for i, data in enumerate(original):
+            self.assertEqual(result[i].tobytes(), data)
+
+        self.assertEqual(result[0].offset, 0)
+        self.assertEqual(len(result[0]), 12)
+        self.assertEqual(result[1].offset, 12)
+        self.assertEqual(len(result[1]), 18)
+
+    def test_list_input_frame_sizes(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+
+        original = [b'foo' * 4, b'bar' * 6, b'baz' * 8]
+        frames = [cctx.compress(d) for d in original]
+        sizes = struct.pack('=' + 'Q' * len(original), *map(len, original))
+
+        dctx = zstd.ZstdDecompressor()
+        result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+
+        self.assertEqual(len(result), len(frames))
+        self.assertEqual(result.size(), sum(map(len, original)))
+
+        for i, data in enumerate(original):
+            self.assertEqual(result[i].tobytes(), data)
+
+    def test_buffer_with_segments_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+
+        original = [b'foo' * 4, b'bar' * 6]
+        frames = [cctx.compress(d) for d in original]
+
+        dctx = zstd.ZstdDecompressor()
+
+        segments = struct.pack('=QQQQ', 0, len(frames[0]), len(frames[0]), len(frames[1]))
+        b = zstd.BufferWithSegments(b''.join(frames), segments)
+
+        result = dctx.multi_decompress_to_buffer(b)
+
+        self.assertEqual(len(result), len(frames))
+        self.assertEqual(result[0].offset, 0)
+        self.assertEqual(len(result[0]), 12)
+        self.assertEqual(result[1].offset, 12)
+        self.assertEqual(len(result[1]), 18)
+
+    def test_buffer_with_segments_sizes(self):
+        cctx = zstd.ZstdCompressor(write_content_size=False)
+        original = [b'foo' * 4, b'bar' * 6, b'baz' * 8]
+        frames = [cctx.compress(d) for d in original]
+        sizes = struct.pack('=' + 'Q' * len(original), *map(len, original))
+
+        segments = struct.pack('=QQQQQQ', 0, len(frames[0]),
+                               len(frames[0]), len(frames[1]),
+                               len(frames[0]) + len(frames[1]), len(frames[2]))
+        b = zstd.BufferWithSegments(b''.join(frames), segments)
+
+        dctx = zstd.ZstdDecompressor()
+        result = dctx.multi_decompress_to_buffer(b, decompressed_sizes=sizes)
+
+        self.assertEqual(len(result), len(frames))
+        self.assertEqual(result.size(), sum(map(len, original)))
+
+        for i, data in enumerate(original):
+            self.assertEqual(result[i].tobytes(), data)
+
+    def test_buffer_with_segments_collection_input(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+
+        original = [
+            b'foo0' * 2,
+            b'foo1' * 3,
+            b'foo2' * 4,
+            b'foo3' * 5,
+            b'foo4' * 6,
+        ]
+
+        frames = cctx.multi_compress_to_buffer(original)
+
+        # Check round trip.
+        dctx = zstd.ZstdDecompressor()
+        decompressed = dctx.multi_decompress_to_buffer(frames, threads=3)
+
+        self.assertEqual(len(decompressed), len(original))
+
+        for i, data in enumerate(original):
+            self.assertEqual(data, decompressed[i].tobytes())
+
+        # And a manually assembled BufferWithSegmentsCollection.
+        b = b''.join([frames[0].tobytes(), frames[1].tobytes()])
+        b1 = zstd.BufferWithSegments(b, struct.pack('=QQQQ',
+                                                    0, len(frames[0]),
+                                                    len(frames[0]), len(frames[1])))
+
+        b = b''.join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b2 = zstd.BufferWithSegments(b, struct.pack('=QQQQQQ',
+                                                    0, len(frames[2]),
+                                                    len(frames[2]), len(frames[3]),
+                                                    len(frames[2]) + len(frames[3]), len(frames[4])))
+
+        c = zstd.BufferWithSegmentsCollection(b1, b2)
+
+        dctx = zstd.ZstdDecompressor()
+        decompressed = dctx.multi_decompress_to_buffer(c)
+
+        self.assertEqual(len(decompressed), 5)
+        for i in range(5):
+            self.assertEqual(decompressed[i].tobytes(), original[i])
+
+    def test_multiple_threads(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+
+        frames = []
+        frames.extend(cctx.compress(b'x' * 64) for i in range(256))
+        frames.extend(cctx.compress(b'y' * 64) for i in range(256))
+
+        dctx = zstd.ZstdDecompressor()
+        result = dctx.multi_decompress_to_buffer(frames, threads=-1)
+
+        self.assertEqual(len(result), len(frames))
+        self.assertEqual(result.size(), 2 * 64 * 256)
+        self.assertEqual(result[0].tobytes(), b'x' * 64)
+        self.assertEqual(result[256].tobytes(), b'y' * 64)
+
+    def test_item_failure(self):
+        cctx = zstd.ZstdCompressor(write_content_size=True)
+        frames = [cctx.compress(b'x' * 128), cctx.compress(b'y' * 128)]
+
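+        # Trailing garbage makes the second frame's recorded source size incorrect.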
+        frames[1] = frames[1] + b'extra'
+
+        dctx = zstd.ZstdDecompressor()
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'error decompressing item 1: Src size incorrect'):
+            dctx.multi_decompress_to_buffer(frames)
+
+        with self.assertRaisesRegexp(zstd.ZstdError, 'error decompressing item 1: Src size incorrect'):
+            dctx.multi_decompress_to_buffer(frames, threads=2)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,151 @@
+import io
+import os
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    import hypothesis
+    import hypothesis.strategies as strategies
+except ImportError:
+    raise unittest.SkipTest('hypothesis not available')
+
+import zstd
+
+from . common import (
+    make_cffi,
+    random_input_data,
+)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestDecompressor_write_to_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      write_size=strategies.integers(min_value=1, max_value=8192),
+                      input_sizes=strategies.streaming(
+                          strategies.integers(min_value=1, max_value=4096)))
+    def test_write_size_variance(self, original, level, write_size, input_sizes):
+        input_sizes = iter(input_sizes)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        frame = cctx.compress(original)
+
+        dctx = zstd.ZstdDecompressor()
+        source = io.BytesIO(frame)
+        dest = io.BytesIO()
+
+        with dctx.write_to(dest, write_size=write_size) as decompressor:
+            while True:
+                chunk = source.read(next(input_sizes))
+                if not chunk:
+                    break
+
+                decompressor.write(chunk)
+
+        self.assertEqual(dest.getvalue(), original)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestDecompressor_copy_stream_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      read_size=strategies.integers(min_value=1, max_value=8192),
+                      write_size=strategies.integers(min_value=1, max_value=8192))
+    def test_read_write_size_variance(self, original, level, read_size, write_size):
+        cctx = zstd.ZstdCompressor(level=level)
+        frame = cctx.compress(original)
+
+        source = io.BytesIO(frame)
+        dest = io.BytesIO()
+
+        dctx = zstd.ZstdDecompressor()
+        dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+
+        self.assertEqual(dest.getvalue(), original)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestDecompressor_decompressobj_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      chunk_sizes=strategies.streaming(
+                          strategies.integers(min_value=1, max_value=4096)))
+    def test_random_input_sizes(self, original, level, chunk_sizes):
+        chunk_sizes = iter(chunk_sizes)
+
+        cctx = zstd.ZstdCompressor(level=level)
+        frame = cctx.compress(original)
+
+        source = io.BytesIO(frame)
+
+        dctx = zstd.ZstdDecompressor()
+        dobj = dctx.decompressobj()
+
+        chunks = []
+        while True:
+            chunk = source.read(next(chunk_sizes))
+            if not chunk:
+                break
+
+            chunks.append(dobj.decompress(chunk))
+
+        self.assertEqual(b''.join(chunks), original)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+@make_cffi
+class TestDecompressor_read_from_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.sampled_from(random_input_data()),
+                      level=strategies.integers(min_value=1, max_value=5),
+                      read_size=strategies.integers(min_value=1, max_value=4096),
+                      write_size=strategies.integers(min_value=1, max_value=4096))
+    def test_read_write_size_variance(self, original, level, read_size, write_size):
+        cctx = zstd.ZstdCompressor(level=level)
+        frame = cctx.compress(original)
+
+        source = io.BytesIO(frame)
+
+        dctx = zstd.ZstdDecompressor()
+        chunks = list(dctx.read_from(source, read_size=read_size, write_size=write_size))
+
+        self.assertEqual(b''.join(chunks), original)
+
+
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
+class TestDecompressor_multi_decompress_to_buffer_fuzzing(unittest.TestCase):
+    @hypothesis.given(original=strategies.lists(strategies.sampled_from(random_input_data()),
+                                                min_size=1, max_size=1024),
+                      threads=strategies.integers(min_value=1, max_value=8),
+                      use_dict=strategies.booleans())
+    def test_data_equivalence(self, original, threads, use_dict):
+        kwargs = {}
+        if use_dict:
+            kwargs['dict_data'] = zstd.ZstdCompressionDict(original[0])
+
+        cctx = zstd.ZstdCompressor(level=1,
+                                   write_content_size=True,
+                                   write_checksum=True,
+                                   **kwargs)
+
+        frames_buffer = cctx.multi_compress_to_buffer(original, threads=-1)
+
+        dctx = zstd.ZstdDecompressor(**kwargs)
+
+        result = dctx.multi_decompress_to_buffer(frames_buffer)
+
+        self.assertEqual(len(result), len(original))
+        for i, frame in enumerate(result):
+            self.assertEqual(frame.tobytes(), original[i])
+
+        frames_list = [f.tobytes() for f in frames_buffer]
+        result = dctx.multi_decompress_to_buffer(frames_list)
+
+        self.assertEqual(len(result), len(original))
+        for i, frame in enumerate(result):
+            self.assertEqual(frame.tobytes(), original[i])
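For reference, a minimal sketch of the batch APIs the fuzz test above
exercises; the sample data is illustrative, and every call and keyword is
taken from the test itself.

    import zstd

    samples = [b'foo' * 64, b'bar' * 64, b'baz' * 64]

    cctx = zstd.ZstdCompressor(level=1, write_content_size=True)
    frames = cctx.multi_compress_to_buffer(samples, threads=-1)

    # multi_decompress_to_buffer accepts the buffer-of-frames result
    # directly, or a plain list of frame bytes, as the test shows.
    dctx = zstd.ZstdDecompressor()
    result = dctx.multi_decompress_to_buffer(frames)
    assert [segment.tobytes() for segment in result] == samples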
--- a/contrib/python-zstandard/tests/test_estimate_sizes.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_estimate_sizes.py	Tue Apr 18 12:24:34 2017 -0400
@@ -5,7 +5,12 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
 
+
+@make_cffi
 class TestSizes(unittest.TestCase):
     def test_decompression_size(self):
         size = zstd.estimate_decompression_context_size()
--- a/contrib/python-zstandard/tests/test_module_attributes.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_module_attributes.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,9 +7,15 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
+
+
+@make_cffi
 class TestModuleAttributes(unittest.TestCase):
     def test_version(self):
-        self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 2))
+        self.assertEqual(zstd.ZSTD_VERSION, (1, 1, 3))
 
     def test_constants(self):
         self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
@@ -45,4 +51,4 @@
         )
 
         for a in attrs:
-            self.assertTrue(hasattr(zstd, a))
+            self.assertTrue(hasattr(zstd, a), a)
--- a/contrib/python-zstandard/tests/test_roundtrip.py	Tue Apr 18 11:22:42 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-import io
-
-try:
-    import unittest2 as unittest
-except ImportError:
-    import unittest
-
-try:
-    import hypothesis
-    import hypothesis.strategies as strategies
-except ImportError:
-    raise unittest.SkipTest('hypothesis not available')
-
-import zstd
-
-
-compression_levels = strategies.integers(min_value=1, max_value=22)
-
-
-class TestRoundTrip(unittest.TestCase):
-    @hypothesis.given(strategies.binary(), compression_levels)
-    def test_compress_write_to(self, data, level):
-        """Random data from compress() roundtrips via write_to."""
-        cctx = zstd.ZstdCompressor(level=level)
-        compressed = cctx.compress(data)
-
-        buffer = io.BytesIO()
-        dctx = zstd.ZstdDecompressor()
-        with dctx.write_to(buffer) as decompressor:
-            decompressor.write(compressed)
-
-        self.assertEqual(buffer.getvalue(), data)
-
-    @hypothesis.given(strategies.binary(), compression_levels)
-    def test_compressor_write_to_decompressor_write_to(self, data, level):
-        """Random data from compressor write_to roundtrips via write_to."""
-        compress_buffer = io.BytesIO()
-        decompressed_buffer = io.BytesIO()
-
-        cctx = zstd.ZstdCompressor(level=level)
-        with cctx.write_to(compress_buffer) as compressor:
-            compressor.write(data)
-
-        dctx = zstd.ZstdDecompressor()
-        with dctx.write_to(decompressed_buffer) as decompressor:
-            decompressor.write(compress_buffer.getvalue())
-
-        self.assertEqual(decompressed_buffer.getvalue(), data)
-
-    @hypothesis.given(strategies.binary(average_size=1048576))
-    @hypothesis.settings(perform_health_check=False)
-    def test_compressor_write_to_decompressor_write_to_larger(self, data):
-        compress_buffer = io.BytesIO()
-        decompressed_buffer = io.BytesIO()
-
-        cctx = zstd.ZstdCompressor(level=5)
-        with cctx.write_to(compress_buffer) as compressor:
-            compressor.write(data)
-
-        dctx = zstd.ZstdDecompressor()
-        with dctx.write_to(decompressed_buffer) as decompressor:
-            decompressor.write(compress_buffer.getvalue())
-
-        self.assertEqual(decompressed_buffer.getvalue(), data)
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,6 +7,9 @@
 
 import zstd
 
+from . common import (
+    make_cffi,
+)
 
 if sys.version_info[0] >= 3:
     int_type = int
@@ -14,6 +17,7 @@
     int_type = long
 
 
+@make_cffi
 class TestTrainDictionary(unittest.TestCase):
     def test_no_args(self):
         with self.assertRaises(TypeError):
@@ -44,3 +48,63 @@
 
         data = d.as_bytes()
         self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
+
+    def test_set_dict_id(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_dictionary(8192, samples, dict_id=42)
+        self.assertEqual(d.dict_id(), 42)
+
+
+@make_cffi
+class TestTrainCoverDictionary(unittest.TestCase):
+    def test_no_args(self):
+        with self.assertRaises(TypeError):
+            zstd.train_cover_dictionary()
+
+    def test_bad_args(self):
+        with self.assertRaises(TypeError):
+            zstd.train_cover_dictionary(8192, u'foo')
+
+        with self.assertRaises(ValueError):
+            zstd.train_cover_dictionary(8192, [u'foo'])
+
+    def test_basic(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_cover_dictionary(8192, samples, k=64, d=16)
+        self.assertIsInstance(d.dict_id(), int_type)
+
+        data = d.as_bytes()
+        self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')
+
+        self.assertEqual(d.k, 64)
+        self.assertEqual(d.d, 16)
+
+    def test_set_dict_id(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_cover_dictionary(8192, samples, k=64, d=16,
+                                        dict_id=42)
+        self.assertEqual(d.dict_id(), 42)
+
+    def test_optimize(self):
+        samples = []
+        for i in range(128):
+            samples.append(b'foo' * 64)
+            samples.append(b'foobar' * 64)
+
+        d = zstd.train_cover_dictionary(8192, samples, optimize=True,
+                                        threads=-1, steps=1, d=16)
+
+        self.assertEqual(d.k, 16)
+        self.assertEqual(d.d, 16)
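For reference, a sketch of the new COVER trainer driven the same way as the
tests above; the k and d values are illustrative, not tuned.

    import zstd

    samples = [b'foo' * 64, b'foobar' * 64] * 128

    # Explicit segment size (k) and dmer size (d), plus a pinned dict ID.
    d = zstd.train_cover_dictionary(8192, samples, k=64, d=16, dict_id=42)
    print(d.dict_id(), d.k, d.d)

    # With optimize=True the trainer searches for good parameters itself
    # and records the winning k and d on the returned dictionary.
    d2 = zstd.train_cover_dictionary(8192, samples, optimize=True,
                                     threads=-1, steps=1, d=16)
    print(d2.k, d2.d)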
--- a/contrib/python-zstandard/zstd.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd.c	Tue Apr 18 12:24:34 2017 -0400
@@ -8,6 +8,14 @@
 
 /* A Python C extension for Zstandard. */
 
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#include <Windows.h>
+#elif defined(__APPLE__) || defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
 #include "python-zstandard.h"
 
 PyObject *ZstdError;
@@ -34,6 +42,11 @@
 "Obtains a ``CompressionParameters`` instance from a compression level and\n"
 "optional input size and dictionary size");
 
+PyDoc_STRVAR(get_frame_parameters__doc__,
+"get_frame_parameters(data)\n"
+"\n"
+"Obtains a ``FrameParameters`` instance by parsing data.\n");
+
 PyDoc_STRVAR(train_dictionary__doc__,
 "train_dictionary(dict_size, samples)\n"
 "\n"
@@ -44,25 +57,42 @@
 "\n"
 "The raw dictionary content will be returned\n");
 
+PyDoc_STRVAR(train_cover_dictionary__doc__,
+"train_cover_dictionary(dict_size, samples, k=None, d=None, notifications=0, dict_id=0, level=0)\n"
+"\n"
+"Train a dictionary from sample data using the COVER algorithm.\n"
+"\n"
+"This behaves like ``train_dictionary()`` except a different algorithm is\n"
+"used to create the dictionary. The algorithm has 2 parameters: ``k`` and\n"
+"``d``. These control the *segment size* and *dmer size*. A reasonable range\n"
+"for ``k`` is ``[16, 2048+]``. A reasonable range for ``d`` is ``[6, 16]``.\n"
+"``d`` must be less than or equal to ``k``.\n"
+);
+
 static char zstd_doc[] = "Interface to zstandard";
 
 static PyMethodDef zstd_methods[] = {
+	/* TODO remove since it is a method on CompressionParameters. */
 	{ "estimate_compression_context_size", (PyCFunction)estimate_compression_context_size,
 	METH_VARARGS, estimate_compression_context_size__doc__ },
 	{ "estimate_decompression_context_size", (PyCFunction)estimate_decompression_context_size,
 	METH_NOARGS, estimate_decompression_context_size__doc__ },
 	{ "get_compression_parameters", (PyCFunction)get_compression_parameters,
 	METH_VARARGS, get_compression_parameters__doc__ },
+	{ "get_frame_parameters", (PyCFunction)get_frame_parameters,
+	METH_VARARGS, get_frame_parameters__doc__ },
 	{ "train_dictionary", (PyCFunction)train_dictionary,
 	METH_VARARGS | METH_KEYWORDS, train_dictionary__doc__ },
+	{ "train_cover_dictionary", (PyCFunction)train_cover_dictionary,
+	METH_VARARGS | METH_KEYWORDS, train_cover_dictionary__doc__ },
 	{ NULL, NULL }
 };
 
+void bufferutil_module_init(PyObject* mod);
 void compressobj_module_init(PyObject* mod);
 void compressor_module_init(PyObject* mod);
 void compressionparams_module_init(PyObject* mod);
 void constants_module_init(PyObject* mod);
-void dictparams_module_init(PyObject* mod);
 void compressiondict_module_init(PyObject* mod);
 void compressionwriter_module_init(PyObject* mod);
 void compressoriterator_module_init(PyObject* mod);
@@ -70,6 +100,7 @@
 void decompressobj_module_init(PyObject* mod);
 void decompressionwriter_module_init(PyObject* mod);
 void decompressoriterator_module_init(PyObject* mod);
+void frameparams_module_init(PyObject* mod);
 
 void zstd_module_init(PyObject* m) {
 	/* python-zstandard relies on unstable zstd C API features. This means
@@ -87,13 +118,13 @@
 	   We detect this mismatch here and refuse to load the module if this
 	   scenario is detected.
 	*/
-	if (ZSTD_VERSION_NUMBER != 10102 || ZSTD_versionNumber() != 10102) {
+	if (ZSTD_VERSION_NUMBER != 10103 || ZSTD_versionNumber() != 10103) {
 		PyErr_SetString(PyExc_ImportError, "zstd C API mismatch; Python bindings not compiled against expected zstd version");
 		return;
 	}
 
+	bufferutil_module_init(m);
 	compressionparams_module_init(m);
-	dictparams_module_init(m);
 	compressiondict_module_init(m);
 	compressobj_module_init(m);
 	compressor_module_init(m);
@@ -104,6 +135,7 @@
 	decompressobj_module_init(m);
 	decompressionwriter_module_init(m);
 	decompressoriterator_module_init(m);
+	frameparams_module_init(m);
 }
 
 #if PY_MAJOR_VERSION >= 3
@@ -134,3 +166,48 @@
 	}
 }
 #endif
+
+/* Attempt to resolve the number of CPUs in the system. */
+int cpu_count() {
+	int count = 0;
+
+#if defined(_WIN32)
+	SYSTEM_INFO si;
+	si.dwNumberOfProcessors = 0;
+	GetSystemInfo(&si);
+	count = si.dwNumberOfProcessors;
+#elif defined(__APPLE__)
+	int num;
+	size_t size = sizeof(int);
+
+	if (0 == sysctlbyname("hw.logicalcpu", &num, &size, NULL, 0)) {
+		count = num;
+	}
+#elif defined(__linux__)
+	count = sysconf(_SC_NPROCESSORS_ONLN);
+#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+	int mib[2];
+	size_t len = sizeof(count);
+	mib[0] = CTL_HW;
+	mib[1] = HW_NCPU;
+	if (0 != sysctl(mib, 2, &count, &len, NULL, 0)) {
+		count = 0;
+	}
+#elif defined(__hpux)
+	count = mpctl(MPC_GETNUMSPUS, NULL, NULL);
+#endif
+
+	return count;
+}
+
+size_t roundpow2(size_t i) {
+	i--;
+	i |= i >> 1;
+	i |= i >> 2;
+	i |= i >> 4;
+	i |= i >> 8;
+	i |= i >> 16;
+	i++;
+
+	return i;
+}
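cpu_count() above probes each platform for its logical CPU count, and
roundpow2() rounds up to the next power of two by smearing the highest set
bit into every lower position. A Python transcription of the latter makes
the trick concrete; the shifts cover 32-bit inputs, matching the C helper.

    def roundpow2(i):
        # Subtract one, propagate the top set bit downward, add one back.
        i -= 1
        i |= i >> 1
        i |= i >> 2
        i |= i >> 4
        i |= i >> 8
        i |= i >> 16
        return i + 1

    assert roundpow2(1) == 1
    assert roundpow2(5) == 8
    assert roundpow2(4096) == 4096
    assert roundpow2(4097) == 8192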
--- a/contrib/python-zstandard/zstd/common/mem.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/common/mem.h	Tue Apr 18 12:24:34 2017 -0400
@@ -39,7 +39,7 @@
 #endif
 
 /* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }
+#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
 MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pool.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,194 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* ======   Dependencies   ======= */
+#include <stddef.h>  /* size_t */
+#include <stdlib.h>  /* malloc, calloc, free */
+#include "pool.h"
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h"   /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+  POOL_function function;
+  void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+    /* Keep track of the threads */
+    pthread_t *threads;
+    size_t numThreads;
+
+    /* The queue is a circular buffer */
+    POOL_job *queue;
+    size_t queueHead;
+    size_t queueTail;
+    size_t queueSize;
+    /* The mutex protects the queue */
+    pthread_mutex_t queueMutex;
+    /* Condition variable for pushers to wait on when the queue is full */
+    pthread_cond_t queuePushCond;
+    /* Condition variable for poppers to wait on when the queue is empty */
+    pthread_cond_t queuePopCond;
+    /* Indicates if the queue is shutting down */
+    int shutdown;
+};
+
+/* POOL_thread() :
+   Work thread for the thread pool.
+   Waits for jobs and executes them.
+   @returns : NULL on failure else non-null.
+*/
+static void* POOL_thread(void* opaque) {
+    POOL_ctx* const ctx = (POOL_ctx*)opaque;
+    if (!ctx) { return NULL; }
+    for (;;) {
+        /* Lock the mutex and wait for a non-empty queue or until shutdown */
+        pthread_mutex_lock(&ctx->queueMutex);
+        while (ctx->queueHead == ctx->queueTail && !ctx->shutdown) {
+            pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+        }
+        /* empty => shutting down: so stop */
+        if (ctx->queueHead == ctx->queueTail) {
+            pthread_mutex_unlock(&ctx->queueMutex);
+            return opaque;
+        }
+        /* Pop a job off the queue */
+        {   POOL_job const job = ctx->queue[ctx->queueHead];
+            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+            /* Unlock the mutex, signal a pusher, and run the job */
+            pthread_mutex_unlock(&ctx->queueMutex);
+            pthread_cond_signal(&ctx->queuePushCond);
+            job.function(job.opaque);
+        }
+    }
+    /* Unreachable */
+}
+
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
+    POOL_ctx *ctx;
+    /* Check the parameters */
+    if (!numThreads || !queueSize) { return NULL; }
+    /* Allocate the context and zero initialize */
+    ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx));
+    if (!ctx) { return NULL; }
+    /* Initialize the job queue.
+     * It needs one extra slot, which stays unused so that an empty queue
+     * (head == tail) can be distinguished from a full one.
+     */
+    ctx->queueSize = queueSize + 1;
+    ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job));
+    ctx->queueHead = 0;
+    ctx->queueTail = 0;
+    pthread_mutex_init(&ctx->queueMutex, NULL);
+    pthread_cond_init(&ctx->queuePushCond, NULL);
+    pthread_cond_init(&ctx->queuePopCond, NULL);
+    ctx->shutdown = 0;
+    /* Allocate space for the thread handles */
+    ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t));
+    ctx->numThreads = 0;
+    /* Check for errors */
+    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+    /* Initialize the threads */
+    {   size_t i;
+        for (i = 0; i < numThreads; ++i) {
+            if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+                ctx->numThreads = i;
+                POOL_free(ctx);
+                return NULL;
+        }   }
+        ctx->numThreads = numThreads;
+    }
+    return ctx;
+}
+
+/*! POOL_join() :
+    Shutdown the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx *ctx) {
+    /* Shut down the queue */
+    pthread_mutex_lock(&ctx->queueMutex);
+    ctx->shutdown = 1;
+    pthread_mutex_unlock(&ctx->queueMutex);
+    /* Wake up sleeping threads */
+    pthread_cond_broadcast(&ctx->queuePushCond);
+    pthread_cond_broadcast(&ctx->queuePopCond);
+    /* Join all of the threads */
+    {   size_t i;
+        for (i = 0; i < ctx->numThreads; ++i) {
+            pthread_join(ctx->threads[i], NULL);
+    }   }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+    if (!ctx) { return; }
+    POOL_join(ctx);
+    pthread_mutex_destroy(&ctx->queueMutex);
+    pthread_cond_destroy(&ctx->queuePushCond);
+    pthread_cond_destroy(&ctx->queuePopCond);
+    if (ctx->queue) free(ctx->queue);
+    if (ctx->threads) free(ctx->threads);
+    free(ctx);
+}
+
+void POOL_add(void *ctxVoid, POOL_function function, void *opaque) {
+    POOL_ctx *ctx = (POOL_ctx *)ctxVoid;
+    if (!ctx) { return; }
+
+    pthread_mutex_lock(&ctx->queueMutex);
+    {   POOL_job const job = {function, opaque};
+        /* Wait until there is space in the queue for the new job */
+        size_t newTail = (ctx->queueTail + 1) % ctx->queueSize;
+        while (ctx->queueHead == newTail && !ctx->shutdown) {
+          pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+          newTail = (ctx->queueTail + 1) % ctx->queueSize;
+        }
+        /* The queue is still going => there is space */
+        if (!ctx->shutdown) {
+            ctx->queue[ctx->queueTail] = job;
+            ctx->queueTail = newTail;
+        }
+    }
+    pthread_mutex_unlock(&ctx->queueMutex);
+    pthread_cond_signal(&ctx->queuePopCond);
+}
+
+#else  /* ZSTD_MULTITHREAD  not defined */
+/* No multi-threading support */
+
+/* We don't need any state, but malloc(0) may return NULL, so give the struct one member. */
+struct POOL_ctx_s {
+  int data;
+};
+
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) {
+  (void)numThreads;
+  (void)queueSize;
+  return (POOL_ctx *)malloc(sizeof(POOL_ctx));
+}
+
+void POOL_free(POOL_ctx *ctx) {
+  if (ctx) free(ctx);
+}
+
+void POOL_add(void *ctx, POOL_function function, void *opaque) {
+  (void)ctx;
+  function(opaque);
+}
+
+#endif  /* ZSTD_MULTITHREAD */
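The queue above is the classic ring buffer that sacrifices one slot instead
of keeping a counter: head == tail means empty, and advancing the tail into
the head means full. A small Python model of the same invariants, with
hypothetical names, purely illustrative:

    class RingQueue(object):
        def __init__(self, capacity):
            self.size = capacity + 1      # one slot stays unused
            self.slots = [None] * self.size
            self.head = 0                 # next slot to pop
            self.tail = 0                 # next slot to fill

        def empty(self):
            return self.head == self.tail

        def full(self):
            return (self.tail + 1) % self.size == self.head

        def push(self, job):
            assert not self.full()
            self.slots[self.tail] = job
            self.tail = (self.tail + 1) % self.size

        def pop(self):
            assert not self.empty()
            job = self.slots[self.head]
            self.head = (self.head + 1) % self.size
            return job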
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/pool.h	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include <stddef.h>   /* size_t */
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+    Create a thread pool with at most `numThreads` threads.
+    `numThreads` must be at least 1.
+    The maximum number of queued jobs before blocking is `queueSize`.
+    `queueSize` must be at least 1.
+    @return : The POOL_ctx pointer on success else NULL.
+*/
+POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
+
+/*! POOL_free() :
+    Free a thread pool returned by POOL_create().
+*/
+void POOL_free(POOL_ctx *ctx);
+
+/*! POOL_function :
+    The function type that can be added to a thread pool.
+*/
+typedef void (*POOL_function)(void *);
+/*! POOL_add_function :
+    The function type for a generic thread pool add function.
+*/
+typedef void (*POOL_add_function)(void *, POOL_function, void *);
+
+/*! POOL_add() :
+    Add the job `function(opaque)` to the thread pool.
+    Possibly blocks until there is room in the queue.
+    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
+*/
+void POOL_add(void *ctx, POOL_function function, void *opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/threading.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,79 @@
+
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+/**
+ * This file holds wrappers for systems which do not support pthreads
+ */
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4206)        /* disable: C4206: translation unit is empty (when ZSTD_MULTITHREAD is not defined) */
+#endif
+
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+
+
+/* ===  Dependencies  === */
+#include <process.h>
+#include <errno.h>
+#include "threading.h"
+
+
+/* ===  Implementation  === */
+
+static unsigned __stdcall worker(void *arg)
+{
+    pthread_t* const thread = (pthread_t*) arg;
+    thread->arg = thread->start_routine(thread->arg);
+    return 0;
+}
+
+int pthread_create(pthread_t* thread, const void* unused,
+            void* (*start_routine) (void*), void* arg)
+{
+    (void)unused;
+    thread->arg = arg;
+    thread->start_routine = start_routine;
+    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
+
+    if (!thread->handle)
+        return errno;
+    else
+        return 0;
+}
+
+int _pthread_join(pthread_t * thread, void **value_ptr)
+{
+    DWORD result;
+
+    if (!thread->handle) return 0;
+
+    result = WaitForSingleObject(thread->handle, INFINITE);
+    switch (result) {
+    case WAIT_OBJECT_0:
+        if (value_ptr) *value_ptr = thread->arg;
+        return 0;
+    case WAIT_ABANDONED:
+        return EINVAL;
+    default:
+        return GetLastError();
+    }
+}
+
+#endif   /* ZSTD_MULTITHREAD && _WIN32 */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/common/threading.h	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,104 @@
+
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+#ifndef THREADING_H_938743
+#define THREADING_H_938743
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+#ifdef WINVER
+#  undef WINVER
+#endif
+#define WINVER       0x0600
+
+#ifdef _WIN32_WINNT
+#  undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0600
+
+#ifndef WIN32_LEAN_AND_MEAN
+#  define WIN32_LEAN_AND_MEAN
+#endif
+
+#include <windows.h>
+
+/* mutex */
+#define pthread_mutex_t           CRITICAL_SECTION
+#define pthread_mutex_init(a,b)   InitializeCriticalSection((a))
+#define pthread_mutex_destroy(a)  DeleteCriticalSection((a))
+#define pthread_mutex_lock(a)     EnterCriticalSection((a))
+#define pthread_mutex_unlock(a)   LeaveCriticalSection((a))
+
+/* condition variable */
+#define pthread_cond_t             CONDITION_VARIABLE
+#define pthread_cond_init(a, b)    InitializeConditionVariable((a))
+#define pthread_cond_destroy(a)    /* No delete */
+#define pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
+#define pthread_cond_signal(a)     WakeConditionVariable((a))
+#define pthread_cond_broadcast(a)  WakeAllConditionVariable((a))
+
+/* pthread_create() and pthread_join() */
+typedef struct {
+    HANDLE handle;
+    void* (*start_routine)(void*);
+    void* arg;
+} pthread_t;
+
+int pthread_create(pthread_t* thread, const void* unused,
+                   void* (*start_routine) (void*), void* arg);
+
+#define pthread_join(a, b) _pthread_join(&(a), (b))
+int _pthread_join(pthread_t* thread, void** value_ptr);
+
+/**
+ * add here more wrappers as required
+ */
+
+
+#elif defined(ZSTD_MULTITHREAD)   /* POSIX assumed; need a better detection method */
+/* ===   POSIX Systems   === */
+#  include <pthread.h>
+
+#else  /* ZSTD_MULTITHREAD not defined */
+/* No multithreading support */
+
+#define pthread_mutex_t int   /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */
+#define pthread_mutex_init(a,b)
+#define pthread_mutex_destroy(a)
+#define pthread_mutex_lock(a)
+#define pthread_mutex_unlock(a)
+
+#define pthread_cond_t int
+#define pthread_cond_init(a,b)
+#define pthread_cond_destroy(a)
+#define pthread_cond_wait(a,b)
+#define pthread_cond_signal(a)
+#define pthread_cond_broadcast(a)
+
+/* do not use pthread_t */
+
+#endif /* ZSTD_MULTITHREAD */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* THREADING_H_938743 */
--- a/contrib/python-zstandard/zstd/common/zstd_common.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_common.c	Tue Apr 18 12:24:34 2017 -0400
@@ -43,10 +43,6 @@
 *   provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorName(code); }
 
-/* ---   ZBUFF Error Management  (deprecated)   --- */
-unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
-const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
-
 
 /*=**************************************************************
 *  Custom allocator
--- a/contrib/python-zstandard/zstd/common/zstd_errors.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_errors.h	Tue Apr 18 12:24:34 2017 -0400
@@ -18,6 +18,20 @@
 #include <stddef.h>   /* size_t */
 
 
+/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#  define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#else
+#  define ZSTDERRORLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
 /*-****************************************
 *  error codes list
 ******************************************/
@@ -49,8 +63,8 @@
 /*! ZSTD_getErrorCode() :
     convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
     which can be used to compare directly with enum list published into "error_public.h" */
-ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
-const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);
 
 
 #if defined (__cplusplus)
--- a/contrib/python-zstandard/zstd/common/zstd_internal.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/common/zstd_internal.h	Tue Apr 18 12:24:34 2017 -0400
@@ -267,4 +267,13 @@
 }
 
 
+/* hidden functions */
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);
+
+
 #endif   /* ZSTD_CCOMMON_H_MODULE */
--- a/contrib/python-zstandard/zstd/compress/zstd_compress.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_compress.c	Tue Apr 18 12:24:34 2017 -0400
@@ -51,8 +51,7 @@
 /*-*************************************
 *  Context memory management
 ***************************************/
-struct ZSTD_CCtx_s
-{
+struct ZSTD_CCtx_s {
     const BYTE* nextSrc;    /* next block here to continue on current prefix */
     const BYTE* base;       /* All regular indexes relative to this position */
     const BYTE* dictBase;   /* extDict indexes relative to this position */
@@ -61,10 +60,11 @@
     U32   nextToUpdate;     /* index from which to continue dictionary update */
     U32   nextToUpdate3;    /* index from which to continue dictionary update */
     U32   hashLog3;         /* dispatch table : larger == faster, more memory */
-    U32   loadedDictEnd;
+    U32   loadedDictEnd;    /* index of end of dictionary */
+    U32   forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
     ZSTD_compressionStage_e stage;
     U32   rep[ZSTD_REP_NUM];
-    U32   savedRep[ZSTD_REP_NUM];
+    U32   repToConfirm[ZSTD_REP_NUM];
     U32   dictID;
     ZSTD_parameters params;
     void* workSpace;
@@ -101,7 +101,7 @@
     cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
     if (!cctx) return NULL;
     memset(cctx, 0, sizeof(ZSTD_CCtx));
-    memcpy(&(cctx->customMem), &customMem, sizeof(customMem));
+    cctx->customMem = customMem;
     return cctx;
 }
 
@@ -119,6 +119,15 @@
     return sizeof(*cctx) + cctx->workSpaceSize;
 }
 
+size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value)
+{
+    switch(param)
+    {
+    case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0;
+    default: return ERROR(parameter_unknown);
+    }
+}
+
 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx)   /* hidden interface */
 {
     return &(ctx->seqStore);
@@ -318,6 +327,14 @@
     }
 }
 
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+    int i;
+    for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0;
+}
 
 /*! ZSTD_copyCCtx() :
 *   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
@@ -735,12 +752,19 @@
       if ((size_t)(op-ostart) >= maxCSize) return 0; }
 
     /* confirm repcodes */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; }
 
     return op - ostart;
 }
 
 
+#if 0 /* for debug */
+#  define STORESEQ_DEBUG
+#include <stdio.h>   /* fprintf */
+U32 g_startDebug = 0;
+const BYTE* g_start = NULL;
+#endif
+
 /*! ZSTD_storeSeq() :
     Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
     `offsetCode` : distance to match, or 0 == repCode.
@@ -748,13 +772,14 @@
 */
 MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
 {
-#if 0  /* for debug */
-    static const BYTE* g_start = NULL;
-    const U32 pos = (U32)((const BYTE*)literals - g_start);
-    if (g_start==NULL) g_start = (const BYTE*)literals;
-    //if ((pos > 1) && (pos < 50000))
-        printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
-               pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+#ifdef STORESEQ_DEBUG
+    if (g_startDebug) {
+        const U32 pos = (U32)((const BYTE*)literals - g_start);
+        if (g_start==NULL) g_start = (const BYTE*)literals;
+        if ((pos > 1895000) && (pos < 1895300))
+            fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+                   pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+    }
 #endif
     /* copy Literals */
     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
@@ -1004,8 +1029,8 @@
     }   }   }
 
     /* save reps for next block */
-    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
-    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+    cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1119,7 +1144,7 @@
     }   }   }
 
     /* save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1273,8 +1298,8 @@
     }   }   }
 
     /* save reps for next block */
-    cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
-    cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+    cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
+    cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1423,7 +1448,7 @@
     }   }   }
 
     /* save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -1955,8 +1980,8 @@
     }   }
 
     /* Save reps for next block */
-    ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset;
-    ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset;
+    ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
+    ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -2150,7 +2175,7 @@
     }   }
 
     /* Save reps for next block */
-    ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+    ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2;
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -2409,12 +2434,14 @@
 
     cctx->nextSrc = ip + srcSize;
 
-    {   size_t const cSize = frame ?
+    if (srcSize) {
+        size_t const cSize = frame ?
                              ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
         if (ZSTD_isError(cSize)) return cSize;
         return cSize + fhSize;
-    }
+    } else
+        return fhSize;
 }
 
 
@@ -2450,7 +2477,7 @@
     zc->dictBase = zc->base;
     zc->base += ip - zc->nextSrc;
     zc->nextToUpdate = zc->dictLimit;
-    zc->loadedDictEnd = (U32)(iend - zc->base);
+    zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
 
     zc->nextSrc = iend;
     if (srcSize <= HASH_READ_SIZE) return 0;
@@ -2557,9 +2584,9 @@
     }
 
     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
-    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
-    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] == 0 || cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] == 0 || cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] == 0 || cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
     dictPtr += 12;
 
     {   U32 offcodeMax = MaxOff;
@@ -2594,7 +2621,6 @@
     }
 }
 
-
 /*! ZSTD_compressBegin_internal() :
 *   @return : 0, or an error code */
 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
@@ -2626,9 +2652,9 @@
 }
 
 
-size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel)
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
 {
-    return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel);
+    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
 }
 
 
@@ -2733,7 +2759,8 @@
 /* =====  Dictionary API  ===== */
 
 struct ZSTD_CDict_s {
-    void* dictContent;
+    void* dictBuffer;
+    const void* dictContent;
     size_t dictContentSize;
     ZSTD_CCtx* refContext;
+};  /* typedef'd to ZSTD_CDict within "zstd.h" */
@@ -2741,39 +2768,45 @@
 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
 {
     if (cdict==NULL) return 0;   /* support sizeof on NULL */
-    return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize;
+    return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
 }
 
-ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem)
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference,
+                                      ZSTD_parameters params, ZSTD_customMem customMem)
 {
     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
     if (!customMem.customAlloc || !customMem.customFree) return NULL;
 
     {   ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
-        void* const dictContent = ZSTD_malloc(dictSize, customMem);
         ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
 
-        if (!dictContent || !cdict || !cctx) {
-            ZSTD_free(dictContent, customMem);
+        if (!cdict || !cctx) {
             ZSTD_free(cdict, customMem);
             ZSTD_free(cctx, customMem);
             return NULL;
         }
 
-        if (dictSize) {
-            memcpy(dictContent, dict, dictSize);
+        if ((byReference) || (!dictBuffer) || (!dictSize)) {
+            cdict->dictBuffer = NULL;
+            cdict->dictContent = dictBuffer;
+        } else {
+            void* const internalBuffer = ZSTD_malloc(dictSize, customMem);
+            if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; }
+            memcpy(internalBuffer, dictBuffer, dictSize);
+            cdict->dictBuffer = internalBuffer;
+            cdict->dictContent = internalBuffer;
         }
-        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0);
+
+        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
             if (ZSTD_isError(errorCode)) {
-                ZSTD_free(dictContent, customMem);
+                ZSTD_free(cdict->dictBuffer, customMem);
+                ZSTD_free(cctx, customMem);
                 ZSTD_free(cdict, customMem);
-                ZSTD_free(cctx, customMem);
                 return NULL;
         }   }
 
-        cdict->dictContent = dictContent;
+        cdict->refContext = cctx;
         cdict->dictContentSize = dictSize;
-        cdict->refContext = cctx;
         return cdict;
     }
 }
@@ -2783,7 +2816,15 @@
     ZSTD_customMem const allocator = { NULL, NULL, NULL };
     ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
     params.fParams.contentSizeFlag = 1;
-    return ZSTD_createCDict_advanced(dict, dictSize, params, allocator);
+    return ZSTD_createCDict_advanced(dict, dictSize, 0, params, allocator);
+}
+
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTD_createCDict_advanced(dict, dictSize, 1, params, allocator);
 }
 
 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
@@ -2791,7 +2832,7 @@
     if (cdict==NULL) return 0;   /* support free on NULL */
     {   ZSTD_customMem const cMem = cdict->refContext->customMem;
         ZSTD_freeCCtx(cdict->refContext);
-        ZSTD_free(cdict->dictContent, cMem);
+        ZSTD_free(cdict->dictBuffer, cMem);
         ZSTD_free(cdict, cMem);
         return 0;
     }
@@ -2801,7 +2842,7 @@
     return ZSTD_getParamsFromCCtx(cdict->refContext);
 }
 
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize)
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize)
 {
     if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
     else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
@@ -2900,7 +2941,7 @@
 
 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
 {
-    if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been init at least once */
+    if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been init at least once => can't reset */
 
     if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
     else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
@@ -2937,9 +2978,9 @@
         if (zcs->outBuff == NULL) return ERROR(memory_allocation);
     }
 
-    if (dict) {
+    if (dict && dictSize >= 8) {
         ZSTD_freeCDict(zcs->cdictLocal);
-        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem);
+        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
         if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
         zcs->cdict = zcs->cdictLocal;
     } else zcs->cdict = NULL;
@@ -2956,6 +2997,7 @@
     ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
     size_t const initError =  ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0);
     zcs->cdict = cdict;
+    zcs->cctx->dictID = params.fParams.noDictIDFlag ? 0 : cdict->refContext->dictID;
     return initError;
 }
 
@@ -2967,7 +3009,8 @@
 
 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
 {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    if (pledgedSrcSize) params.fParams.contentSizeFlag = 1;
     return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
 }
 
--- a/contrib/python-zstandard/zstd/compress/zstd_opt.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/compress/zstd_opt.h	Tue Apr 18 12:24:34 2017 -0400
@@ -38,7 +38,7 @@
 
     ssPtr->cachedLiterals = NULL;
     ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
-    ssPtr->staticPrices = 0; 
+    ssPtr->staticPrices = 0;
 
     if (ssPtr->litLengthSum == 0) {
         if (srcSize <= 1024) ssPtr->staticPrices = 1;
@@ -56,7 +56,7 @@
 
         for (u=0; u<=MaxLit; u++) {
             ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
-            ssPtr->litSum += ssPtr->litFreq[u]; 
+            ssPtr->litSum += ssPtr->litFreq[u];
         }
         for (u=0; u<=MaxLL; u++)
             ssPtr->litLengthFreq[u] = 1;
@@ -634,7 +634,7 @@
     }    }   /* for (cur=0; cur < last_pos; ) */
 
     /* Save reps for next block */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
 
     /* Last Literals */
     {   size_t const lastLLSize = iend - anchor;
@@ -825,7 +825,7 @@
 
             match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
 
-            if (match_num > 0 && matches[match_num-1].len > sufficient_len) {
+            if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
                 best_mlen = matches[match_num-1].len;
                 best_off = matches[match_num-1].off;
                 last_pos = cur + 1;
@@ -835,7 +835,7 @@
             /* set prices using matches at position = cur */
             for (u = 0; u < match_num; u++) {
                 mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
-                best_mlen = (cur + matches[u].len < ZSTD_OPT_NUM) ? matches[u].len : ZSTD_OPT_NUM - cur;
+                best_mlen = matches[u].len;
 
                 while (mlen <= best_mlen) {
                     if (opt[cur].mlen == 1) {
@@ -907,7 +907,7 @@
     }    }   /* for (cur=0; cur < last_pos; ) */
 
     /* Save reps for next block */
-    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
 
     /* Last Literals */
     {   size_t lastLLSize = iend - anchor;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,740 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+/* ======   Tuning parameters   ====== */
+#define ZSTDMT_NBTHREADS_MAX 128
+
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+/* ======   Dependencies   ====== */
+#include <stdlib.h>   /* malloc */
+#include <string.h>   /* memcpy */
+#include "pool.h"     /* threadpool */
+#include "threading.h"  /* mutex */
+#include "zstd_internal.h"   /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstdmt_compress.h"
+#define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
+#include "xxhash.h"
+
+
+/* ======   Debug   ====== */
+#if 0
+
+#  include <stdio.h>
+#  include <unistd.h>
+#  include <sys/times.h>
+   static unsigned g_debugLevel = 3;
+#  define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); }
+#  define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); }
+
+#  define DEBUG_PRINTHEX(l,p,n) { \
+    unsigned debug_u;                   \
+    for (debug_u=0; debug_u<(n); debug_u++)           \
+        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+    DEBUGLOGRAW(l, " \n");       \
+}
+
+static unsigned long long GetCurrentClockTimeMicroseconds()
+{
+   static clock_t _ticksPerSecond = 0;
+   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
+
+   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+   return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
+}
+
+#define MUTEX_WAIT_TIME_DLEVEL 5
+#define PTHREAD_MUTEX_LOCK(mutex) \
+if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \
+   unsigned long long beforeTime = GetCurrentClockTimeMicroseconds(); \
+   pthread_mutex_lock(mutex); \
+   unsigned long long afterTime = GetCurrentClockTimeMicroseconds(); \
+   unsigned long long elapsedTime = (afterTime-beforeTime); \
+   if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
+      DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
+               elapsedTime, #mutex); \
+  } \
+} else pthread_mutex_lock(mutex);
+
+#else
+
+#  define DEBUGLOG(l, ...)      {}    /* disabled */
+#  define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m)
+#  define DEBUG_PRINTHEX(l,p,n) {}
+
+#endif
+
+
+/* =====   Buffer Pool   ===== */
+
+typedef struct buffer_s {
+    void* start;
+    size_t size;
+} buffer_t;
+
+static const buffer_t g_nullBuffer = { NULL, 0 };
+
+typedef struct ZSTDMT_bufferPool_s {
+    unsigned totalBuffers;
+    unsigned nbBuffers;
+    buffer_t bTable[1];   /* variable size */
+} ZSTDMT_bufferPool;
+
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads)
+{
+    unsigned const maxNbBuffers = 2*nbThreads + 2;
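+    /* presumably one input and one output buffer per worker, plus a little slack for buffers in flight */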
+    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)calloc(1, sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t));
+    if (bufPool==NULL) return NULL;
+    bufPool->totalBuffers = maxNbBuffers;
+    bufPool->nbBuffers = 0;
+    return bufPool;
+}
+
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+    unsigned u;
+    if (!bufPool) return;   /* compatibility with free on NULL */
+    for (u=0; u<bufPool->totalBuffers; u++)
+        free(bufPool->bTable[u].start);
+    free(bufPool);
+}
+
+/* assumption : invocation from main thread only ! */
+static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize)
+{
+    if (pool->nbBuffers) {   /* try to use an existing buffer */
+        buffer_t const buf = pool->bTable[--(pool->nbBuffers)];
+        size_t const availBufferSize = buf.size;
+        if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize))   /* large enough, but not too much */
+            return buf;
+        free(buf.start);   /* size conditions not respected : scratch this buffer and create a new one */
+    }
+    /* create new buffer */
+    {   buffer_t buffer;
+        void* const start = malloc(bSize);
+        if (start==NULL) bSize = 0;
+        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
+        buffer.size = bSize;
+        return buffer;
+    }
+}
+
+/* store buffer for later re-use, up to pool capacity */
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf)
+{
+    if (buf.start == NULL) return;   /* release on NULL */
+    if (pool->nbBuffers < pool->totalBuffers) {
+        pool->bTable[pool->nbBuffers++] = buf;   /* store for later re-use */
+        return;
+    }
+    /* Reached bufferPool capacity (should not happen) */
+    free(buf.start);
+}
+
+
+/* =====   CCtx Pool   ===== */
+
+typedef struct {
+    unsigned totalCCtx;
+    unsigned availCCtx;
+    ZSTD_CCtx* cctx[1];   /* variable size */
+} ZSTDMT_CCtxPool;
+
+/* assumption : CCtxPool invocation only from main thread */
+
+/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
+static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
+{
+    unsigned u;
+    for (u=0; u<pool->totalCCtx; u++)
+        ZSTD_freeCCtx(pool->cctx[u]);  /* note : compatible with free on NULL */
+    free(pool);
+}
+
+/* ZSTDMT_createCCtxPool() :
+ * implies nbThreads >= 1, checked by caller ZSTDMT_createCCtx() */
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads)
+{
+    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) calloc(1, sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*));
+    if (!cctxPool) return NULL;
+    cctxPool->totalCCtx = nbThreads;
+    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
+    cctxPool->cctx[0] = ZSTD_createCCtx();
+    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
+    DEBUGLOG(1, "cctxPool created, with %u threads", nbThreads);
+    return cctxPool;
+}
+
+static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool)
+{
+    if (pool->availCCtx) {
+        pool->availCCtx--;
+        return pool->cctx[pool->availCCtx];
+    }
+    return ZSTD_createCCtx();   /* note : can be NULL, when creation fails ! */
+}
+
+static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return;   /* compatibility with release on NULL */
+    if (pool->availCCtx < pool->totalCCtx)
+        pool->cctx[pool->availCCtx++] = cctx;
+    else
+        /* pool overflow : should not happen, since totalCCtx==nbThreads */
+        ZSTD_freeCCtx(cctx);
+}
+
+
+/* =====   Thread worker   ===== */
+
+typedef struct {
+    buffer_t buffer;
+    size_t filled;
+} inBuff_t;
+
+typedef struct {
+    ZSTD_CCtx* cctx;
+    buffer_t src;
+    const void* srcStart;
+    size_t   srcSize;
+    size_t   dictSize;
+    buffer_t dstBuff;
+    size_t   cSize;
+    size_t   dstFlushed;
+    unsigned firstChunk;
+    unsigned lastChunk;
+    unsigned jobCompleted;
+    unsigned jobScanned;
+    pthread_mutex_t* jobCompleted_mutex;
+    pthread_cond_t* jobCompleted_cond;
+    ZSTD_parameters params;
+    ZSTD_CDict* cdict;
+    unsigned long long fullFrameSize;
+} ZSTDMT_jobDescription;
+
+/* ZSTDMT_compressChunk() : POOL_function type */
+void ZSTDMT_compressChunk(void* jobDescription)
+{
+    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+    const void* const src = (const char*)job->srcStart + job->dictSize;
+    buffer_t const dstBuff = job->dstBuff;
+    DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
+    if (job->cdict) {
+        size_t const initError = ZSTD_compressBegin_usingCDict(job->cctx, job->cdict, job->fullFrameSize);
+        if (job->cdict) DEBUGLOG(3, "using CDict ");
+        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+    } else {
+        size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
+        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+        ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
+    }
+    if (!job->firstChunk) {  /* flush frame header */
+        size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0);
+        if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; }
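+        /* Non-first chunks start mid-frame; per the contract on
+         * ZSTD_invalidateRepCodes() in zstd_internal.h, this keeps the next
+         * block from reusing repcodes inherited from loading the shared
+         * prefix as a dictionary. */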
+        ZSTD_invalidateRepCodes(job->cctx);
+    }
+
+    DEBUGLOG(4, "Compressing : ");
+    DEBUG_PRINTHEX(4, job->srcStart, 12);
+    job->cSize = (job->lastChunk) ?   /* last chunk signal */
+                 ZSTD_compressEnd     (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
+                 ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
+    DEBUGLOG(3, "compressed %u bytes into %u bytes   (first:%u) (last:%u)", (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
+
+_endJob:
+    PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
+    job->jobCompleted = 1;
+    job->jobScanned = 0;
+    pthread_cond_signal(job->jobCompleted_cond);
+    pthread_mutex_unlock(job->jobCompleted_mutex);
+}
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+struct ZSTDMT_CCtx_s {
+    POOL_ctx* factory;
+    ZSTDMT_bufferPool* buffPool;
+    ZSTDMT_CCtxPool* cctxPool;
+    pthread_mutex_t jobCompleted_mutex;
+    pthread_cond_t jobCompleted_cond;
+    size_t targetSectionSize;
+    size_t marginSize;
+    size_t inBuffSize;
+    size_t dictSize;
+    size_t targetDictSize;
+    inBuff_t inBuff;
+    ZSTD_parameters params;
+    XXH64_state_t xxhState;
+    unsigned nbThreads;
+    unsigned jobIDMask;
+    unsigned doneJobID;
+    unsigned nextJobID;
+    unsigned frameEnded;
+    unsigned allJobsCompleted;
+    unsigned overlapRLog;
+    unsigned long long frameContentSize;
+    size_t sectionSize;
+    ZSTD_CDict* cdict;
+    ZSTD_CStream* cstream;
+    ZSTDMT_jobDescription jobs[1];   /* variable size (must lie at the end) */
+};
+
+ZSTDMT_CCtx *ZSTDMT_createCCtx(unsigned nbThreads)
+{
+    ZSTDMT_CCtx* cctx;
+    U32 const minNbJobs = nbThreads + 2;
+    U32 const nbJobsLog2 = ZSTD_highbit32(minNbJobs) + 1;
+    U32 const nbJobs = 1 << nbJobsLog2;
+    DEBUGLOG(5, "nbThreads : %u  ; minNbJobs : %u ;  nbJobsLog2 : %u ;  nbJobs : %u  \n",
+            nbThreads, minNbJobs, nbJobsLog2, nbJobs);
+    if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL;
+    cctx = (ZSTDMT_CCtx*) calloc(1, sizeof(ZSTDMT_CCtx) + nbJobs*sizeof(ZSTDMT_jobDescription));
+    if (!cctx) return NULL;
+    cctx->nbThreads = nbThreads;
+    cctx->jobIDMask = nbJobs - 1;
+    cctx->allJobsCompleted = 1;
+    cctx->sectionSize = 0;
+    cctx->overlapRLog = 3;
+    cctx->factory = POOL_create(nbThreads, 1);
+    cctx->buffPool = ZSTDMT_createBufferPool(nbThreads);
+    cctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads);
+    if (!cctx->factory | !cctx->buffPool | !cctx->cctxPool) {  /* one object was not created */
+        ZSTDMT_freeCCtx(cctx);
+        return NULL;
+    }
+    if (nbThreads==1) {
+        cctx->cstream = ZSTD_createCStream();
+        if (!cctx->cstream) {
+            ZSTDMT_freeCCtx(cctx); return NULL;
+    }   }
+    pthread_mutex_init(&cctx->jobCompleted_mutex, NULL);   /* Todo : check init function return */
+    pthread_cond_init(&cctx->jobCompleted_cond, NULL);
+    DEBUGLOG(4, "mt_cctx created, for %u threads \n", nbThreads);
+    return cctx;
+}
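+
+/* Illustrative usage sketch (not part of the upstream sources) : creating
+ * and releasing a multi-threaded context. The thread count 4 is an
+ * arbitrary assumption; any value in [1, ZSTDMT_NBTHREADS_MAX] is accepted.
+ *
+ *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);
+ *     if (mtctx == NULL) { (handle allocation failure) }
+ *     (... compress ...)
+ *     ZSTDMT_freeCCtx(mtctx);    (safe on NULL, returns 0)
+ */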
+
+/* ZSTDMT_releaseAllJobResources() :
+ * note : all workers must have been terminated before calling. */
+static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
+{
+    unsigned jobID;
+    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff);
+        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src);
+        mtctx->jobs[jobID].src = g_nullBuffer;
+        ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx);
+        mtctx->jobs[jobID].cctx = NULL;
+    }
+    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
+    ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer);
+    mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->allJobsCompleted = 1;
+}
+
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
+{
+    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
+    POOL_free(mtctx->factory);
+    if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */
+    ZSTDMT_freeBufferPool(mtctx->buffPool);  /* release job resources into pools first */
+    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+    ZSTD_freeCDict(mtctx->cdict);
+    ZSTD_freeCStream(mtctx->cstream);
+    pthread_mutex_destroy(&mtctx->jobCompleted_mutex);
+    pthread_cond_destroy(&mtctx->jobCompleted_cond);
+    free(mtctx);
+    return 0;
+}
+
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value)
+{
+    switch(parameter)
+    {
+    case ZSTDMT_p_sectionSize :
+        mtctx->sectionSize = value;
+        return 0;
+    case ZSTDMT_p_overlapSectionLog :
+        DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
+        mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value;
+        return 0;
+    default :
+        return ERROR(compressionParameter_unsupported);
+    }
+}
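+
+/* Illustrative usage sketch (not part of the upstream sources) : tuning a
+ * context before starting a session. The values are example assumptions,
+ * not recommended defaults. Each call returns 0 or an error code testable
+ * with ZSTD_isError().
+ *
+ *     ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_sectionSize, 1 << 22);
+ *     ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);
+ */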
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel)
+{
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+    size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2);
+    unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + (srcSize < chunkTargetSize) /* min 1 */;
+    unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads);
+    size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
+    size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
+    size_t remainingSrcSize = srcSize;
+    const char* const srcStart = (const char*)src;
+    size_t frameStartPos = 0;
+
+    DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes  ", params.cParams.windowLog, (U32)chunkTargetSize);
+    DEBUGLOG(2, "nbChunks  : %2u   (chunkSize : %u bytes)   ", nbChunks, (U32)avgChunkSize);
+    params.fParams.contentSizeFlag = 1;
+
+    if (nbChunks==1) {   /* fallback to single-thread mode */
+        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
+        return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+    }
+
+    {   unsigned u;
+        for (u=0; u<nbChunks; u++) {
+            size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
+            size_t const dstBufferCapacity = u ? ZSTD_compressBound(chunkSize) : dstCapacity;
+            buffer_t const dstAsBuffer = { dst, dstCapacity };
+            buffer_t const dstBuffer = u ? ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity) : dstAsBuffer;
+            ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
+
+            if ((cctx==NULL) || (dstBuffer.start==NULL)) {
+                mtctx->jobs[u].cSize = ERROR(memory_allocation);   /* job result */
+                mtctx->jobs[u].jobCompleted = 1;
+                nbChunks = u+1;
+                break;   /* let's wait for previous jobs to complete, but don't start new ones */
+            }
+
+            mtctx->jobs[u].srcStart = srcStart + frameStartPos;
+            mtctx->jobs[u].srcSize = chunkSize;
+            mtctx->jobs[u].fullFrameSize = srcSize;
+            mtctx->jobs[u].params = params;
+            mtctx->jobs[u].dstBuff = dstBuffer;
+            mtctx->jobs[u].cctx = cctx;
+            mtctx->jobs[u].firstChunk = (u==0);
+            mtctx->jobs[u].lastChunk = (u==nbChunks-1);
+            mtctx->jobs[u].jobCompleted = 0;
+            mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
+            mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;
+
+            DEBUGLOG(3, "posting job %u   (%u bytes)", u, (U32)chunkSize);
+            DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12);
+            POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
+
+            frameStartPos += chunkSize;
+            remainingSrcSize -= chunkSize;
+    }   }
+    /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */
+
+    {   unsigned chunkID;
+        size_t error = 0, dstPos = 0;
+        for (chunkID=0; chunkID<nbChunks; chunkID++) {
+            DEBUGLOG(3, "waiting for chunk %u ", chunkID);
+            PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
+            while (mtctx->jobs[chunkID].jobCompleted==0) {
+                DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID);
+                pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
+            }
+            pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
+            DEBUGLOG(3, "ready to write chunk %u ", chunkID);
+
+            ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
+            mtctx->jobs[chunkID].cctx = NULL;
+            mtctx->jobs[chunkID].srcStart = NULL;
+            {   size_t const cSize = mtctx->jobs[chunkID].cSize;
+                if (ZSTD_isError(cSize)) error = cSize;
+                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
+                if (chunkID) {   /* note : chunk 0 is already written directly into dst */
+                    if (!error) memcpy((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);
+                    ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
+                    mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
+                }
+                dstPos += cSize;
+            }
+        }
+        if (!error) DEBUGLOG(3, "compressed size : %u  ", (U32)dstPos);
+        return error ? error : dstPos;
+    }
+
+}
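+
+/* Illustrative usage sketch (not part of the upstream sources) : one-pass
+ * multi-threaded compression. `srcBuf` and `srcLen` are hypothetical
+ * caller-provided names.
+ *
+ *     size_t const dstCap = ZSTD_compressBound(srcLen);
+ *     void* const dstBuf = malloc(dstCap);
+ *     size_t const cSize = ZSTDMT_compressCCtx(mtctx, dstBuf, dstCap,
+ *                                              srcBuf, srcLen, 3);
+ *     if (ZSTD_isError(cSize)) { (handle error) }
+ *
+ * When the input yields a single chunk, the call transparently falls back
+ * to the single-threaded ZSTD_compressCCtx() path above. */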
+
+
+/* ====================================== */
+/* =======      Streaming API     ======= */
+/* ====================================== */
+
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) {
+    while (zcs->doneJobID < zcs->nextJobID) {
+        unsigned const jobID = zcs->doneJobID & zcs->jobIDMask;
+        PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+        while (zcs->jobs[jobID].jobCompleted==0) {
+            DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID);   /* we want to block when waiting for data to flush */
+            pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);
+        }
+        pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+        zcs->doneJobID++;
+    }
+}
+
+
+static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+                                    const void* dict, size_t dictSize, unsigned updateDict,
+                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    ZSTD_customMem const cmem = { NULL, NULL, NULL };
+    DEBUGLOG(3, "Started new compression, with windowLog : %u", params.cParams.windowLog);
+    if (zcs->nbThreads==1) return ZSTD_initCStream_advanced(zcs->cstream, dict, dictSize, params, pledgedSrcSize);
+    if (zcs->allJobsCompleted == 0) {   /* previous job not correctly finished */
+        ZSTDMT_waitForAllJobsCompleted(zcs);
+        ZSTDMT_releaseAllJobResources(zcs);
+        zcs->allJobsCompleted = 1;
+    }
+    zcs->params = params;
+    if (updateDict) {
+        ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL;
+        if (dict && dictSize) {
+            zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params, cmem);
+            if (zcs->cdict == NULL) return ERROR(memory_allocation);
+    }   }
+    zcs->frameContentSize = pledgedSrcSize;
+    zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog);
+    DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog);
+    DEBUGLOG(3, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10));
+    zcs->targetSectionSize = zcs->sectionSize ? zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2);
+    zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize);
+    zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize);
+    DEBUGLOG(3, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10));
+    zcs->marginSize = zcs->targetSectionSize >> 2;
+    zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize;
+    zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
+    if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation);
+    zcs->inBuff.filled = 0;
+    zcs->dictSize = 0;
+    zcs->doneJobID = 0;
+    zcs->nextJobID = 0;
+    zcs->frameEnded = 0;
+    zcs->allJobsCompleted = 0;
+    if (params.fParams.checksumFlag) XXH64_reset(&zcs->xxhState, 0);
+    return 0;
+}
+
+size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* zcs,
+                                const void* dict, size_t dictSize,
+                                ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    return ZSTDMT_initCStream_internal(zcs, dict, dictSize, 1, params, pledgedSrcSize);
+}
+
+/* ZSTDMT_resetCStream() :
+ * pledgedSrcSize is optional and can be zero == unknown */
+size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize)
+{
+    if (zcs->nbThreads==1) return ZSTD_resetCStream(zcs->cstream, pledgedSrcSize);
+    return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) {
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
+    return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0);
+}
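+
+/* Illustrative usage sketch (not part of the upstream sources) : starting
+ * a streaming session at an assumed compression level of 3, without a
+ * dictionary.
+ *
+ *     size_t const initErr = ZSTDMT_initCStream(mtctx, 3);
+ *     if (ZSTD_isError(initErr)) { (handle error) }
+ */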
+
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame)
+{
+    size_t const dstBufferCapacity = ZSTD_compressBound(srcSize);
+    buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity);
+    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool);
+    unsigned const jobID = zcs->nextJobID & zcs->jobIDMask;
+
+    if ((cctx==NULL) || (dstBuffer.start==NULL)) {
+        zcs->jobs[jobID].jobCompleted = 1;
+        zcs->nextJobID++;
+        ZSTDMT_waitForAllJobsCompleted(zcs);
+        ZSTDMT_releaseAllJobResources(zcs);
+        return ERROR(memory_allocation);
+    }
+
+    DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize);
+    zcs->jobs[jobID].src = zcs->inBuff.buffer;
+    zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start;
+    zcs->jobs[jobID].srcSize = srcSize;
+    zcs->jobs[jobID].dictSize = zcs->dictSize;   /* note : zcs->inBuff.filled is presumed >= srcSize + dictSize */
+    zcs->jobs[jobID].params = zcs->params;
+    if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0;  /* do not calculate checksum within sections, just keep it in header for first section */
+    zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? zcs->cdict : NULL;
+    zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize;
+    zcs->jobs[jobID].dstBuff = dstBuffer;
+    zcs->jobs[jobID].cctx = cctx;
+    zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0);
+    zcs->jobs[jobID].lastChunk = endFrame;
+    zcs->jobs[jobID].jobCompleted = 0;
+    zcs->jobs[jobID].dstFlushed = 0;
+    zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex;
+    zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond;
+
+    /* get a new buffer for next input */
+    if (!endFrame) {
+        size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize);
+        zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize);
+        if (zcs->inBuff.buffer.start == NULL) {   /* not enough memory to allocate next input buffer */
+            zcs->jobs[jobID].jobCompleted = 1;
+            zcs->nextJobID++;
+            ZSTDMT_waitForAllJobsCompleted(zcs);
+            ZSTDMT_releaseAllJobResources(zcs);
+            return ERROR(memory_allocation);
+        }
+        DEBUGLOG(5, "inBuff filled to %u", (U32)zcs->inBuff.filled);
+        zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize;
+        DEBUGLOG(5, "new job : filled to %u, with %u dict and %u src", (U32)zcs->inBuff.filled, (U32)newDictSize, (U32)(zcs->inBuff.filled - newDictSize));
+        memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled);
+        DEBUGLOG(5, "new inBuff pre-filled");
+        zcs->dictSize = newDictSize;
+    } else {
+        zcs->inBuff.buffer = g_nullBuffer;
+        zcs->inBuff.filled = 0;
+        zcs->dictSize = 0;
+        zcs->frameEnded = 1;
+        if (zcs->nextJobID == 0)
+            zcs->params.fParams.checksumFlag = 0;   /* single chunk : checksum is calculated directly within worker thread */
+    }
+
+    DEBUGLOG(3, "posting job %u : %u bytes  (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask);
+    POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]);   /* this call is blocking when thread worker pool is exhausted */
+    zcs->nextJobID++;
+    return 0;
+}
+
+
+/* ZSTDMT_flushNextJob() :
+ * output : will be updated with the amount of data flushed.
+ * blockToFlush : if >0, the function will block and wait when there is no data available to flush.
+ * @return : amount of data remaining within internal buffer, 1 if unknown but > 0, 0 if no more, or an error code */
+static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush)
+{
+    unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask;
+    if (zcs->doneJobID == zcs->nextJobID) return 0;   /* all flushed ! */
+    PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex);
+    while (zcs->jobs[wJobID].jobCompleted==0) {
+        DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID);
+        if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; }  /* nothing ready to be flushed => skip */
+        pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex);  /* block when nothing available to flush */
+    }
+    pthread_mutex_unlock(&zcs->jobCompleted_mutex);
+    /* compression job completed : output can be flushed */
+    {   ZSTDMT_jobDescription job = zcs->jobs[wJobID];
+        if (!job.jobScanned) {
+            if (ZSTD_isError(job.cSize)) {
+                DEBUGLOG(5, "compression error detected ");
+                ZSTDMT_waitForAllJobsCompleted(zcs);
+                ZSTDMT_releaseAllJobResources(zcs);
+                return job.cSize;
+            }
+            ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx);
+            zcs->jobs[wJobID].cctx = NULL;
+            DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag);
+            if (zcs->params.fParams.checksumFlag) {
+                XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize);
+                if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) {  /* write checksum at end of last section */
+                    U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
+                    DEBUGLOG(4, "writing checksum : %08X \n", checksum);
+                    MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum);
+                    job.cSize += 4;
+                    zcs->jobs[wJobID].cSize += 4;
+            }   }
+            ZSTDMT_releaseBuffer(zcs->buffPool, job.src);
+            zcs->jobs[wJobID].srcStart = NULL;
+            zcs->jobs[wJobID].src = g_nullBuffer;
+            zcs->jobs[wJobID].jobScanned = 1;
+        }
+        {   size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos);
+            DEBUGLOG(4, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID);
+            memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite);
+            output->pos += toWrite;
+            job.dstFlushed += toWrite;
+        }
+        if (job.dstFlushed == job.cSize) {   /* output buffer fully flushed => move to next one */
+            ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff);
+            zcs->jobs[wJobID].dstBuff = g_nullBuffer;
+            zcs->jobs[wJobID].jobCompleted = 0;
+            zcs->doneJobID++;
+        } else {
+            zcs->jobs[wJobID].dstFlushed = job.dstFlushed;
+        }
+        /* return value : number of bytes left in the buffer ; reported as 1 if unknown but > 0 */
+        if (job.cSize > job.dstFlushed) return (job.cSize - job.dstFlushed);
+        if (zcs->doneJobID < zcs->nextJobID) return 1;   /* still some buffer to flush */
+        zcs->allJobsCompleted = zcs->frameEnded;   /* frame completed and entirely flushed */
+        return 0;   /* everything flushed */
+}   }
+
+
+size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    size_t const newJobThreshold = zcs->dictSize + zcs->targetSectionSize + zcs->marginSize;
+    if (zcs->frameEnded) return ERROR(stage_wrong);   /* current frame being ended. Only flush is allowed. Restart with init */
+    if (zcs->nbThreads==1) return ZSTD_compressStream(zcs->cstream, output, input);
+
+    /* fill input buffer */
+    {   size_t const toLoad = MIN(input->size - input->pos, zcs->inBuffSize - zcs->inBuff.filled);
+        memcpy((char*)zcs->inBuff.buffer.start + zcs->inBuff.filled, input->src, toLoad);
+        input->pos += toLoad;
+        zcs->inBuff.filled += toLoad;
+    }
+
+    if ( (zcs->inBuff.filled >= newJobThreshold)  /* filled enough : let's compress */
+        && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) {   /* avoid overwriting job round buffer */
+        CHECK_F( ZSTDMT_createCompressionJob(zcs, zcs->targetSectionSize, 0) );
+    }
+
+    /* check for data to flush */
+    CHECK_F( ZSTDMT_flushNextJob(zcs, output, (zcs->inBuff.filled == zcs->inBuffSize)) ); /* block if it wasn't possible to create new job due to saturation */
+
+    /* recommended next input size : fill current input buffer */
+    return zcs->inBuffSize - zcs->inBuff.filled;   /* note : can be zero when the input buffer is full and no new job could be created */
+}
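+
+/* Illustrative usage sketch (not part of the upstream sources) : feeding
+ * input in a loop. `srcBuf`, `srcLen`, `dstBuf` and `dstCap` are
+ * hypothetical caller-provided names; the return value is only a hint
+ * (recommended next input size), so the loop tests input->pos instead.
+ *
+ *     ZSTD_inBuffer  in  = { srcBuf, srcLen, 0 };
+ *     ZSTD_outBuffer out = { dstBuf, dstCap, 0 };
+ *     while (in.pos < in.size) {
+ *         size_t const hint = ZSTDMT_compressStream(mtctx, &out, &in);
+ *         if (ZSTD_isError(hint)) { (handle error) }
+ *     }
+ */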
+
+
+static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned endFrame)
+{
+    size_t const srcSize = zcs->inBuff.filled - zcs->dictSize;
+
+    if (srcSize) DEBUGLOG(4, "flushing : %u bytes left to compress", (U32)srcSize);
+    if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded))
+       && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) {
+        CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) );
+    }
+
+    /* check if there is any data available to flush */
+    DEBUGLOG(5, "zcs->doneJobID : %u  ; zcs->nextJobID : %u ", zcs->doneJobID, zcs->nextJobID);
+    return ZSTDMT_flushNextJob(zcs, output, 1);
+}
+
+
+size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
+{
+    if (zcs->nbThreads==1) return ZSTD_flushStream(zcs->cstream, output);
+    return ZSTDMT_flushStream_internal(zcs, output, 0);
+}
+
+size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output)
+{
+    if (zcs->nbThreads==1) return ZSTD_endStream(zcs->cstream, output);
+    return ZSTDMT_flushStream_internal(zcs, output, 1);
+}
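+
+/* Illustrative usage sketch (not part of the upstream sources) : ending a
+ * frame. ZSTDMT_endStream() is called repeatedly until it reports that
+ * everything has been flushed; `out` is drained to a hypothetical sink
+ * whenever it fills up.
+ *
+ *     size_t remaining;
+ *     do {
+ *         remaining = ZSTDMT_endStream(mtctx, &out);
+ *         if (ZSTD_isError(remaining)) { (handle error) }
+ *         (write out.dst[0 .. out.pos) to the sink, then reset out.pos = 0)
+ *     } while (remaining > 0);
+ */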
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/compress/zstdmt_compress.h	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,78 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef ZSTDMT_COMPRESS_H
+#define ZSTDMT_COMPRESS_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* Note : All prototypes defined in this file shall be considered experimental.
+ *        There is no guarantee of API continuity (yet) on any of these prototypes */
+
+/* ===   Dependencies   === */
+#include <stddef.h>   /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
+#include "zstd.h"     /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
+
+
+/* ===   Simple one-pass functions   === */
+
+typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
+ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads);
+ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* cctx);
+
+ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel);
+
+
+/* ===   Streaming functions   === */
+
+ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);    /**< pledgedSrcSize is optional and can be zero == unknown */
+
+ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+
+
+/* ===   Advanced functions and parameters  === */
+
+#ifndef ZSTDMT_SECTION_SIZE_MIN
+#  define ZSTDMT_SECTION_SIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
+#endif
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize,  /**< dict can be released after init, a local copy is preserved within zcs */
+                                          ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
+
+/* ZSDTMT_parameter :
+ * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
+typedef enum {
+    ZSTDMT_p_sectionSize,        /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */
+    ZSTDMT_p_overlapSectionLog   /* Log of overlapped section; 0 == no overlap, 6 (default) == use 1/8th of window, >= 9 == use full window */
+} ZSDTMT_parameter;
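+
+/* Worked example (illustrative, not upstream documentation) : with
+ * ZSTDMT_p_overlapSectionLog left at its default of 6, the implementation
+ * keeps overlapRLog = 9 - 6 = 3, so each job preloads
+ * 1 << (windowLog - 3) bytes, i.e. 1/8th of the window, from the previous
+ * section. A value of 0 yields overlapRLog = 9, hence no overlap at all,
+ * while any value >= 9 preloads the full window. */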
+
+/* ZSTDMT_setMTCtxParameter() :
+ * Allows setting individual parameters, one at a time, from the list of enums defined in ZSDTMT_parameter.
+ * The function should typically be called right after ZSTDMT_createCCtx().
+ * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTDMT_COMPRESS_H */
--- a/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/decompress/zstd_decompress.c	Tue Apr 18 12:24:34 2017 -0400
@@ -1444,7 +1444,7 @@
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
     if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, dict, dictSize);
 #endif
-    ZSTD_decompressBegin_usingDict(dctx, dict, dictSize);
+    CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
     ZSTD_checkContinuity(dctx, dst);
     return ZSTD_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
 }
@@ -1671,9 +1671,9 @@
     }
 
     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
-    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
-    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
     dictPtr += 12;
 
     dctx->litEntropy = dctx->fseEntropy = 1;
@@ -1713,39 +1713,44 @@
 /* ======   ZSTD_DDict   ====== */
 
 struct ZSTD_DDict_s {
-    void* dict;
+    void* dictBuffer;
+    const void* dictContent;
     size_t dictSize;
     ZSTD_DCtx* refContext;
 };  /* typedef'd to ZSTD_DDict within "zstd.h" */
 
-ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_customMem customMem)
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
 {
     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
     if (!customMem.customAlloc || !customMem.customFree) return NULL;
 
     {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
-        void* const dictContent = ZSTD_malloc(dictSize, customMem);
         ZSTD_DCtx* const dctx = ZSTD_createDCtx_advanced(customMem);
 
-        if (!dictContent || !ddict || !dctx) {
-            ZSTD_free(dictContent, customMem);
+        if (!ddict || !dctx) {
             ZSTD_free(ddict, customMem);
             ZSTD_free(dctx, customMem);
             return NULL;
         }
 
-        if (dictSize) {
-            memcpy(dictContent, dict, dictSize);
+        if ((byReference) || (!dict) || (!dictSize)) {
+            ddict->dictBuffer = NULL;
+            ddict->dictContent = dict;
+        } else {
+            void* const internalBuffer = ZSTD_malloc(dictSize, customMem);
+            if (!internalBuffer) { ZSTD_free(dctx, customMem); ZSTD_free(ddict, customMem); return NULL; }
+            memcpy(internalBuffer, dict, dictSize);
+            ddict->dictBuffer = internalBuffer;
+            ddict->dictContent = internalBuffer;
         }
-        {   size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, dictContent, dictSize);
+        {   size_t const errorCode = ZSTD_decompressBegin_usingDict(dctx, ddict->dictContent, dictSize);
             if (ZSTD_isError(errorCode)) {
-                ZSTD_free(dictContent, customMem);
+                ZSTD_free(ddict->dictBuffer, customMem);
                 ZSTD_free(ddict, customMem);
                 ZSTD_free(dctx, customMem);
                 return NULL;
         }   }
 
-        ddict->dict = dictContent;
         ddict->dictSize = dictSize;
         ddict->refContext = dctx;
         return ddict;
@@ -1758,15 +1763,27 @@
 ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
 {
     ZSTD_customMem const allocator = { NULL, NULL, NULL };
-    return ZSTD_createDDict_advanced(dict, dictSize, allocator);
+    return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator);
 }
 
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator);
+}
+
+
 size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;   /* support free on NULL */
     {   ZSTD_customMem const cMem = ddict->refContext->customMem;
         ZSTD_freeDCtx(ddict->refContext);
-        ZSTD_free(ddict->dict, cMem);
+        ZSTD_free(ddict->dictBuffer, cMem);
         ZSTD_free(ddict, cMem);
         return 0;
     }
@@ -1775,7 +1792,7 @@
 size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;   /* support sizeof on NULL */
-    return sizeof(*ddict) + sizeof(ddict->refContext) + ddict->dictSize;
+    return sizeof(*ddict) + ZSTD_sizeof_DCtx(ddict->refContext) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
 }
 
 /*! ZSTD_getDictID_fromDict() :
@@ -1796,7 +1813,7 @@
 unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
 {
     if (ddict==NULL) return 0;
-    return ZSTD_getDictID_fromDict(ddict->dict, ddict->dictSize);
+    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
 }
 
 /*! ZSTD_getDictID_fromFrame() :
@@ -1827,7 +1844,7 @@
                             const ZSTD_DDict* ddict)
 {
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
-    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dict, ddict->dictSize);
+    if (ZSTD_isLegacy(src, srcSize)) return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, ddict->dictContent, ddict->dictSize);
 #endif
     ZSTD_refDCtx(dctx, ddict->refContext);
     ZSTD_checkContinuity(dctx, dst);
@@ -1919,7 +1936,7 @@
     zds->stage = zdss_loadHeader;
     zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
     ZSTD_freeDDict(zds->ddictLocal);
-    if (dict) {
+    if (dict && dictSize >= 8) {
         zds->ddictLocal = ZSTD_createDDict(dict, dictSize);
         if (zds->ddictLocal == NULL) return ERROR(memory_allocation);
     } else zds->ddictLocal = NULL;
@@ -1956,7 +1973,7 @@
     switch(paramType)
     {
         default : return ERROR(parameter_unknown);
-        case ZSTDdsp_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
+        case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break;
     }
     return 0;
 }
@@ -2007,7 +2024,7 @@
 #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                 {   U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
                     if (legacyVersion) {
-                        const void* const dict = zds->ddict ? zds->ddict->dict : NULL;
+                        const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
                         size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
                         CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion,
                                                        dict, dictSize));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/python-zstandard/zstd/dictBuilder/cover.c	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,1021 @@
+/**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/*-*************************************
+*  Constants
+***************************************/
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+
+/*-*************************************
+*  Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...)                                                           \
+  {                                                                            \
+    fprintf(stderr, __VA_ARGS__);                                              \
+    fflush(stderr);                                                            \
+  }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
+  if (displayLevel >= l) {                                                     \
+    DISPLAY(__VA_ARGS__);                                                      \
+  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
+  if (displayLevel >= l) {                                                     \
+    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
+      g_time = clock();                                                        \
+      DISPLAY(__VA_ARGS__);                                                    \
+      if (displayLevel >= 4)                                                   \
+        fflush(stdout);                                                        \
+    }                                                                          \
+  }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+/*-*************************************
+* Hash table
+***************************************
+* A small specialized hash map for storing activeDmers.
+* The map does not resize, so if it becomes full it will loop forever.
+* Thus, the map must be large enough to store every value.
+* The map implements linear probing and keeps its load less than 0.5.
+*/
+
+#define MAP_EMPTY_VALUE ((U32)-1)
+typedef struct COVER_map_pair_t_s {
+  U32 key;
+  U32 value;
+} COVER_map_pair_t;
+
+typedef struct COVER_map_s {
+  COVER_map_pair_t *data;
+  U32 sizeLog;
+  U32 size;
+  U32 sizeMask;
+} COVER_map_t;
+
+/**
+ * Clear the map.
+ */
+static void COVER_map_clear(COVER_map_t *map) {
+  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
+}
+
+/**
+ * Initializes a map of the given size.
+ * Returns 1 on success and 0 on failure.
+ * The map must be destroyed with COVER_map_destroy().
+ * The map is only guaranteed to be large enough to hold size elements.
+ */
+static int COVER_map_init(COVER_map_t *map, U32 size) {
+  map->sizeLog = ZSTD_highbit32(size) + 2;
+  map->size = (U32)1 << map->sizeLog;
+  map->sizeMask = map->size - 1;
+  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
+  if (!map->data) {
+    map->sizeLog = 0;
+    map->size = 0;
+    return 0;
+  }
+  COVER_map_clear(map);
+  return 1;
+}
+
+/**
+ * Internal hash function
+ */
+static const U32 prime4bytes = 2654435761U;
+static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
+  return (key * prime4bytes) >> (32 - map->sizeLog);
+}
+
+/**
+ * Helper function that returns the index that a key should be placed into.
+ */
+static U32 COVER_map_index(COVER_map_t *map, U32 key) {
+  const U32 hash = COVER_map_hash(map, key);
+  U32 i;
+  for (i = hash;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *pos = &map->data[i];
+    if (pos->value == MAP_EMPTY_VALUE) {
+      return i;
+    }
+    if (pos->key == key) {
+      return i;
+    }
+  }
+}
+
+/**
+ * Returns the pointer to the value for key.
+ * If key is not in the map, it is inserted and the value is set to 0.
+ * The map must not be full.
+ */
+static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
+  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
+  if (pos->value == MAP_EMPTY_VALUE) {
+    pos->key = key;
+    pos->value = 0;
+  }
+  return &pos->value;
+}
+
+/**
+ * Deletes key from the map if present.
+ */
+static void COVER_map_remove(COVER_map_t *map, U32 key) {
+  U32 i = COVER_map_index(map, key);
+  COVER_map_pair_t *del = &map->data[i];
+  U32 shift = 1;
+  if (del->value == MAP_EMPTY_VALUE) {
+    return;
+  }
+  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *const pos = &map->data[i];
+    /* If the position is empty we are done */
+    if (pos->value == MAP_EMPTY_VALUE) {
+      del->value = MAP_EMPTY_VALUE;
+      return;
+    }
+    /* If pos can be moved to del do so */
+    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
+      del->key = pos->key;
+      del->value = pos->value;
+      del = pos;
+      shift = 1;
+    } else {
+      ++shift;
+    }
+  }
+}
+
+/**
+ * Destroys a map that was initialized with COVER_map_init().
+ */
+static void COVER_map_destroy(COVER_map_t *map) {
+  if (map->data) {
+    free(map->data);
+  }
+  map->data = NULL;
+  map->size = 0;
+}
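+
+/* Illustrative usage sketch (not part of the upstream sources) : typical
+ * lifecycle of the map. The capacity passed to COVER_map_init() must cover
+ * every distinct key ever inserted, since the table never resizes.
+ *
+ *     COVER_map_t map;
+ *     if (!COVER_map_init(&map, 1024)) { (out of memory) }
+ *     *COVER_map_at(&map, 42) += 1;    (insert-or-find, then increment)
+ *     COVER_map_remove(&map, 42);
+ *     COVER_map_destroy(&map);
+ */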
+
+/*-*************************************
+* Context
+***************************************/
+
+typedef struct {
+  const BYTE *samples;
+  size_t *offsets;
+  const size_t *samplesSizes;
+  size_t nbSamples;
+  U32 *suffix;
+  size_t suffixSize;
+  U32 *freqs;
+  U32 *dmerAt;
+  unsigned d;
+} COVER_ctx_t;
+
+/* We need a global context for qsort... */
+static COVER_ctx_t *g_ctx = NULL;
+
+/*-*************************************
+*  Helper functions
+***************************************/
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+  size_t sum = 0;
+  size_t i;
+  for (i = 0; i < nbSamples; ++i) {
+    sum += samplesSizes[i];
+  }
+  return sum;
+}
+
+/**
+ * Returns -1 if the dmer at lp is less than the dmer at rp.
+ * Returns 0 if the dmers at lp and rp are equal.
+ * Returns 1 if the dmer at lp is greater than the dmer at rp.
+ */
+static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+  const U32 lhs = *(const U32 *)lp;
+  const U32 rhs = *(const U32 *)rp;
+  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
+}
+
+/**
+ * Same as COVER_cmp() except ties are broken by pointer value
+ * NOTE: g_ctx must be set to call this function.  A global is required because
+ * qsort doesn't take an opaque pointer.
+ */
+static int COVER_strict_cmp(const void *lp, const void *rp) {
+  int result = COVER_cmp(g_ctx, lp, rp);
+  if (result == 0) {
+    result = lp < rp ? -1 : 1;
+  }
+  return result;
+}
+
+/**
+ * Returns the first pointer in [first, last) whose element does not compare
+ * less than value.  If no such element exists it returns last.
+ */
+static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
+                                       size_t value) {
+  size_t count = last - first;
+  while (count != 0) {
+    size_t step = count / 2;
+    const size_t *ptr = first;
+    ptr += step;
+    if (*ptr < value) {
+      first = ++ptr;
+      count -= step + 1;
+    } else {
+      count = step;
+    }
+  }
+  return first;
+}
+
+/**
+ * Generic groupBy function.
+ * Groups an array sorted by cmp into groups with equivalent values.
+ * Calls grp for each group.
+ */
+static void
+COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
+              int (*cmp)(COVER_ctx_t *, const void *, const void *),
+              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
+  const BYTE *ptr = (const BYTE *)data;
+  size_t num = 0;
+  while (num < count) {
+    const BYTE *grpEnd = ptr + size;
+    ++num;
+    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
+      grpEnd += size;
+      ++num;
+    }
+    grp(ctx, ptr, grpEnd);
+    ptr = grpEnd;
+  }
+}
+
+/*-*************************************
+*  Cover functions
+***************************************/
+
+/**
+ * Called on each group of positions with the same dmer.
+ * Counts the frequency of each dmer and saves it in the suffix array.
+ * Fills `ctx->dmerAt`.
+ */
+static void COVER_group(COVER_ctx_t *ctx, const void *group,
+                        const void *groupEnd) {
+  /* The group consists of all the positions with the same first d bytes. */
+  const U32 *grpPtr = (const U32 *)group;
+  const U32 *grpEnd = (const U32 *)groupEnd;
+  /* The dmerId is how we will reference this dmer.
+   * This allows us to map the whole dmer space to a much smaller space, the
+   * size of the suffix array.
+   */
+  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
+  /* Count the number of samples this dmer shows up in */
+  U32 freq = 0;
+  /* Details */
+  const size_t *curOffsetPtr = ctx->offsets;
+  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
+  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
+   * different sample than the last.
+   */
+  size_t curSampleEnd = ctx->offsets[0];
+  for (; grpPtr != grpEnd; ++grpPtr) {
+    /* Save the dmerId for this position so we can get back to it. */
+    ctx->dmerAt[*grpPtr] = dmerId;
+    /* Dictionaries only help for the first reference to the dmer.
+     * After that zstd can reference the match from the previous reference.
+     * So only count each dmer once for each sample it is in.
+     */
+    if (*grpPtr < curSampleEnd) {
+      continue;
+    }
+    freq += 1;
+    /* Binary search to find the end of the sample *grpPtr is in.
+     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
+     * search because the loop is over.
+     */
+    if (grpPtr + 1 != grpEnd) {
+      const size_t *sampleEndPtr =
+          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
+      curSampleEnd = *sampleEndPtr;
+      curOffsetPtr = sampleEndPtr + 1;
+    }
+  }
+  /* At this point we are never going to look at this segment of the suffix
+   * array again.  We take advantage of this fact to save memory.
+   * We store the frequency of the dmer in the first position of the group,
+   * which is dmerId.
+   */
+  ctx->suffix[dmerId] = freq;
+}
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+  U32 begin;
+  U32 end;
+  double score;
+} COVER_segment_t;
+
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the function:
+ *
+ * Let F(d) be the frequency of dmer d.
+ * Let S_i be the dmer at position i of segment S which has length k.
+ *
+ *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
+                                           COVER_map_t *activeDmers, U32 begin,
+                                           U32 end, COVER_params_t parameters) {
+  /* Constants */
+  const U32 k = parameters.k;
+  const U32 d = parameters.d;
+  const U32 dmersInK = k - d + 1;
+  /* Try each segment (activeSegment) and save the best (bestSegment) */
+  COVER_segment_t bestSegment = {0, 0, 0};
+  COVER_segment_t activeSegment;
+  /* Reset the activeDmers in the segment */
+  COVER_map_clear(activeDmers);
+  /* The activeSegment starts at the beginning of the epoch. */
+  activeSegment.begin = begin;
+  activeSegment.end = begin;
+  activeSegment.score = 0;
+  /* Slide the activeSegment through the whole epoch.
+   * Save the best segment in bestSegment.
+   */
+  while (activeSegment.end < end) {
+    /* The dmerId for the dmer at the next position */
+    U32 newDmer = ctx->dmerAt[activeSegment.end];
+    /* The entry in activeDmers for this dmerId */
+    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
+    /* If the dmer isn't already present in the segment add its score. */
+    if (*newDmerOcc == 0) {
+      /* The paper suggests using the L-0.5 norm, but experiments show that it
+       * doesn't help.
+       */
+      activeSegment.score += freqs[newDmer];
+    }
+    /* Add the dmer to the segment */
+    activeSegment.end += 1;
+    *newDmerOcc += 1;
+
+    /* If the window is now too large, drop the first position */
+    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+      U32 delDmer = ctx->dmerAt[activeSegment.begin];
+      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
+      activeSegment.begin += 1;
+      *delDmerOcc -= 1;
+      /* If this is the last occurrence of the dmer, subtract its score */
+      if (*delDmerOcc == 0) {
+        COVER_map_remove(activeDmers, delDmer);
+        activeSegment.score -= freqs[delDmer];
+      }
+    }
+
+    /* If this segment is the best so far save it */
+    if (activeSegment.score > bestSegment.score) {
+      bestSegment = activeSegment;
+    }
+  }
+  {
+    /* Trim off the zero frequency head and tail from the segment. */
+    U32 newBegin = bestSegment.end;
+    U32 newEnd = bestSegment.begin;
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      U32 freq = freqs[ctx->dmerAt[pos]];
+      if (freq != 0) {
+        newBegin = MIN(newBegin, pos);
+        newEnd = pos + 1;
+      }
+    }
+    bestSegment.begin = newBegin;
+    bestSegment.end = newEnd;
+  }
+  {
+    /* Zero out the frequency of each dmer covered by the chosen segment. */
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      freqs[ctx->dmerAt[pos]] = 0;
+    }
+  }
+  return bestSegment;
+}
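+
+/* Worked example (illustrative) : with k = 16 and d = 8, a segment spans
+ * dmersInK = 16 - 8 + 1 = 9 dmer positions. Each *distinct* dmer in the
+ * window contributes its frequency exactly once, so a window containing
+ * dmers with frequencies 4 and 2 plus a repeat of the first scores
+ * 4 + 2 = 6. The chosen segment's dmer frequencies are then zeroed, so
+ * later epochs do not pay for content the dictionary already covers. */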
+
+/**
+ * Check the validity of the parameters.
+ * Returns non-zero if the parameters are valid and 0 otherwise.
+ */
+static int COVER_checkParameters(COVER_params_t parameters) {
+  /* k and d are required parameters */
+  if (parameters.d == 0 || parameters.k == 0) {
+    return 0;
+  }
+  /* d <= k */
+  if (parameters.d > parameters.k) {
+    return 0;
+  }
+  return 1;
+}
+
+/**
+ * Clean up a context initialized with `COVER_ctx_init()`.
+ */
+static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
+  if (!ctx) {
+    return;
+  }
+  if (ctx->suffix) {
+    free(ctx->suffix);
+    ctx->suffix = NULL;
+  }
+  if (ctx->freqs) {
+    free(ctx->freqs);
+    ctx->freqs = NULL;
+  }
+  if (ctx->dmerAt) {
+    free(ctx->dmerAt);
+    ctx->dmerAt = NULL;
+  }
+  if (ctx->offsets) {
+    free(ctx->offsets);
+    ctx->offsets = NULL;
+  }
+}
+
+/**
+ * Prepare a context for dictionary building.
+ * The context depends only on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `COVER_ctx_destroy()`.
+ */
+static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+                          const size_t *samplesSizes, unsigned nbSamples,
+                          unsigned d) {
+  const BYTE *const samples = (const BYTE *)samplesBuffer;
+  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+  /* Checks */
+  if (totalSamplesSize < d ||
+      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
+    DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n",
+                 (COVER_MAX_SAMPLES_SIZE >> 20));
+    return 0;
+  }
+  /* Zero the context */
+  memset(ctx, 0, sizeof(*ctx));
+  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
+               (U32)totalSamplesSize);
+  ctx->samples = samples;
+  ctx->samplesSizes = samplesSizes;
+  ctx->nbSamples = nbSamples;
+  /* Partial suffix array */
+  ctx->suffixSize = totalSamplesSize - d + 1;
+  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* Maps index to the dmerID */
+  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* The offsets of each file */
+  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
+  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
+    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
+    COVER_ctx_destroy(ctx);
+    return 0;
+  }
+  ctx->freqs = NULL;
+  ctx->d = d;
+
+  /* Fill offsets from the samplesSizes */
+  {
+    U32 i;
+    ctx->offsets[0] = 0;
+    for (i = 1; i <= nbSamples; ++i) {
+      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+    }
+  }
+  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
+  {
+    /* suffix is a partial suffix array.
+     * It only sorts suffixes by their first parameters.d bytes.
+     * The sort is stable, so each dmer group is sorted by position in input.
+     */
+    U32 i;
+    for (i = 0; i < ctx->suffixSize; ++i) {
+      ctx->suffix[i] = i;
+    }
+    /* qsort doesn't take an opaque pointer, so pass as a global */
+    g_ctx = ctx;
+    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), &COVER_strict_cmp);
+  }
+  DISPLAYLEVEL(2, "Computing frequencies\n");
+  /* For each dmer group (group of positions with the same first d bytes):
+   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
+   *    (groupBeginPtr - suffix).  This allows us to go from position to
+   *    dmerID so we can look up values in freq.
+   * 2. We calculate how many samples the dmer occurs in and save it in
+   *    freqs[dmerId].
+   */
+  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, &COVER_cmp,
+                &COVER_group);
+  ctx->freqs = ctx->suffix;
+  ctx->suffix = NULL;
+  return 1;
+}
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
+                                    COVER_map_t *activeDmers, void *dictBuffer,
+                                    size_t dictBufferCapacity,
+                                    COVER_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  size_t tail = dictBufferCapacity;
+  /* Divide the data up into epochs of equal size.
+   * We will select at least one segment from each epoch.
+   */
+  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
+  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+  size_t epoch;
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
+               epochSize);
+  /* Loop through the epochs until there are no more segments or the dictionary
+   * is full.
+   */
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
+    const U32 epochBegin = (U32)(epoch * epochSize);
+    const U32 epochEnd = epochBegin + epochSize;
+    size_t segmentSize;
+    /* Select a segment */
+    COVER_segment_t segment = COVER_selectSegment(
+        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
+    /* Trim the segment if necessary and if it is empty then we are done */
+    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+    if (segmentSize == 0) {
+      break;
+    }
+    /* We fill the dictionary from the back to allow the best segments to be
+     * referenced with the smallest offsets.
+     */
+    tail -= segmentSize;
+    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+    DISPLAYUPDATE(
+        2, "\r%u%%       ",
+        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+  }
+  DISPLAYLEVEL(2, "\r%79s\r", "");
+  return tail;
+}
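+
+/* Worked example (illustrative) : with dictBufferCapacity = 100 KB and
+ * k = 1024, the suffix array is split into epochs = 102400 / 1024 = 100
+ * epochs of epochSize = suffixSize / 100 dmers each. This assumes
+ * dictBufferCapacity >= k, so that epochs >= 1 and the divisions above
+ * stay well defined. */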
+
+/**
+ * Translate from COVER_params_t to ZDICT_params_t required for finalizing the
+ * dictionary.
+ */
+static ZDICT_params_t COVER_translateParams(COVER_params_t parameters) {
+  ZDICT_params_t zdictParams;
+  memset(&zdictParams, 0, sizeof(zdictParams));
+  zdictParams.notificationLevel = 1;
+  zdictParams.dictID = parameters.dictID;
+  zdictParams.compressionLevel = parameters.compressionLevel;
+  return zdictParams;
+}
+
+/**
+ * Constructs a dictionary using a heuristic based on the following paper:
+ *
+ * Liao, Petri, Moffat, Wirth
+ * Effective Construction of Relative Lempel-Ziv Dictionaries
+ * Published in WWW 2016.
+ */
+ZDICTLIB_API size_t COVER_trainFromBuffer(
+    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
+    const size_t *samplesSizes, unsigned nbSamples, COVER_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  COVER_ctx_t ctx;
+  COVER_map_t activeDmers;
+  /* Checks */
+  if (!COVER_checkParameters(parameters)) {
+    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  /* Initialize global data */
+  g_displayLevel = parameters.notificationLevel;
+  /* Initialize context and activeDmers */
+  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d)) {
+    return ERROR(GENERIC);
+  }
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    COVER_ctx_destroy(&ctx);
+    return ERROR(GENERIC);
+  }
+
+  DISPLAYLEVEL(2, "Building dictionary\n");
+  {
+    const size_t tail =
+        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+                              dictBufferCapacity, parameters);
+    ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    const size_t dictionarySize = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        samplesBuffer, samplesSizes, nbSamples, zdictParams);
+    if (!ZSTD_isError(dictionarySize)) {
+      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                   (U32)dictionarySize);
+    }
+    COVER_ctx_destroy(&ctx);
+    COVER_map_destroy(&activeDmers);
+    return dictionarySize;
+  }
+}
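+
+/* Illustrative usage sketch (not part of the upstream sources) : training
+ * with explicit k and d. `samples`, `sizes`, `nb`, `dictBuf` and `dictCap`
+ * are hypothetical caller-provided names.
+ *
+ *     COVER_params_t params;
+ *     memset(&params, 0, sizeof(params));
+ *     params.d = 8;
+ *     params.k = 1024;
+ *     {   size_t const dictSize = COVER_trainFromBuffer(dictBuf, dictCap,
+ *                                     samples, sizes, nb, params);
+ *         if (ZSTD_isError(dictSize)) { (handle error) }
+ *     }
+ */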
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  COVER_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+static void COVER_best_init(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_init(&best->mutex, NULL);
+  pthread_cond_init(&best->cond, NULL);
+  best->liveJobs = 0;
+  best->dict = NULL;
+  best->dictSize = 0;
+  best->compressedSize = (size_t)-1;
+  memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
+static void COVER_best_wait(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  while (best->liveJobs != 0) {
+    pthread_cond_wait(&best->cond, &best->mutex);
+  }
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+static void COVER_best_destroy(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  COVER_best_wait(best);
+  if (best->dict) {
+    free(best->dict);
+  }
+  pthread_mutex_destroy(&best->mutex);
+  pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+static void COVER_best_start(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  pthread_mutex_lock(&best->mutex);
+  ++best->liveJobs;
+  pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, whether it succeeded or failed.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
+static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+                              COVER_params_t parameters, void *dict,
+                              size_t dictSize) {
+  if (!best) {
+    return;
+  }
+  {
+    size_t liveJobs;
+    pthread_mutex_lock(&best->mutex);
+    --best->liveJobs;
+    liveJobs = best->liveJobs;
+    /* If the new dictionary is better */
+    if (compressedSize < best->compressedSize) {
+      /* Allocate space if necessary */
+      if (!best->dict || best->dictSize < dictSize) {
+        if (best->dict) {
+          free(best->dict);
+        }
+        best->dict = malloc(dictSize);
+        if (!best->dict) {
+          best->compressedSize = ERROR(GENERIC);
+          best->dictSize = 0;
+          /* Unlock (and wake any waiters) before returning on failure */
+          pthread_mutex_unlock(&best->mutex);
+          if (liveJobs == 0) {
+            pthread_cond_broadcast(&best->cond);
+          }
+          return;
+        }
+      }
+      /* Save the dictionary, parameters, and size */
+      memcpy(best->dict, dict, dictSize);
+      best->dictSize = dictSize;
+      best->parameters = parameters;
+      best->compressedSize = compressedSize;
+    }
+    pthread_mutex_unlock(&best->mutex);
+    if (liveJobs == 0) {
+      pthread_cond_broadcast(&best->cond);
+    }
+  }
+}
+
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+  const COVER_ctx_t *ctx;
+  COVER_best_t *best;
+  size_t dictBufferCapacity;
+  COVER_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void COVER_tryParameters(void *opaque) {
+  /* Save parameters as local variables */
+  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+  const COVER_ctx_t *const ctx = data->ctx;
+  const COVER_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Allocate space for hash table, dict, and freqs */
+  COVER_map_t activeDmers;
+  BYTE *const dict = (BYTE *)malloc(dictBufferCapacity);
+  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    goto _cleanup;
+  }
+  if (!dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+  /* Build the dictionary */
+  {
+    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+                                              dictBufferCapacity, parameters);
+    const ZDICT_params_t zdictParams = COVER_translateParams(parameters);
+    dictBufferCapacity = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, zdictParams);
+    if (ZDICT_isError(dictBufferCapacity)) {
+      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+      goto _cleanup;
+    }
+  }
+  /* Check total compressed size */
+  {
+    /* Pointers */
+    ZSTD_CCtx *cctx;
+    ZSTD_CDict *cdict;
+    void *dst;
+    /* Local variables */
+    size_t dstCapacity;
+    size_t i;
+    /* Allocate dst with enough space to compress the maximum sized sample */
+    {
+      size_t maxSampleSize = 0;
+      for (i = 0; i < ctx->nbSamples; ++i) {
+        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
+      }
+      dstCapacity = ZSTD_compressBound(maxSampleSize);
+      dst = malloc(dstCapacity);
+    }
+    /* Create the cctx and cdict */
+    cctx = ZSTD_createCCtx();
+    cdict =
+        ZSTD_createCDict(dict, dictBufferCapacity, parameters.compressionLevel);
+    if (!dst || !cctx || !cdict) {
+      goto _compressCleanup;
+    }
+    /* Compress each sample and sum their sizes (or error) */
+    totalCompressedSize = 0;
+    for (i = 0; i < ctx->nbSamples; ++i) {
+      const size_t size = ZSTD_compress_usingCDict(
+          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
+          ctx->samplesSizes[i], cdict);
+      if (ZSTD_isError(size)) {
+        totalCompressedSize = ERROR(GENERIC);
+        goto _compressCleanup;
+      }
+      totalCompressedSize += size;
+    }
+  _compressCleanup:
+    ZSTD_freeCCtx(cctx);
+    ZSTD_freeCDict(cdict);
+    if (dst) {
+      free(dst);
+    }
+  }
+
+_cleanup:
+  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+                    dictBufferCapacity);
+  free(data);
+  COVER_map_destroy(&activeDmers);
+  if (dict) {
+    free(dict);
+  }
+  if (freqs) {
+    free(freqs);
+  }
+}
+
+ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
+                                                  size_t dictBufferCapacity,
+                                                  const void *samplesBuffer,
+                                                  const size_t *samplesSizes,
+                                                  unsigned nbSamples,
+                                                  COVER_params_t *parameters) {
+  /* constants */
+  const unsigned nbThreads = parameters->nbThreads;
+  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+  const unsigned kMaxD = parameters->d == 0 ? 16 : parameters->d;
+  const unsigned kMinK = parameters->k == 0 ? kMaxD : parameters->k;
+  const unsigned kMaxK = parameters->k == 0 ? 2048 : parameters->k;
+  const unsigned kSteps = parameters->steps == 0 ? 32 : parameters->steps;
+  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+  const unsigned kIterations =
+      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+  /* Local variables */
+  const int displayLevel = parameters->notificationLevel;
+  unsigned iteration = 1;
+  unsigned d;
+  unsigned k;
+  COVER_best_t best;
+  POOL_ctx *pool = NULL;
+  /* Checks */
+  if (kMinK < kMaxD || kMaxK < kMinK) {
+    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  if (nbThreads > 1) {
+    pool = POOL_create(nbThreads, 1);
+    if (!pool) {
+      return ERROR(memory_allocation);
+    }
+  }
+  /* Initialization */
+  COVER_best_init(&best);
+  /* Turn down global display level to clean up display at level 2 and below */
+  g_displayLevel = parameters->notificationLevel - 1;
+  /* Loop through d first because each new value needs a new context */
+  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+                    kIterations);
+  for (d = kMinD; d <= kMaxD; d += 2) {
+    /* Initialize the context for this value of d */
+    COVER_ctx_t ctx;
+    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return ERROR(GENERIC);
+    }
+    /* Loop through k reusing the same context */
+    for (k = kMinK; k <= kMaxK; k += kStepSize) {
+      /* Prepare the arguments */
+      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
+          sizeof(COVER_tryParameters_data_t));
+      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+      if (!data) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+        COVER_best_destroy(&best);
+        COVER_ctx_destroy(&ctx);
+        POOL_free(pool);
+        return ERROR(GENERIC);
+      }
+      data->ctx = &ctx;
+      data->best = &best;
+      data->dictBufferCapacity = dictBufferCapacity;
+      data->parameters = *parameters;
+      data->parameters.k = k;
+      data->parameters.d = d;
+      data->parameters.steps = kSteps;
+      /* Check the parameters */
+      if (!COVER_checkParameters(data->parameters)) {
+        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+        free(data);
+        continue;
+      }
+      /* Call the function and pass ownership of data to it */
+      COVER_best_start(&best);
+      if (pool) {
+        POOL_add(pool, &COVER_tryParameters, data);
+      } else {
+        COVER_tryParameters(data);
+      }
+      /* Print status */
+      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                         (U32)((iteration * 100) / kIterations));
+      ++iteration;
+    }
+    COVER_best_wait(&best);
+    COVER_ctx_destroy(&ctx);
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+  /* Fill the output buffer and parameters with the best dictionary found */
+  {
+    const size_t dictSize = best.dictSize;
+    if (ZSTD_isError(best.compressedSize)) {
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return best.compressedSize;
+    }
+    *parameters = best.parameters;
+    memcpy(dictBuffer, best.dict, dictSize);
+    COVER_best_destroy(&best);
+    POOL_free(pool);
+    return dictSize;
+  }
+}
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.c	Tue Apr 18 12:24:34 2017 -0400
@@ -36,12 +36,11 @@
 #include <time.h>          /* clock */
 
 #include "mem.h"           /* read */
-#include "error_private.h"
 #include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
 #define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
+#include "huf.h"           /* HUF_buildCTable, HUF_writeCTable */
 #include "zstd_internal.h" /* includes zstd.h */
-#include "xxhash.h"
+#include "xxhash.h"        /* XXH64 */
 #include "divsufsort.h"
 #ifndef ZDICT_STATIC_LINKING_ONLY
 #  define ZDICT_STATIC_LINKING_ONLY
@@ -61,7 +60,7 @@
 #define NOISELENGTH 32
 
 #define MINRATIO 4
-static const int g_compressionLevel_default = 5;
+static const int g_compressionLevel_default = 6;
 static const U32 g_selectivity_default = 9;
 static const size_t g_provision_entropySize = 200;
 static const size_t g_min_fast_dictContent = 192;
@@ -307,13 +306,13 @@
         } while (length >=MINMATCHLENGTH);
 
         /* look backward */
-		length = MINMATCHLENGTH;
-		while ((length >= MINMATCHLENGTH) & (start > 0)) {
-			length = ZDICT_count(b + pos, b + suffix[start - 1]);
-			if (length >= LLIMIT) length = LLIMIT - 1;
-			lengthList[length]++;
-			if (length >= MINMATCHLENGTH) start--;
-		}
+        length = MINMATCHLENGTH;
+        while ((length >= MINMATCHLENGTH) & (start > 0)) {
+            length = ZDICT_count(b + pos, b + suffix[start - 1]);
+            if (length >= LLIMIT) length = LLIMIT - 1;
+            lengthList[length]++;
+            if (length >= MINMATCHLENGTH) start--;
+        }
 
         /* largest useful length */
         memset(cumulLength, 0, sizeof(cumulLength));
@@ -570,7 +569,7 @@
             if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
     }
     cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_ABSOLUTEMAX, src, srcSize);
-    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(1, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
 
     if (cSize) {  /* if == 0; block is not compressible */
         const seqStore_t* seqStorePtr = ZSTD_getSeqStore(esr.zc);
@@ -825,6 +824,55 @@
 }
 
 
+
+size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                          const void* customDictContent, size_t dictContentSize,
+                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                          ZDICT_params_t params)
+{
+    size_t hSize;
+#define HBUFFSIZE 256
+    BYTE header[HBUFFSIZE];
+    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    U32 const notificationLevel = params.notificationLevel;
+
+    /* check conditions */
+    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
+    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
+
+    /* dictionary header */
+    MEM_writeLE32(header, ZSTD_DICT_MAGIC);
+    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
+        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+        U32 const dictID = params.dictID ? params.dictID : compliantID;
+        MEM_writeLE32(header+4, dictID);
+    }
+    hSize = 8;
+
+    /* entropy tables */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    DISPLAYLEVEL(2, "statistics ... \n");
+    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
+                                  compressionLevel,
+                                  samplesBuffer, samplesSizes, nbSamples,
+                                  customDictContent, dictContentSize,
+                                  notificationLevel);
+        if (ZDICT_isError(eSize)) return eSize;
+        hSize += eSize;
+    }
+
+    /* copy elements in final buffer ; note : src and dst buffer can overlap */
+    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
+    {   size_t const dictSize = hSize + dictContentSize;
+        char* dictEnd = (char*)dictBuffer + dictSize;
+        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
+        memcpy(dictBuffer, header, hSize);
+        return dictSize;
+    }
+}
+
+
 size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                                  ZDICT_params_t params)
--- a/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/dictBuilder/zdict.h	Tue Apr 18 12:24:34 2017 -0400
@@ -19,15 +19,18 @@
 #include <stddef.h>  /* size_t */
 
 
-/*======  Export for Windows  ======*/
-/*!
-*  ZSTD_DLL_EXPORT :
-*  Enable exporting of functions when building a Windows DLL
-*/
-#if defined(_WIN32) && defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZDICTLIB_API __declspec(dllexport)
+/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#  define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
 #else
-#  define ZDICTLIB_API
+#  define ZDICTLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
 #endif
 
 
@@ -79,27 +82,114 @@
               or an error code, which can be tested by ZDICT_isError().
     note : ZDICT_trainFromBuffer_advanced() will send notifications into stderr if instructed to, using notificationLevel>0.
 */
-size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_params_t parameters);
+
+/*! COVER_params_t :
+    For all values 0 means default.
+    k and d are the only required parameters.
+*/
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    unsigned notificationLevel;  /* Write to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+    unsigned dictID;             /* 0 means auto mode (32-bits random value); other : force dictID value */
+    int      compressionLevel;   /* 0 means default; target a specific zstd compression level */
+} COVER_params_t;
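+
+/* An illustrative initialization sketch : zero the struct so every unset field
+ * keeps its documented default, then set the two required fields :
+ *
+ *   COVER_params_t params;
+ *   memset(&params, 0, sizeof(params));   // requires <string.h>
+ *   params.k = 1024;   // segment size
+ *   params.d = 8;      // dmer size, must satisfy d <= k
+ */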
+
+
+/*! COVER_trainFromBuffer() :
+    Train a dictionary from an array of samples using the COVER algorithm.
+    Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+    The resulting dictionary will be saved into `dictBuffer`.
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+    Note : COVER_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+    Tips : In general, a reasonable dictionary has a size of ~ 100 KB.
+           It's obviously possible to target smaller or larger ones, just by specifying a different `dictBufferCapacity`.
+           In general, it's recommended to provide a few thousand samples, but this can vary a lot.
+           It's recommended that the total size of all samples be about 100 times the target size of the dictionary.
+*/
+ZDICTLIB_API size_t COVER_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                              COVER_params_t parameters);
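+
+/* An illustrative call sketch, assuming `samples`, `sizes` and `nb` describe a
+ * concatenated sample buffer as explained above :
+ *
+ *   static char dict[102400];   // ~100 KB target
+ *   size_t const dictSize = COVER_trainFromBuffer(dict, sizeof(dict),
+ *                                                 samples, sizes, nb, params);
+ *   if (ZDICT_isError(dictSize)) { handle the error }
+ */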
+
+/*! COVER_optimizeTrainFromBuffer() :
+    The same requirements as above hold for all the parameters except `parameters`.
+    This function tries many parameter combinations and picks the best parameters.
+    `*parameters` is filled with the best parameters found, and the dictionary
+    constructed with those parameters is stored in `dictBuffer`.
+
+    All of the parameters d, k, steps are optional.
+    If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
+    If steps is zero it defaults to 32.
+    If k is non-zero then we don't check multiple values of k, otherwise we check `steps` values of k in [16, 2048].
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+              or an error code, which can be tested with ZDICT_isError().
+              On success `*parameters` contains the parameters selected.
+    Note : COVER_optimizeTrainFromBuffer() requires about 8 bytes of memory for each input byte, plus roughly another 5 bytes of memory per input byte for each thread.
+*/
+ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                                     const void* samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+                                     COVER_params_t *parameters);
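+
+/* An illustrative sketch : leave k and d at 0 to search the default grid, then
+ * read the winning values back from the parameters struct :
+ *
+ *   COVER_params_t params;
+ *   memset(&params, 0, sizeof(params));
+ *   size_t const dictSize = COVER_optimizeTrainFromBuffer(dict, dictCapacity,
+ *                                              samples, sizes, nb, &params);
+ *   // on success, params.k and params.d hold the selected pair
+ */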
+
+/*! ZDICT_finalizeDictionary() :
+
+    Given custom content as a basis for a dictionary, and a set of samples,
+    finalize the dictionary by adding headers and statistics.
+
+    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+
+    dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
+    dictBufferCapacity must be >= dictContentSize, and >= ZDICT_DICTSIZE_MIN bytes.
+
+    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
+              or an error code, which can be tested by ZDICT_isError().
+    note : ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
+    note 2 : dictBuffer and customDictContent can overlap
+*/
+#define ZDICT_CONTENTSIZE_MIN 256
+#define ZDICT_DICTSIZE_MIN    512
+ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* customDictContent, size_t dictContentSize,
                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                 ZDICT_params_t parameters);
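+
+/* An illustrative sketch : wrap externally-built content into a conformant
+ * dictionary; `content` and `contentSize` are assumed to come from a 3rd-party
+ * builder, and the buffers may overlap as noted above :
+ *
+ *   ZDICT_params_t p;
+ *   memset(&p, 0, sizeof(p));
+ *   size_t const dictSize = ZDICT_finalizeDictionary(dict, dictCapacity,
+ *                                                    content, contentSize,
+ *                                                    samples, sizes, nb, p);
+ */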
 
 
-/*! ZDICT_addEntropyTablesFromBuffer() :
-
-    Given a content-only dictionary (built using any 3rd party algorithm),
-    add entropy tables computed from an array of samples.
-    Samples must be stored concatenated in a flat buffer `samplesBuffer`,
-    supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
 
-    The input dictionary content must be stored *at the end* of `dictBuffer`.
-    Its size is `dictContentSize`.
-    The resulting dictionary with added entropy tables will be *written back to `dictBuffer`*,
-    starting from its beginning.
-    @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`).
-*/
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from the compiler,
+   for example with -Wno-deprecated-declarations for gcc
+   or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+   Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
+#  define ZDICT_DEPRECATED(message) ZDICTLIB_API   /* disable deprecation warnings */
+#else
+#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API [[deprecated(message)]]
+#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
+#  elif (ZDICT_GCC_VERSION >= 301)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
+#  elif defined(_MSC_VER)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
+#  else
+#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
+#  endif
+#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
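+
+/* An illustrative sketch : either pass the macro on the command line
+ * (e.g. -DZDICT_DISABLE_DEPRECATE_WARNINGS) or define it before the include :
+ *
+ *   #define ZDICT_DISABLE_DEPRECATE_WARNINGS
+ *   #include "zdict.h"
+ */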
+
+ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
 size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
-                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
-
+                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
 
 
 #endif   /* ZDICT_STATIC_LINKING_ONLY */
--- a/contrib/python-zstandard/zstd/zstd.h	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd/zstd.h	Tue Apr 18 12:24:34 2017 -0400
@@ -20,13 +20,16 @@
 
 /* =====   ZSTDLIB_API : control library symbols visibility   ===== */
 #if defined(__GNUC__) && (__GNUC__ >= 4)
-#  define ZSTDLIB_API __attribute__ ((visibility ("default")))
-#elif defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-#  define ZSTDLIB_API __declspec(dllexport)
+#  define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#else
+#  define ZSTDLIB_VISIBILITY
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
 #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDLIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
 #else
-#  define ZSTDLIB_API
+#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
 #endif
 
 
@@ -53,7 +56,7 @@
 /*------   Version   ------*/
 #define ZSTD_VERSION_MAJOR    1
 #define ZSTD_VERSION_MINOR    1
-#define ZSTD_VERSION_RELEASE  2
+#define ZSTD_VERSION_RELEASE  3
 
 #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
 #define ZSTD_QUOTE(str) #str
@@ -170,8 +173,8 @@
 *   When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
 *   ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
 *   ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only.
-*   `dict` can be released after ZSTD_CDict creation. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel);
+*   `dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel);
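+
+/* An illustrative sketch : digest the dictionary once, then reuse it for many
+ * messages (`cctx`, `dictBuf` and the I/O buffers are assumed to exist) :
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *   size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *                                                 src, srcSize, cdict);
+ *   ZSTD_freeCDict(cdict);
+ */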
 
 /*! ZSTD_freeCDict() :
 *   Function frees memory allocated by ZSTD_createCDict(). */
@@ -191,8 +194,8 @@
 
 /*! ZSTD_createDDict() :
 *   Create a digested dictionary, ready to start decompression operation without startup delay.
-*   `dict` can be released after creation. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize);
+*   dictBuffer can be released after DDict creation, as its content is copied inside DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
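+
+/* An illustrative decompression counterpart (`dctx` and buffers assumed) :
+ *
+ *   ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
+ *   size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                                   src, srcSize, ddict);
+ *   ZSTD_freeDDict(ddict);
+ */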
 
 /*! ZSTD_freeDDict() :
 *   Function frees memory allocated with ZSTD_createDDict() */
@@ -325,7 +328,7 @@
  * ***************************************************************************************/
 
 /* --- Constants ---*/
-#define ZSTD_MAGICNUMBER            0xFD2FB528   /* v0.8 */
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
 #define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
 
 #define ZSTD_WINDOWLOG_MAX_32  25
@@ -345,8 +348,9 @@
 #define ZSTD_TARGETLENGTH_MAX 999
 
 #define ZSTD_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MIN  6
 static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = 6;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
 static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
 static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
 
@@ -365,9 +369,9 @@
 } ZSTD_compressionParameters;
 
 typedef struct {
-    unsigned contentSizeFlag; /**< 1: content size will be in frame header (if known). */
-    unsigned checksumFlag;    /**< 1: will generate a 22-bits checksum at end of frame, to be used for error detection by decompressor */
-    unsigned noDictIDFlag;    /**< 1: no dict ID will be saved into frame header (if dictionary compression) */
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    unsigned checksumFlag;    /**< 1: generate a 32-bit checksum at end of frame, for error detection */
+    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
 } ZSTD_frameParameters;
 
 typedef struct {
@@ -397,9 +401,23 @@
  *  Gives the amount of memory used by a given ZSTD_CCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
 
+typedef enum {
+    ZSTD_p_forceWindow   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0)*/
+} ZSTD_CCtxParameter;
+/*! ZSTD_setCCtxParameter() :
+ *  Set advanced parameters, selected through enum ZSTD_CCtxParameter
+ *  @result : 0, or an error code (which can be tested with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
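+
+/* An illustrative sketch : force window-sized back-references on an existing
+ * compression context :
+ *
+ *   size_t const r = ZSTD_setCCtxParameter(cctx, ZSTD_p_forceWindow, 1);
+ *   if (ZSTD_isError(r)) { handle the error }
+ */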
+
+/*! ZSTD_createCDict_byReference() :
+ *  Create a digested dictionary for compression
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives the CDict; it must remain read accessible throughout the lifetime of the CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
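+
+/* An illustrative sketch : with _byReference, `dictBuffer` must stay alive and
+ * unmodified until the CDict is freed :
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict_byReference(dictBuffer, dictSize, 3);
+ *   // ... compress with cdict ...
+ *   ZSTD_freeCDict(cdict);
+ *   free(dictBuffer);   // only safe after ZSTD_freeCDict()
+ */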
+
 /*! ZSTD_createCDict_advanced() :
  *  Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
                                                   ZSTD_parameters params, ZSTD_customMem customMem);
 
 /*! ZSTD_sizeof_CDict() :
@@ -455,6 +473,15 @@
  *  Gives the amount of memory used by a given ZSTD_DCtx */
 ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
 
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives the DDict; it must remain read accessible throughout the lifetime of the DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                                  unsigned byReference, ZSTD_customMem customMem);
+
 /*! ZSTD_sizeof_DDict() :
  *  Gives the amount of memory used by a given ZSTD_DDict */
 ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
@@ -463,13 +490,13 @@
  *  Provides the dictID stored within dictionary.
  *  if @return == 0, the dictionary is not conformant with Zstandard specification.
  *  It can still be loaded, but as a content-only dictionary. */
-unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
 
 /*! ZSTD_getDictID_fromDDict() :
  *  Provides the dictID of the dictionary loaded into `ddict`.
  *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
  *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
 
 /*! ZSTD_getDictID_fromFrame() :
  *  Provides the dictID required to decompress the frame stored within `src`.
@@ -481,7 +508,7 @@
  *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
  *  - This is not a Zstandard frame.
  *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
-unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
 
 
 /********************************************************************
@@ -491,7 +518,7 @@
 /*=====   Advanced Streaming compression functions  =====*/
 ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
 ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
 ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                              ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be zero == unknown */
 ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
@@ -500,9 +527,9 @@
 
 
 /*=====   Advanced Streaming decompression functions  =====*/
-typedef enum { ZSTDdsp_maxWindowSize } ZSTD_DStreamParameter_e;
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
 ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
 ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
 ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict will just be referenced, and must outlive decompression session */
 ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
@@ -542,10 +569,10 @@
     In which case, it will "discard" the relevant memory section from its history.
 
   Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
-  It's possible to use a NULL,0 src content, in which case, it will write a final empty block to end the frame,
-  Without last block mark, frames will be considered unfinished (broken) by decoders.
+  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+  Without last block mark, frames will be considered unfinished (corrupted) by decoders.
 
-  You can then reuse `ZSTD_CCtx` (ZSTD_compressBegin()) to compress some new frame.
+  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new frame.
 */
 
 /*=====   Buffer-less streaming compression functions  =====*/
@@ -553,6 +580,7 @@
 ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
 ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize);
 ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize);
 ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
--- a/contrib/python-zstandard/zstd_cffi.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/python-zstandard/zstd_cffi.py	Tue Apr 18 12:24:34 2017 -0400
@@ -8,145 +8,1250 @@
 
 from __future__ import absolute_import, unicode_literals
 
-import io
+import os
+import sys
 
 from _zstd_cffi import (
     ffi,
     lib,
 )
 
+if sys.version_info[0] == 2:
+    bytes_type = str
+    int_type = long
+else:
+    bytes_type = bytes
+    int_type = int
 
-_CSTREAM_IN_SIZE = lib.ZSTD_CStreamInSize()
-_CSTREAM_OUT_SIZE = lib.ZSTD_CStreamOutSize()
+
+COMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_CStreamInSize()
+COMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_CStreamOutSize()
+DECOMPRESSION_RECOMMENDED_INPUT_SIZE = lib.ZSTD_DStreamInSize()
+DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE = lib.ZSTD_DStreamOutSize()
+
+new_nonzero = ffi.new_allocator(should_clear_after_alloc=False)
+
+
+MAX_COMPRESSION_LEVEL = lib.ZSTD_maxCLevel()
+MAGIC_NUMBER = lib.ZSTD_MAGICNUMBER
+FRAME_HEADER = b'\x28\xb5\x2f\xfd'
+ZSTD_VERSION = (lib.ZSTD_VERSION_MAJOR, lib.ZSTD_VERSION_MINOR, lib.ZSTD_VERSION_RELEASE)
+
+WINDOWLOG_MIN = lib.ZSTD_WINDOWLOG_MIN
+WINDOWLOG_MAX = lib.ZSTD_WINDOWLOG_MAX
+CHAINLOG_MIN = lib.ZSTD_CHAINLOG_MIN
+CHAINLOG_MAX = lib.ZSTD_CHAINLOG_MAX
+HASHLOG_MIN = lib.ZSTD_HASHLOG_MIN
+HASHLOG_MAX = lib.ZSTD_HASHLOG_MAX
+HASHLOG3_MAX = lib.ZSTD_HASHLOG3_MAX
+SEARCHLOG_MIN = lib.ZSTD_SEARCHLOG_MIN
+SEARCHLOG_MAX = lib.ZSTD_SEARCHLOG_MAX
+SEARCHLENGTH_MIN = lib.ZSTD_SEARCHLENGTH_MIN
+SEARCHLENGTH_MAX = lib.ZSTD_SEARCHLENGTH_MAX
+TARGETLENGTH_MIN = lib.ZSTD_TARGETLENGTH_MIN
+TARGETLENGTH_MAX = lib.ZSTD_TARGETLENGTH_MAX
+
+STRATEGY_FAST = lib.ZSTD_fast
+STRATEGY_DFAST = lib.ZSTD_dfast
+STRATEGY_GREEDY = lib.ZSTD_greedy
+STRATEGY_LAZY = lib.ZSTD_lazy
+STRATEGY_LAZY2 = lib.ZSTD_lazy2
+STRATEGY_BTLAZY2 = lib.ZSTD_btlazy2
+STRATEGY_BTOPT = lib.ZSTD_btopt
+
+COMPRESSOBJ_FLUSH_FINISH = 0
+COMPRESSOBJ_FLUSH_BLOCK = 1
+
+
+def _cpu_count():
+    # os.cpu_count() was introduced in Python 3.4.
+    try:
+        return os.cpu_count() or 0
+    except AttributeError:
+        pass
+
+    # Linux.
+    try:
+        if sys.version_info[0] == 2:
+            return os.sysconf(b'SC_NPROCESSORS_ONLN')
+        else:
+            return os.sysconf(u'SC_NPROCESSORS_ONLN')
+    except (AttributeError, ValueError):
+        pass
+
+    # TODO implement on other platforms.
+    return 0
+
+
+class ZstdError(Exception):
+    pass
 
 
-class _ZstdCompressionWriter(object):
-    def __init__(self, cstream, writer):
-        self._cstream = cstream
+class CompressionParameters(object):
+    def __init__(self, window_log, chain_log, hash_log, search_log,
+                 search_length, target_length, strategy):
+        if window_log < WINDOWLOG_MIN or window_log > WINDOWLOG_MAX:
+            raise ValueError('invalid window log value')
+
+        if chain_log < CHAINLOG_MIN or chain_log > CHAINLOG_MAX:
+            raise ValueError('invalid chain log value')
+
+        if hash_log < HASHLOG_MIN or hash_log > HASHLOG_MAX:
+            raise ValueError('invalid hash log value')
+
+        if search_log < SEARCHLOG_MIN or search_log > SEARCHLOG_MAX:
+            raise ValueError('invalid search log value')
+
+        if search_length < SEARCHLENGTH_MIN or search_length > SEARCHLENGTH_MAX:
+            raise ValueError('invalid search length value')
+
+        if target_length < TARGETLENGTH_MIN or target_length > TARGETLENGTH_MAX:
+            raise ValueError('invalid target length value')
+
+        if strategy < STRATEGY_FAST or strategy > STRATEGY_BTOPT:
+            raise ValueError('invalid strategy value')
+
+        self.window_log = window_log
+        self.chain_log = chain_log
+        self.hash_log = hash_log
+        self.search_log = search_log
+        self.search_length = search_length
+        self.target_length = target_length
+        self.strategy = strategy
+
+        zresult = lib.ZSTD_checkCParams(self.as_compression_parameters())
+        if lib.ZSTD_isError(zresult):
+            raise ValueError('invalid compression parameters: %s' %
+                             ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+    def estimated_compression_context_size(self):
+        return lib.ZSTD_estimateCCtxSize(self.as_compression_parameters())
+
+    def as_compression_parameters(self):
+        p = ffi.new('ZSTD_compressionParameters *')[0]
+        p.windowLog = self.window_log
+        p.chainLog = self.chain_log
+        p.hashLog = self.hash_log
+        p.searchLog = self.search_log
+        p.searchLength = self.search_length
+        p.targetLength = self.target_length
+        p.strategy = self.strategy
+
+        return p
+
+def get_compression_parameters(level, source_size=0, dict_size=0):
+    params = lib.ZSTD_getCParams(level, source_size, dict_size)
+    return CompressionParameters(window_log=params.windowLog,
+                                 chain_log=params.chainLog,
+                                 hash_log=params.hashLog,
+                                 search_log=params.searchLog,
+                                 search_length=params.searchLength,
+                                 target_length=params.targetLength,
+                                 strategy=params.strategy)
+
+
+def estimate_compression_context_size(params):
+    if not isinstance(params, CompressionParameters):
+        raise ValueError('argument must be a CompressionParameters')
+
+    cparams = params.as_compression_parameters()
+    return lib.ZSTD_estimateCCtxSize(cparams)
+
+
+def estimate_decompression_context_size():
+    return lib.ZSTD_estimateDCtxSize()
+
+
+class ZstdCompressionWriter(object):
+    def __init__(self, compressor, writer, source_size, write_size):
+        self._compressor = compressor
         self._writer = writer
+        self._source_size = source_size
+        self._write_size = write_size
+        self._entered = False
+        self._mtcctx = compressor._cctx if compressor._multithreaded else None
 
     def __enter__(self):
+        if self._entered:
+            raise ZstdError('cannot __enter__ multiple times')
+
+        if self._mtcctx:
+            self._compressor._init_mtcstream(self._source_size)
+        else:
+            self._compressor._ensure_cstream(self._source_size)
+        self._entered = True
         return self
 
     def __exit__(self, exc_type, exc_value, exc_tb):
+        self._entered = False
+
         if not exc_type and not exc_value and not exc_tb:
             out_buffer = ffi.new('ZSTD_outBuffer *')
-            out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-            out_buffer.size = _CSTREAM_OUT_SIZE
+            dst_buffer = ffi.new('char[]', self._write_size)
+            out_buffer.dst = dst_buffer
+            out_buffer.size = self._write_size
             out_buffer.pos = 0
 
             while True:
-                res = lib.ZSTD_endStream(self._cstream, out_buffer)
-                if lib.ZSTD_isError(res):
-                    raise Exception('error ending compression stream: %s' % lib.ZSTD_getErrorName)
+                if self._mtcctx:
+                    zresult = lib.ZSTDMT_endStream(self._mtcctx, out_buffer)
+                else:
+                    zresult = lib.ZSTD_endStream(self._compressor._cstream, out_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('error ending compression stream: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
 
                 if out_buffer.pos:
-                    self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
                     out_buffer.pos = 0
 
-                if res == 0:
+                if zresult == 0:
                     break
 
+        self._compressor = None
+
         return False
 
+    def memory_size(self):
+        if not self._entered:
+            raise ZstdError('cannot determine size of an inactive compressor; '
+                            'call when a context manager is active')
+
+        return lib.ZSTD_sizeof_CStream(self._compressor._cstream)
+
     def write(self, data):
+        if not self._entered:
+            raise ZstdError('write() must be called from an active context '
+                            'manager')
+
+        total_write = 0
+
+        data_buffer = ffi.from_buffer(data)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
         out_buffer = ffi.new('ZSTD_outBuffer *')
-        out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-        out_buffer.size = _CSTREAM_OUT_SIZE
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = self._write_size
+        out_buffer.pos = 0
+
+        while in_buffer.pos < in_buffer.size:
+            if self._mtcctx:
+                zresult = lib.ZSTDMT_compressStream(self._mtcctx, out_buffer,
+                                                    in_buffer)
+            else:
+                zresult = lib.ZSTD_compressStream(self._compressor._cstream, out_buffer,
+                                                  in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+        return total_write
+
+    def flush(self):
+        if not self._entered:
+            raise ZstdError('flush must be called from an active context manager')
+
+        total_write = 0
+
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = self._write_size
         out_buffer.pos = 0
 
-        # TODO can we reuse existing memory?
-        in_buffer = ffi.new('ZSTD_inBuffer *')
-        in_buffer.src = ffi.new('char[]', data)
-        in_buffer.size = len(data)
-        in_buffer.pos = 0
-        while in_buffer.pos < in_buffer.size:
-            res = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('zstd compress error: %s' % lib.ZSTD_getErrorName(res))
+        while True:
+            if self._mtcctx:
+                zresult = lib.ZSTDMT_flushStream(self._mtcctx, out_buffer)
+            else:
+                zresult = lib.ZSTD_flushStream(self._compressor._cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if not out_buffer.pos:
+                break
+
+            self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+            total_write += out_buffer.pos
+            out_buffer.pos = 0
+
+        return total_write
+
+
+class ZstdCompressionObj(object):
+    def compress(self, data):
+        if self._finished:
+            raise ZstdError('cannot call compress() after compressor finished')
+
+        data_buffer = ffi.from_buffer(data)
+        source = ffi.new('ZSTD_inBuffer *')
+        source.src = data_buffer
+        source.size = len(data_buffer)
+        source.pos = 0
+
+        chunks = []
+
+        while source.pos < len(data):
+            if self._mtcctx:
+                zresult = lib.ZSTDMT_compressStream(self._mtcctx,
+                                                    self._out, source)
+            else:
+                zresult = lib.ZSTD_compressStream(self._compressor._cstream, self._out,
+                                                  source)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+        return b''.join(chunks)
 
-            if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                out_buffer.pos = 0
+    def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
+        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+            raise ValueError('flush mode not recognized')
+
+        if self._finished:
+            raise ZstdError('compressor object already finished')
+
+        assert self._out.pos == 0
+
+        if flush_mode == COMPRESSOBJ_FLUSH_BLOCK:
+            if self._mtcctx:
+                zresult = lib.ZSTDMT_flushStream(self._mtcctx, self._out)
+            else:
+                zresult = lib.ZSTD_flushStream(self._compressor._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd compress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            # Output buffer is guaranteed to hold full block.
+            assert zresult == 0
+
+            if self._out.pos:
+                result = ffi.buffer(self._out.dst, self._out.pos)[:]
+                self._out.pos = 0
+                return result
+            else:
+                return b''
+
+        assert flush_mode == COMPRESSOBJ_FLUSH_FINISH
+        self._finished = True
+
+        chunks = []
+
+        while True:
+            if self._mtcctx:
+                zresult = lib.ZSTDMT_endStream(self._mtcctx, self._out)
+            else:
+                zresult = lib.ZSTD_endStream(self._compressor._cstream, self._out)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if self._out.pos:
+                chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
+                self._out.pos = 0
+
+            if not zresult:
+                break
+
+        return b''.join(chunks)
 
 
 class ZstdCompressor(object):
-    def __init__(self, level=3, dict_data=None, compression_params=None):
-        if dict_data:
-            raise Exception('dict_data not yet supported')
-        if compression_params:
-            raise Exception('compression_params not yet supported')
+    def __init__(self, level=3, dict_data=None, compression_params=None,
+                 write_checksum=False, write_content_size=False,
+                 write_dict_id=True, threads=0):
+        if level < 1:
+            raise ValueError('level must be greater than 0')
+        elif level > lib.ZSTD_maxCLevel():
+            raise ValueError('level must not be greater than %d' % lib.ZSTD_maxCLevel())
+
+        if threads < 0:
+            threads = _cpu_count()
 
         self._compression_level = level
+        self._dict_data = dict_data
+        self._cparams = compression_params
+        self._fparams = ffi.new('ZSTD_frameParameters *')[0]
+        self._fparams.checksumFlag = write_checksum
+        self._fparams.contentSizeFlag = write_content_size
+        self._fparams.noDictIDFlag = not write_dict_id
 
-    def compress(self, data):
-        # Just use the stream API for now.
-        output = io.BytesIO()
-        with self.write_to(output) as compressor:
-            compressor.write(data)
-        return output.getvalue()
+        if threads:
+            cctx = lib.ZSTDMT_createCCtx(threads)
+            if cctx == ffi.NULL:
+                raise MemoryError()
+
+            self._cctx = ffi.gc(cctx, lib.ZSTDMT_freeCCtx)
+            self._multithreaded = True
+        else:
+            cctx = lib.ZSTD_createCCtx()
+            if cctx == ffi.NULL:
+                raise MemoryError()
+
+            self._cctx = ffi.gc(cctx, lib.ZSTD_freeCCtx)
+            self._multithreaded = False
+
+        self._cstream = None
+
+    def compress(self, data, allow_empty=False):
+        if len(data) == 0 and self._fparams.contentSizeFlag and not allow_empty:
+            raise ValueError('cannot write empty inputs when writing content sizes')
+
+        if self._multithreaded and self._dict_data:
+            raise ZstdError('compress() cannot be used with both dictionaries and multi-threaded compression')
+
+        if self._multithreaded and self._cparams:
+            raise ZstdError('compress() cannot be used with both compression parameters and multi-threaded compression')
+
+        # TODO use a CDict for performance.
+        dict_data = ffi.NULL
+        dict_size = 0
+
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+        params = ffi.new('ZSTD_parameters *')[0]
+        if self._cparams:
+            params.cParams = self._cparams.as_compression_parameters()
+        else:
+            params.cParams = lib.ZSTD_getCParams(self._compression_level, len(data),
+                                                 dict_size)
+        params.fParams = self._fparams
+
+        dest_size = lib.ZSTD_compressBound(len(data))
+        out = new_nonzero('char[]', dest_size)
 
-    def copy_stream(self, ifh, ofh):
-        cstream = self._get_cstream()
+        if self._multithreaded:
+            zresult = lib.ZSTDMT_compressCCtx(self._cctx,
+                                              ffi.addressof(out), dest_size,
+                                              data, len(data),
+                                              self._compression_level)
+        else:
+            zresult = lib.ZSTD_compress_advanced(self._cctx,
+                                                 ffi.addressof(out), dest_size,
+                                                 data, len(data),
+                                                 dict_data, dict_size,
+                                                 params)
+
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('cannot compress: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        return ffi.buffer(out, zresult)[:]
+
+    def compressobj(self, size=0):
+        if self._multithreaded:
+            self._init_mtcstream(size)
+        else:
+            self._ensure_cstream(size)
+
+        cobj = ZstdCompressionObj()
+        cobj._out = ffi.new('ZSTD_outBuffer *')
+        cobj._dst_buffer = ffi.new('char[]', COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._out.dst = cobj._dst_buffer
+        cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        cobj._out.pos = 0
+        cobj._compressor = self
+        cobj._finished = False
+
+        if self._multithreaded:
+            cobj._mtcctx = self._cctx
+        else:
+            cobj._mtcctx = None
+
+        return cobj
+
+    def copy_stream(self, ifh, ofh, size=0,
+                    read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+                    write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+
+        if not hasattr(ifh, 'read'):
+            raise ValueError('first argument must have a read() method')
+        if not hasattr(ofh, 'write'):
+            raise ValueError('second argument must have a write() method')
+
+        mt = self._multithreaded
+        if mt:
+            self._init_mtcstream(size)
+        else:
+            self._ensure_cstream(size)
 
         in_buffer = ffi.new('ZSTD_inBuffer *')
         out_buffer = ffi.new('ZSTD_outBuffer *')
 
-        out_buffer.dst = ffi.new('char[]', _CSTREAM_OUT_SIZE)
-        out_buffer.size = _CSTREAM_OUT_SIZE
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
         out_buffer.pos = 0
 
         total_read, total_write = 0, 0
 
         while True:
-            data = ifh.read(_CSTREAM_IN_SIZE)
+            data = ifh.read(read_size)
             if not data:
                 break
 
-            total_read += len(data)
-
-            in_buffer.src = ffi.new('char[]', data)
-            in_buffer.size = len(data)
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
             in_buffer.pos = 0
 
             while in_buffer.pos < in_buffer.size:
-                res = lib.ZSTD_compressStream(cstream, out_buffer, in_buffer)
-                if lib.ZSTD_isError(res):
-                    raise Exception('zstd compress error: %s' %
-                                    lib.ZSTD_getErrorName(res))
+                if mt:
+                    zresult = lib.ZSTDMT_compressStream(self._cctx, out_buffer, in_buffer)
+                else:
+                    zresult = lib.ZSTD_compressStream(self._cstream,
+                                                      out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd compress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
 
                 if out_buffer.pos:
                     ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
-                    total_write = out_buffer.pos
+                    total_write += out_buffer.pos
                     out_buffer.pos = 0
 
         # We've finished reading. Flush the compressor.
         while True:
-            res = lib.ZSTD_endStream(cstream, out_buffer)
-            if lib.ZSTD_isError(res):
-                raise Exception('error ending compression stream: %s' %
-                                lib.ZSTD_getErrorName(res))
+            if mt:
+                zresult = lib.ZSTDMT_endStream(self._cctx, out_buffer)
+            else:
+                zresult = lib.ZSTD_endStream(self._cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
 
             if out_buffer.pos:
                 ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
                 total_write += out_buffer.pos
                 out_buffer.pos = 0
 
-            if res == 0:
+            if zresult == 0:
                 break
 
         return total_read, total_write
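
``copy_stream()`` pumps data between file objects and reports both byte
counts, which makes round trips easy to verify. A sketch using in-memory
streams::

  import io
  import zstd

  src = io.BytesIO(b'data' * 4096)
  dst = io.BytesIO()
  read, written = zstd.ZstdCompressor().copy_stream(src, dst)
  # read == 16384; written is the size of the emitted frame
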
 
-    def write_to(self, writer):
-        return _ZstdCompressionWriter(self._get_cstream(), writer)
+    def write_to(self, writer, size=0,
+                 write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+
+        if not hasattr(writer, 'write'):
+            raise ValueError('must pass an object with a write() method')
+
+        return ZstdCompressionWriter(self, writer, size, write_size)
+
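
``write_to()`` wraps the writer in a ``ZstdCompressionWriter`` that
compresses on ``write()`` and flushes the frame epilogue when the context
manager exits. Sketch::

  import io
  import zstd

  buf = io.BytesIO()
  with zstd.ZstdCompressor().write_to(buf) as compressor:
      compressor.write(b'some data')
      compressor.write(b'some more data')
  frame = buf.getvalue()
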
+    def read_from(self, reader, size=0,
+                  read_size=COMPRESSION_RECOMMENDED_INPUT_SIZE,
+                  write_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if hasattr(reader, 'read'):
+            have_read = True
+        elif hasattr(reader, '__getitem__'):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError('must pass an object with a read() method or '
+                             'one that conforms to the buffer protocol')
+
+        if self._multithreaded:
+            self._init_mtcstream(size)
+        else:
+            self._ensure_cstream(size)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        in_buffer.src = ffi.NULL
+        in_buffer.size = 0
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        while True:
+            # We should never have output data sitting around after a previous
+            # iteration.
+            assert out_buffer.pos == 0
+
+            # Collect input data.
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = len(reader) - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                buffer_offset += slice_size
+
+            # No new input data. Break out of the read loop.
+            if not read_result:
+                break
 
-    def _get_cstream(self):
+            # Feed all read data into the compressor and emit output until
+            # exhausted.
+            read_buffer = ffi.from_buffer(read_result)
+            in_buffer.src = read_buffer
+            in_buffer.size = len(read_buffer)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                if self._multithreaded:
+                    zresult = lib.ZSTDMT_compressStream(self._cctx, out_buffer, in_buffer)
+                else:
+                    zresult = lib.ZSTD_compressStream(self._cstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd compress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                    out_buffer.pos = 0
+                    yield data
+
+            assert out_buffer.pos == 0
+
+            # And repeat the loop to collect more data.
+            continue
+
+        # If we get here, input is exhausted. End the stream and emit what
+        # remains.
+        while True:
+            assert out_buffer.pos == 0
+            if self._multithreaded:
+                zresult = lib.ZSTDMT_endStream(self._cctx, out_buffer)
+            else:
+                zresult = lib.ZSTD_endStream(self._cstream, out_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('error ending compression stream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                out_buffer.pos = 0
+                yield data
+
+            if zresult == 0:
+                break
+
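
``read_from()`` is a generator: it pulls from a reader (or slices an object
supporting the buffer protocol), feeds the compressor, and yields compressed
chunks as they become available, ending with the flushed epilogue. Sketch::

  import io
  import zstd

  cctx = zstd.ZstdCompressor()
  frame = b''.join(cctx.read_from(io.BytesIO(b'input data' * 100)))
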
+    def _ensure_cstream(self, size):
+        if self._cstream:
+            zresult = lib.ZSTD_resetCStream(self._cstream, size)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('could not reset CStream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            return
+
         cstream = lib.ZSTD_createCStream()
+        if cstream == ffi.NULL:
+            raise MemoryError()
+
         cstream = ffi.gc(cstream, lib.ZSTD_freeCStream)
 
-        res = lib.ZSTD_initCStream(cstream, self._compression_level)
-        if lib.ZSTD_isError(res):
+        dict_data = ffi.NULL
+        dict_size = 0
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+        zparams = ffi.new('ZSTD_parameters *')[0]
+        if self._cparams:
+            zparams.cParams = self._cparams.as_compression_parameters()
+        else:
+            zparams.cParams = lib.ZSTD_getCParams(self._compression_level,
+                                                  size, dict_size)
+        zparams.fParams = self._fparams
+
+        zresult = lib.ZSTD_initCStream_advanced(cstream, dict_data, dict_size,
+                                                zparams, size)
+        if lib.ZSTD_isError(zresult):
             raise Exception('cannot init CStream: %s' %
-                            lib.ZSTD_getErrorName(res))
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        self._cstream = cstream
+
+    def _init_mtcstream(self, size):
+        assert self._multithreaded
+
+        dict_data = ffi.NULL
+        dict_size = 0
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+        zparams = ffi.new('ZSTD_parameters *')[0]
+        if self._cparams:
+            zparams.cParams = self._cparams.as_compression_parameters()
+        else:
+            zparams.cParams = lib.ZSTD_getCParams(self._compression_level,
+                                                  size, dict_size)
+
+        zparams.fParams = self._fparams
+
+        zresult = lib.ZSTDMT_initCStream_advanced(self._cctx, dict_data, dict_size,
+                                                  zparams, size)
+
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('cannot init CStream: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+
+class FrameParameters(object):
+    def __init__(self, fparams):
+        self.content_size = fparams.frameContentSize
+        self.window_size = fparams.windowSize
+        self.dict_id = fparams.dictID
+        self.has_checksum = bool(fparams.checksumFlag)
+
+
+def get_frame_parameters(data):
+    if not isinstance(data, bytes_type):
+        raise TypeError('argument must be bytes')
+
+    params = ffi.new('ZSTD_frameParams *')
+
+    zresult = lib.ZSTD_getFrameParams(params, data, len(data))
+    if lib.ZSTD_isError(zresult):
+        raise ZstdError('cannot get frame parameters: %s' %
+                        ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+    if zresult:
+        raise ZstdError('not enough data for frame parameters; need %d bytes' %
+                        zresult)
+
+    return FrameParameters(params[0])
+
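
``get_frame_parameters()`` parses only the frame header. ``content_size`` is
populated only when the compressor recorded it, so a sketch needs
``write_content_size=True`` (keyword name taken from the C extension; an
assumption here)::

  import zstd

  cctx = zstd.ZstdCompressor(write_content_size=True)
  frame = cctx.compress(b'data')

  params = zstd.get_frame_parameters(frame)
  # params.content_size == 4; params.dict_id == 0
  # params.has_checksum is False unless write_checksum=True was used
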
+
+class ZstdCompressionDict(object):
+    def __init__(self, data, k=0, d=0):
+        assert isinstance(data, bytes_type)
+        self._data = data
+        self.k = k
+        self.d = d
+
+    def __len__(self):
+        return len(self._data)
+
+    def dict_id(self):
+        return int_type(lib.ZDICT_getDictID(self._data, len(self._data)))
+
+    def as_bytes(self):
+        return self._data
+
+
+def train_dictionary(dict_size, samples, selectivity=0, level=0,
+                     notifications=0, dict_id=0):
+    if not isinstance(samples, list):
+        raise TypeError('samples must be a list')
+
+    total_size = sum(map(len, samples))
+
+    samples_buffer = new_nonzero('char[]', total_size)
+    sample_sizes = new_nonzero('size_t[]', len(samples))
+
+    offset = 0
+    for i, sample in enumerate(samples):
+        if not isinstance(sample, bytes_type):
+            raise ValueError('samples must be bytes')
+
+        l = len(sample)
+        ffi.memmove(samples_buffer + offset, sample, l)
+        offset += l
+        sample_sizes[i] = l
+
+    dict_data = new_nonzero('char[]', dict_size)
+
+    dparams = ffi.new('ZDICT_params_t *')[0]
+    dparams.selectivityLevel = selectivity
+    dparams.compressionLevel = level
+    dparams.notificationLevel = notifications
+    dparams.dictID = dict_id
+
+    zresult = lib.ZDICT_trainFromBuffer_advanced(
+        ffi.addressof(dict_data), dict_size,
+        ffi.addressof(samples_buffer),
+        ffi.addressof(sample_sizes, 0), len(samples),
+        dparams)
+
+    if lib.ZDICT_isError(zresult):
+        raise ZstdError('Cannot train dict: %s' %
+                        ffi.string(lib.ZDICT_getErrorName(zresult)))
+
+    return ZstdCompressionDict(ffi.buffer(dict_data, zresult)[:])
+
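
As the marshalling above shows, training copies every sample into one
contiguous allocation plus a parallel array of sizes. A usage sketch,
assuming a corpus of similar small samples::

  import zstd

  samples = [('sample record %d' % i).encode('ascii') for i in range(1000)]
  d = zstd.train_dictionary(16384, samples)

  cctx = zstd.ZstdCompressor(dict_data=d)
  dctx = zstd.ZstdDecompressor(dict_data=d)
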
+
+def train_cover_dictionary(dict_size, samples, k=0, d=0,
+                           notifications=0, dict_id=0, level=0, optimize=False,
+                           steps=0, threads=0):
+    if not isinstance(samples, list):
+        raise TypeError('samples must be a list')
+
+    if threads < 0:
+        threads = _cpu_count()
+
+    total_size = sum(map(len, samples))
+
+    samples_buffer = new_nonzero('char[]', total_size)
+    sample_sizes = new_nonzero('size_t[]', len(samples))
+
+    offset = 0
+    for i, sample in enumerate(samples):
+        if not isinstance(sample, bytes_type):
+            raise ValueError('samples must be bytes')
+
+        l = len(sample)
+        ffi.memmove(samples_buffer + offset, sample, l)
+        offset += l
+        sample_sizes[i] = l
+
+    dict_data = new_nonzero('char[]', dict_size)
+
+    dparams = ffi.new('COVER_params_t *')[0]
+    dparams.k = k
+    dparams.d = d
+    dparams.steps = steps
+    dparams.nbThreads = threads
+    dparams.notificationLevel = notifications
+    dparams.dictID = dict_id
+    dparams.compressionLevel = level
+
+    if optimize:
+        zresult = lib.COVER_optimizeTrainFromBuffer(
+            ffi.addressof(dict_data), dict_size,
+            ffi.addressof(samples_buffer),
+            ffi.addressof(sample_sizes, 0), len(samples),
+            ffi.addressof(dparams))
+    else:
+        zresult = lib.COVER_trainFromBuffer(
+            ffi.addressof(dict_data), dict_size,
+            ffi.addressof(samples_buffer),
+            ffi.addressof(sample_sizes, 0), len(samples),
+            dparams)
+
+    if lib.ZDICT_isError(zresult):
+        raise ZstdError('cannot train dict: %s' %
+                        ffi.string(lib.ZDICT_getErrorName(zresult)))
+
+    return ZstdCompressionDict(ffi.buffer(dict_data, zresult)[:],
+                               k=dparams.k, d=dparams.d)
+
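
The COVER trainer either takes ``k``/``d`` directly or searches for them when
``optimize=True``; the values it settles on are echoed back on the returned
dictionary, and ``threads=-1`` expands to the CPU count via the
``_cpu_count()`` call above. Sketch::

  import zstd

  samples = [('record %d' % i).encode('ascii') for i in range(1000)]
  d = zstd.train_cover_dictionary(16384, samples, optimize=True, threads=-1)
  # d.k and d.d hold the parameters the search selected
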
+
+class ZstdDecompressionObj(object):
+    def __init__(self, decompressor):
+        self._decompressor = decompressor
+        self._finished = False
+
+    def decompress(self, data):
+        if self._finished:
+            raise ZstdError('cannot use a decompressobj multiple times')
+
+        assert(self._decompressor._dstream)
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        data_buffer = ffi.from_buffer(data)
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        chunks = []
+
+        while in_buffer.pos < in_buffer.size:
+            zresult = lib.ZSTD_decompressStream(self._decompressor._dstream,
+                                                out_buffer, in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd decompressor error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if zresult == 0:
+                self._finished = True
+                self._decompressor = None
+
+            if out_buffer.pos:
+                chunks.append(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                out_buffer.pos = 0
+
+        return b''.join(chunks)
+
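
``ZstdDecompressionObj`` is the incremental mirror of ``compressobj()``:
compressed bytes may arrive in arbitrary slices and each call returns
whatever plaintext is ready, with the object marking itself finished once the
frame epilogue is consumed. Sketch::

  import zstd

  frame = zstd.ZstdCompressor().compress(b'streaming data')

  dobj = zstd.ZstdDecompressor().decompressobj()
  out = dobj.decompress(frame[:10]) + dobj.decompress(frame[10:])
  assert out == b'streaming data'
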
+
+class ZstdDecompressionWriter(object):
+    def __init__(self, decompressor, writer, write_size):
+        self._decompressor = decompressor
+        self._writer = writer
+        self._write_size = write_size
+        self._entered = False
+
+    def __enter__(self):
+        if self._entered:
+            raise ZstdError('cannot __enter__ multiple times')
+
+        self._decompressor._ensure_dstream()
+        self._entered = True
+
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._entered = False
+
+    def memory_size(self):
+        if not self._decompressor._dstream:
+            raise ZstdError('cannot determine size of inactive decompressor; '
+                            'call when context manager is active')
+
+        return lib.ZSTD_sizeof_DStream(self._decompressor._dstream)
+
+    def write(self, data):
+        if not self._entered:
+            raise ZstdError('write must be called from an active context manager')
+
+        total_write = 0
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        data_buffer = ffi.from_buffer(data)
+        in_buffer.src = data_buffer
+        in_buffer.size = len(data_buffer)
+        in_buffer.pos = 0
+
+        dst_buffer = ffi.new('char[]', self._write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        dstream = self._decompressor._dstream
+
+        while in_buffer.pos < in_buffer.size:
+            zresult = lib.ZSTD_decompressStream(dstream, out_buffer, in_buffer)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('zstd decompress error: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            if out_buffer.pos:
+                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                total_write += out_buffer.pos
+                out_buffer.pos = 0
+
+        return total_write
+
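
The decompression writer is the inverse of the compression one: ``write()``
accepts compressed bytes and forwards plaintext to the wrapped writer, and it
must be used as a context manager so the DStream is (re)initialized. Sketch::

  import io
  import zstd

  frame = zstd.ZstdCompressor().compress(b'data')
  out = io.BytesIO()
  with zstd.ZstdDecompressor().write_to(out) as decompressor:
      decompressor.write(frame)
  assert out.getvalue() == b'data'
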
+
+class ZstdDecompressor(object):
+    def __init__(self, dict_data=None):
+        self._dict_data = dict_data
+
+        dctx = lib.ZSTD_createDCtx()
+        if dctx == ffi.NULL:
+            raise MemoryError()
+
+        self._refdctx = ffi.gc(dctx, lib.ZSTD_freeDCtx)
+        self._dstream = None
 
-        return cstream
+    @property
+    def _ddict(self):
+        if self._dict_data:
+            dict_data = self._dict_data.as_bytes()
+            dict_size = len(self._dict_data)
+
+            ddict = lib.ZSTD_createDDict(dict_data, dict_size)
+            if ddict == ffi.NULL:
+                raise ZstdError('could not create decompression dict')
+        else:
+            ddict = None
+
+        self.__dict__['_ddict'] = ddict
+        return ddict
+
+    def decompress(self, data, max_output_size=0):
+        data_buffer = ffi.from_buffer(data)
+
+        orig_dctx = new_nonzero('char[]', lib.ZSTD_sizeof_DCtx(self._refdctx))
+        dctx = ffi.cast('ZSTD_DCtx *', orig_dctx)
+        lib.ZSTD_copyDCtx(dctx, self._refdctx)
+
+        ddict = self._ddict
+
+        output_size = lib.ZSTD_getDecompressedSize(data_buffer, len(data_buffer))
+        if output_size:
+            result_buffer = ffi.new('char[]', output_size)
+            result_size = output_size
+        else:
+            if not max_output_size:
+                raise ZstdError('input data invalid or missing content size '
+                                'in frame header')
+
+            result_buffer = ffi.new('char[]', max_output_size)
+            result_size = max_output_size
+
+        if ddict:
+            zresult = lib.ZSTD_decompress_usingDDict(dctx,
+                                                     result_buffer, result_size,
+                                                     data_buffer, len(data_buffer),
+                                                     ddict)
+        else:
+            zresult = lib.ZSTD_decompressDCtx(dctx,
+                                              result_buffer, result_size,
+                                              data_buffer, len(data_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('decompression error: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+        elif output_size and zresult != output_size:
+            raise ZstdError('decompression error: decompressed %d bytes; expected %d' %
+                            (zresult, output_size))
+
+        return ffi.buffer(result_buffer, zresult)[:]
+
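
Because one-shot ``decompress()`` allocates its entire output up front,
frames written without an embedded content size need an explicit
``max_output_size`` ceiling, exactly as the branch above enforces. A sketch
of both paths::

  import zstd

  data = b'x' * 1000

  # Frame records its decompressed size: no hint needed.
  frame = zstd.ZstdCompressor(write_content_size=True).compress(data)
  assert zstd.ZstdDecompressor().decompress(frame) == data

  # Without a content size, a ceiling must be supplied.
  frame = zstd.ZstdCompressor().compress(data)
  dctx = zstd.ZstdDecompressor()
  assert dctx.decompress(frame, max_output_size=4096) == data
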
+    def decompressobj(self):
+        self._ensure_dstream()
+        return ZstdDecompressionObj(self)
+
+    def read_from(self, reader, read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+                  write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+                  skip_bytes=0):
+        if skip_bytes >= read_size:
+            raise ValueError('skip_bytes must be smaller than read_size')
+
+        if hasattr(reader, 'read'):
+            have_read = True
+        elif hasattr(reader, '__getitem__'):
+            have_read = False
+            buffer_offset = 0
+            size = len(reader)
+        else:
+            raise ValueError('must pass an object with a read() method or '
+                             'one that conforms to the buffer protocol')
+
+        if skip_bytes:
+            if have_read:
+                reader.read(skip_bytes)
+            else:
+                if skip_bytes > size:
+                    raise ValueError('skip_bytes larger than first input chunk')
+
+                buffer_offset = skip_bytes
+
+        self._ensure_dstream()
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = len(dst_buffer)
+        out_buffer.pos = 0
+
+        while True:
+            assert out_buffer.pos == 0
+
+            if have_read:
+                read_result = reader.read(read_size)
+            else:
+                remaining = size - buffer_offset
+                slice_size = min(remaining, read_size)
+                read_result = reader[buffer_offset:buffer_offset + slice_size]
+                buffer_offset += slice_size
+
+            # No new input. Break out of read loop.
+            if not read_result:
+                break
+
+            # Feed all read data into decompressor and emit output until
+            # exhausted.
+            read_buffer = ffi.from_buffer(read_result)
+            in_buffer.src = read_buffer
+            in_buffer.size = len(read_buffer)
+            in_buffer.pos = 0
+
+            while in_buffer.pos < in_buffer.size:
+                assert out_buffer.pos == 0
+
+                zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd decompress error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                    out_buffer.pos = 0
+                    yield data
+
+                if zresult == 0:
+                    return
+
+            # Repeat loop to collect more input data.
+            continue
+
+        # If we get here, input is exhausted.
+
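
The decompression ``read_from()`` has the same reader-or-buffer duality and
adds ``skip_bytes`` for inputs carrying a small application header ahead of
the frame (it must stay below ``read_size``, as validated above). Sketch::

  import io
  import zstd

  frame = zstd.ZstdCompressor().compress(b'payload' * 64)
  dctx = zstd.ZstdDecompressor()
  out = b''.join(dctx.read_from(io.BytesIO(b'HDR!' + frame), skip_bytes=4))
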
+    def write_to(self, writer, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if not hasattr(writer, 'write'):
+            raise ValueError('must pass an object with a write() method')
+
+        return ZstdDecompressionWriter(self, writer, write_size)
+
+    def copy_stream(self, ifh, ofh,
+                    read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
+                    write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
+        if not hasattr(ifh, 'read'):
+            raise ValueError('first argument must have a read() method')
+        if not hasattr(ofh, 'write'):
+            raise ValueError('second argument must have a write() method')
+
+        self._ensure_dstream()
+
+        in_buffer = ffi.new('ZSTD_inBuffer *')
+        out_buffer = ffi.new('ZSTD_outBuffer *')
+
+        dst_buffer = ffi.new('char[]', write_size)
+        out_buffer.dst = dst_buffer
+        out_buffer.size = write_size
+        out_buffer.pos = 0
+
+        total_read, total_write = 0, 0
+
+        # Read all available input.
+        while True:
+            data = ifh.read(read_size)
+            if not data:
+                break
+
+            data_buffer = ffi.from_buffer(data)
+            total_read += len(data_buffer)
+            in_buffer.src = data_buffer
+            in_buffer.size = len(data_buffer)
+            in_buffer.pos = 0
+
+            # Flush all read data to output.
+            while in_buffer.pos < in_buffer.size:
+                zresult = lib.ZSTD_decompressStream(self._dstream, out_buffer, in_buffer)
+                if lib.ZSTD_isError(zresult):
+                    raise ZstdError('zstd decompressor error: %s' %
+                                    ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+                if out_buffer.pos:
+                    ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
+                    total_write += out_buffer.pos
+                    out_buffer.pos = 0
+
+            # Continue loop to keep reading.
+
+        return total_read, total_write
+
+    def decompress_content_dict_chain(self, frames):
+        if not isinstance(frames, list):
+            raise TypeError('argument must be a list')
+
+        if not frames:
+            raise ValueError('empty input chain')
+
+        # First chunk should not be using a dictionary. We handle it specially.
+        chunk = frames[0]
+        if not isinstance(chunk, bytes_type):
+            raise ValueError('chunk 0 must be bytes')
+
+        # All chunks should be zstd frames and should have content size set.
+        chunk_buffer = ffi.from_buffer(chunk)
+        params = ffi.new('ZSTD_frameParams *')
+        zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ValueError('chunk 0 is not a valid zstd frame')
+        elif zresult:
+            raise ValueError('chunk 0 is too small to contain a zstd frame')
+
+        if not params.frameContentSize:
+            raise ValueError('chunk 0 missing content size in frame')
+
+        dctx = lib.ZSTD_createDCtx()
+        if dctx == ffi.NULL:
+            raise MemoryError()
+
+        dctx = ffi.gc(dctx, lib.ZSTD_freeDCtx)
+
+        last_buffer = ffi.new('char[]', params.frameContentSize)
+
+        zresult = lib.ZSTD_decompressDCtx(dctx, last_buffer, len(last_buffer),
+                                          chunk_buffer, len(chunk_buffer))
+        if lib.ZSTD_isError(zresult):
+            raise ZstdError('could not decompress chunk 0: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+        # Special case of chain length of 1
+        if len(frames) == 1:
+            return ffi.buffer(last_buffer, len(last_buffer))[:]
+
+        i = 1
+        while i < len(frames):
+            chunk = frames[i]
+            if not isinstance(chunk, bytes_type):
+                raise ValueError('chunk %d must be bytes' % i)
+
+            chunk_buffer = ffi.from_buffer(chunk)
+            zresult = lib.ZSTD_getFrameParams(params, chunk_buffer, len(chunk_buffer))
+            if lib.ZSTD_isError(zresult):
+                raise ValueError('chunk %d is not a valid zstd frame' % i)
+            elif zresult:
+                raise ValueError('chunk %d is too small to contain a zstd frame' % i)
+
+            if not params.frameContentSize:
+                raise ValueError('chunk %d missing content size in frame' % i)
+
+            dest_buffer = ffi.new('char[]', params.frameContentSize)
+
+            zresult = lib.ZSTD_decompress_usingDict(dctx, dest_buffer, len(dest_buffer),
+                                                    chunk_buffer, len(chunk_buffer),
+                                                    last_buffer, len(last_buffer))
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('could not decompress chunk %d' % i)
+
+            last_buffer = dest_buffer
+            i += 1
+
+        return ffi.buffer(last_buffer, len(last_buffer))[:]
+
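
A content dictionary chain is effectively a delta scheme: frame N is decoded
using the fully decoded frame N-1 as its dictionary, which is why every frame
must record a content size. A sketch of building and consuming a two-frame
chain (the producing side is an assumption; it is not part of this file)::

  import zstd

  v1 = b'version 1 of a document'
  v2 = b'version 2 of a document, slightly edited'

  chain = [zstd.ZstdCompressor(write_content_size=True).compress(v1)]
  d = zstd.ZstdCompressionDict(v1)
  cctx = zstd.ZstdCompressor(dict_data=d, write_content_size=True)
  chain.append(cctx.compress(v2))

  assert zstd.ZstdDecompressor().decompress_content_dict_chain(chain) == v2
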
+    def _ensure_dstream(self):
+        if self._dstream:
+            zresult = lib.ZSTD_resetDStream(self._dstream)
+            if lib.ZSTD_isError(zresult):
+                raise ZstdError('could not reset DStream: %s' %
+                                ffi.string(lib.ZSTD_getErrorName(zresult)))
+
+            return
+
+        self._dstream = lib.ZSTD_createDStream()
+        if self._dstream == ffi.NULL:
+            raise MemoryError()
+
+        self._dstream = ffi.gc(self._dstream, lib.ZSTD_freeDStream)
+
+        if self._dict_data:
+            zresult = lib.ZSTD_initDStream_usingDict(self._dstream,
+                                                     self._dict_data.as_bytes(),
+                                                     len(self._dict_data))
+        else:
+            zresult = lib.ZSTD_initDStream(self._dstream)
+
+        if lib.ZSTD_isError(zresult):
+            self._dstream = None
+            raise ZstdError('could not initialize DStream: %s' %
+                            ffi.string(lib.ZSTD_getErrorName(zresult)))
--- a/contrib/undumprevlog	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/undumprevlog	Tue Apr 18 12:24:34 2017 -0400
@@ -9,15 +9,15 @@
 from mercurial import (
     node,
     revlog,
-    scmutil,
     transaction,
     util,
+    vfs as vfsmod,
 )
 
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     util.setbinary(fp)
 
-opener = scmutil.opener('.', False)
+opener = vfsmod.vfs('.', False)
 tr = transaction.transaction(sys.stderr.write, opener, {'store': opener},
                              "undump.journal")
 while True:
--- a/contrib/win32/mercurial.ini	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/win32/mercurial.ini	Tue Apr 18 12:24:34 2017 -0400
@@ -19,6 +19,8 @@
 editor = notepad
 ; show changed files and be a bit more verbose if True
 ; verbose = True
+; colorize commands output
+; color = auto
  
 ; username data to appear in commits
 ; it usually takes the form: Joe User <joe.user@host.com>
@@ -40,7 +42,6 @@
 ;bugzilla =
 ;children =
 ;churn =
-;color =
 ;convert =
 ;eol =
 ;extdiff =
--- a/contrib/wix/help.wxs	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/wix/help.wxs	Tue Apr 18 12:24:34 2017 -0400
@@ -15,6 +15,8 @@
     <DirectoryRef Id="INSTALLDIR">
       <Directory Id="helpdir" Name="help" FileSource="$(var.SourceDir)">
         <Component Id="help.root" Guid="$(var.help.root.guid)" Win64='$(var.IsX64)'>
+          <File Name="bundlespec.txt" />
+          <File Name="color.txt" />
           <File Name="config.txt" KeyPath="yes" />
           <File Name="dates.txt" />
           <File Name="diffs.txt" />
@@ -25,6 +27,7 @@
           <File Name="hgignore.txt" />
           <File Name="hgweb.txt" />
           <File Name="merge-tools.txt" />
+          <File Name="pager.txt" />
           <File Name="patterns.txt" />
           <File Name="phases.txt" />
           <File Name="revisions.txt" />
@@ -37,6 +40,7 @@
         <Directory Id="help.internaldir" Name="internals">
           <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'>
             <File Id="internals.bundles.txt"      Name="bundles.txt" KeyPath="yes" />
+            <File Id="internals.censor.txt"       Name="censor.txt" KeyPath="yes" />
             <File Id="internals.changegroups.txt" Name="changegroups.txt" />
             <File Id="internals.requirements.txt" Name="requirements.txt" />
             <File Id="internals.revlogs.txt"      Name="revlogs.txt" />
--- a/contrib/wix/templates.wxs	Tue Apr 18 11:22:42 2017 -0400
+++ b/contrib/wix/templates.wxs	Tue Apr 18 12:24:34 2017 -0400
@@ -32,6 +32,7 @@
           <File Name="map-cmdline.changelog" KeyPath="yes" />
           <File Name="map-cmdline.compact" />
           <File Name="map-cmdline.default" />
+          <File Name="map-cmdline.show" />
           <File Name="map-cmdline.bisect" />
           <File Name="map-cmdline.xml" />
           <File Name="map-cmdline.status" />
@@ -225,6 +226,7 @@
             <File Id="static.coal.file.png"      Name="coal-file.png" />
             <File Id="static.coal.folder.png"    Name="coal-folder.png" />
             <File Id="static.excanvas.js"        Name="excanvas.js" />
+            <File Id="static.followlines.js"     Name="followlines.js" />
             <File Id="static.mercurial.js"       Name="mercurial.js" />
             <File Id="static.hgicon.png"         Name="hgicon.png" />
             <File Id="static.hglogo.png"         Name="hglogo.png" />
--- a/hgext/automv.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/automv.py	Tue Apr 18 12:24:34 2017 -0400
@@ -4,7 +4,7 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-"""Check for unrecorded moves at commit time (EXPERIMENTAL)
+"""check for unrecorded moves at commit time (EXPERIMENTAL)
 
 This extension checks at commit/amend time if any of the committed files
 comes from an unrecorded mv.
--- a/hgext/bugzilla.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/bugzilla.py	Tue Apr 18 12:24:34 2017 -0400
@@ -15,14 +15,16 @@
 The bug references can optionally include an update for Bugzilla of the
 hours spent working on the bug. Bugs can also be marked fixed.
 
-Three basic modes of access to Bugzilla are provided:
+Four basic modes of access to Bugzilla are provided:
+
+1. Access via the Bugzilla REST-API. Requires Bugzilla 5.0 or later.
 
-1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
+2. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
 
-2. Check data via the Bugzilla XMLRPC interface and submit bug change
+3. Check data via the Bugzilla XMLRPC interface and submit bug change
    via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
 
-3. Writing directly to the Bugzilla database. Only Bugzilla installations
+4. Writing directly to the Bugzilla database. Only Bugzilla installations
    using MySQL are supported. Requires Python MySQLdb.
 
 Writing directly to the database is susceptible to schema changes, and
@@ -50,11 +52,16 @@
 Bugzilla is used instead as the source of the comment. Marking bugs fixed
 works on all supported Bugzilla versions.
 
+Access via the REST-API needs either a Bugzilla username and password
+or an apikey specified in the configuration. Comments are made under
+the given username or the user associated with the apikey in Bugzilla.
+
 Configuration items common to all access modes:
 
 bugzilla.version
   The access type to use. Values recognized are:
 
+  :``restapi``:      Bugzilla REST-API, Bugzilla 5.0 and later.
   :``xmlrpc``:       Bugzilla XMLRPC interface.
   :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
   :``3.0``:          MySQL access, Bugzilla 3.0 and later.
@@ -135,7 +142,7 @@
 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
 Contains entries of the form ``committer = Bugzilla user``.
 
-XMLRPC access mode configuration:
+XMLRPC and REST-API access mode configuration:
 
 bugzilla.bzurl
   The base URL for the Bugzilla installation.
@@ -148,6 +155,13 @@
 bugzilla.password
   The password for Bugzilla login.
 
+REST-API access mode uses the options listed above as well as:
+
+bugzilla.apikey
+  An apikey generated on the Bugzilla instance for API access.
+  Using an apikey removes the need to store the user and password
+  options.
+
 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
 and also:
 
@@ -279,6 +293,7 @@
 
 from __future__ import absolute_import
 
+import json
 import re
 import time
 
@@ -288,10 +303,10 @@
     cmdutil,
     error,
     mail,
+    url,
     util,
 )
 
-urlparse = util.urlparse
 xmlrpclib = util.xmlrpclib
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -641,7 +656,7 @@
         self.bztoken = login.get('token', '')
 
     def transport(self, uri):
-        if urlparse.urlparse(uri, "http")[0] == "https":
+        if util.urlreq.urlparse(uri, "http")[0] == "https":
             return cookiesafetransport()
         else:
             return cookietransport()
@@ -773,6 +788,136 @@
             cmds.append(self.makecommandline("resolution", self.fixresolution))
         self.send_bug_modify_email(bugid, cmds, text, committer)
 
+class NotFound(LookupError):
+    pass
+
+class bzrestapi(bzaccess):
+    """Read and write bugzilla data using the REST API available since
+    Bugzilla 5.0.
+    """
+    def __init__(self, ui):
+        bzaccess.__init__(self, ui)
+        bz = self.ui.config('bugzilla', 'bzurl',
+                            'http://localhost/bugzilla/')
+        self.bzroot = '/'.join([bz, 'rest'])
+        self.apikey = self.ui.config('bugzilla', 'apikey', '')
+        self.user = self.ui.config('bugzilla', 'user', 'bugs')
+        self.passwd = self.ui.config('bugzilla', 'password')
+        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
+        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
+                                            'FIXED')
+
+    def apiurl(self, targets, include_fields=None):
+        url = '/'.join([self.bzroot] + [str(t) for t in targets])
+        qv = {}
+        if self.apikey:
+            qv['api_key'] = self.apikey
+        elif self.user and self.passwd:
+            qv['login'] = self.user
+            qv['password'] = self.passwd
+        if include_fields:
+            qv['include_fields'] = include_fields
+        if qv:
+            url = '%s?%s' % (url, util.urlreq.urlencode(qv))
+        return url
+
+    def _fetch(self, burl):
+        try:
+            resp = url.open(self.ui, burl)
+            return json.loads(resp.read())
+        except util.urlerr.httperror as inst:
+            if inst.code == 401:
+                raise error.Abort(_('authorization failed'))
+            if inst.code == 404:
+                raise NotFound()
+            else:
+                raise
+
+    def _submit(self, burl, data, method='POST'):
+        data = json.dumps(data)
+        if method == 'PUT':
+            class putrequest(util.urlreq.request):
+                def get_method(self):
+                    return 'PUT'
+            request_type = putrequest
+        else:
+            request_type = util.urlreq.request
+        req = request_type(burl, data,
+                           {'Content-Type': 'application/json'})
+        try:
+            resp = url.opener(self.ui).open(req)
+            return json.loads(resp.read())
+        except util.urlerr.httperror as inst:
+            if inst.code == 401:
+                raise error.Abort(_('authorization failed'))
+            if inst.code == 404:
+                raise NotFound()
+            else:
+                raise
+
+    def filter_real_bug_ids(self, bugs):
+        '''remove bug IDs that do not exist in Bugzilla from bugs.'''
+        badbugs = set()
+        for bugid in bugs:
+            burl = self.apiurl(('bug', bugid), include_fields='status')
+            try:
+                self._fetch(burl)
+            except NotFound:
+                badbugs.add(bugid)
+        for bugid in badbugs:
+            del bugs[bugid]
+
+    def filter_cset_known_bug_ids(self, node, bugs):
+        '''remove bug IDs where node occurs in comment text from bugs.'''
+        sn = short(node)
+        for bugid in bugs.keys():
+            burl = self.apiurl(('bug', bugid, 'comment'), include_fields='text')
+            result = self._fetch(burl)
+            comments = result['bugs'][str(bugid)]['comments']
+            if any(sn in c['text'] for c in comments):
+                self.ui.status(_('bug %d already knows about changeset %s\n') %
+                               (bugid, sn))
+                del bugs[bugid]
+
+    def updatebug(self, bugid, newstate, text, committer):
+        '''update the specified bug. Add comment text and set new states.
+
+        If possible add the comment as being from the committer of
+        the changeset. Otherwise use the default Bugzilla user.
+        '''
+        bugmod = {}
+        if 'hours' in newstate:
+            bugmod['work_time'] = newstate['hours']
+        if 'fix' in newstate:
+            bugmod['status'] = self.fixstatus
+            bugmod['resolution'] = self.fixresolution
+        if bugmod:
+            # if we have to change the bug's state, do it here
+            bugmod['comment'] = {
+                'comment': text,
+                'is_private': False,
+                'is_markdown': False,
+            }
+            burl = self.apiurl(('bug', bugid))
+            self._submit(burl, bugmod, method='PUT')
+            self.ui.debug('updated bug %s\n' % bugid)
+        else:
+            burl = self.apiurl(('bug', bugid, 'comment'))
+            self._submit(burl, {
+                'comment': text,
+                'is_private': False,
+                'is_markdown': False,
+            })
+            self.ui.debug('added comment to bug %s\n' % bugid)
+
+    def notify(self, bugs, committer):
+        '''Force sending of Bugzilla notification emails.
+
+        Only required if the access method does not trigger notification
+        emails automatically.
+        '''
+        pass
+
 class bugzilla(object):
     # supported versions of bugzilla. different versions have
     # different schemas.
@@ -781,7 +926,8 @@
         '2.18': bzmysql_2_18,
         '3.0':  bzmysql_3_0,
         'xmlrpc': bzxmlrpc,
-        'xmlrpc+email': bzxmlrpcemail
+        'xmlrpc+email': bzxmlrpcemail,
+        'restapi': bzrestapi,
         }
 
     _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
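
A hedged example of enabling the new REST-API mode, in the style of the
docstring's other configuration blocks (hostname and key are placeholders)::

  [extensions]
  bugzilla =

  [bugzilla]
  version = restapi
  bzurl = https://bugzilla.example.com/
  apikey = MY-GENERATED-API-KEY

With an apikey configured, the ``bugzilla.user``/``bugzilla.password`` pair
can be omitted.
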
--- a/hgext/clonebundles.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/clonebundles.py	Tue Apr 18 12:24:34 2017 -0400
@@ -177,7 +177,7 @@
     # Only advertise if a manifest exists. This does add some I/O to requests.
     # But this should be cheaper than a wasted network round trip due to
     # missing file.
-    if repo.opener.exists('clonebundles.manifest'):
+    if repo.vfs.exists('clonebundles.manifest'):
         caps.append('clonebundles')
 
     return caps
--- a/hgext/color.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/color.py	Tue Apr 18 12:24:34 2017 -0400
@@ -5,652 +5,27 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-'''colorize output from some commands
-
-The color extension colorizes output from several Mercurial commands.
-For example, the diff command shows additions in green and deletions
-in red, while the status command shows modified files in magenta. Many
-other commands have analogous colors. It is possible to customize
-these colors.
-
-Effects
--------
-
-Other effects in addition to color, like bold and underlined text, are
-also available. By default, the terminfo database is used to find the
-terminal codes used to change color and effect.  If terminfo is not
-available, then effects are rendered with the ECMA-48 SGR control
-function (aka ANSI escape codes).
-
-The available effects in terminfo mode are 'blink', 'bold', 'dim',
-'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
-ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
-'underline'.  How each is rendered depends on the terminal emulator.
-Some may not be available for a given terminal type, and will be
-silently ignored.
-
-If the terminfo entry for your terminal is missing codes for an effect
-or has the wrong codes, you can add or override those codes in your
-configuration::
-
-  [color]
-  terminfo.dim = \E[2m
-
-where '\E' is substituted with an escape character.
+'''enable Mercurial color mode (DEPRECATED)
 
-Labels
-------
-
-Text receives color effects depending on the labels that it has. Many
-default Mercurial commands emit labelled text. You can also define
-your own labels in templates using the label function, see :hg:`help
-templates`. A single portion of text may have more than one label. In
-that case, effects given to the last label will override any other
-effects. This includes the special "none" effect, which nullifies
-other effects.
-
-Labels are normally invisible. In order to see these labels and their
-position in the text, use the global --color=debug option. The same
-anchor text may be associated to multiple labels, e.g.
-
-  [log.changeset changeset.secret|changeset:   22611:6f0a53c8f587]
-
-The following are the default effects for some default labels. Default
-effects may be overridden from your configuration file::
-
-  [color]
-  status.modified = blue bold underline red_background
-  status.added = green bold
-  status.removed = red bold blue_background
-  status.deleted = cyan bold underline
-  status.unknown = magenta bold underline
-  status.ignored = black bold
-
-  # 'none' turns off all effects
-  status.clean = none
-  status.copied = none
-
-  qseries.applied = blue bold underline
-  qseries.unapplied = black bold
-  qseries.missing = red bold
+This extension enables Mercurial color mode. The feature is now directly
+available in Mercurial core. You can access it using::
 
-  diff.diffline = bold
-  diff.extended = cyan bold
-  diff.file_a = red bold
-  diff.file_b = green bold
-  diff.hunk = magenta
-  diff.deleted = red
-  diff.inserted = green
-  diff.changed = white
-  diff.tab =
-  diff.trailingwhitespace = bold red_background
-
-  # Blank so it inherits the style of the surrounding label
-  changeset.public =
-  changeset.draft =
-  changeset.secret =
-
-  resolve.unresolved = red bold
-  resolve.resolved = green bold
-
-  bookmarks.active = green
-
-  branches.active = none
-  branches.closed = black bold
-  branches.current = green
-  branches.inactive = none
-
-  tags.normal = green
-  tags.local = black bold
-
-  rebase.rebased = blue
-  rebase.remaining = red bold
-
-  shelve.age = cyan
-  shelve.newest = green bold
-  shelve.name = blue bold
-
-  histedit.remaining = red bold
-
-Custom colors
--------------
+  [ui]
+  color = auto
 
-Because there are only eight standard colors, this module allows you
-to define color names for other color slots which might be available
-for your terminal type, assuming terminfo mode.  For instance::
-
-  color.brightblue = 12
-  color.pink = 207
-  color.orange = 202
-
-to set 'brightblue' to color slot 12 (useful for 16 color terminals
-that have brighter colors defined in the upper eight) and, 'pink' and
-'orange' to colors in 256-color xterm's default color cube.  These
-defined colors may then be used as any of the pre-defined eight,
-including appending '_background' to set the background to that color.
-
-Modes
------
-
-By default, the color extension will use ANSI mode (or win32 mode on
-Windows) if it detects a terminal. To override auto mode (to enable
-terminfo mode, for example), set the following configuration option::
-
-  [color]
-  mode = terminfo
-
-Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
-disable color.
-
-Note that on some systems, terminfo mode may cause problems when using
-color with the pager extension and less -R. less with the -R option
-will only display ECMA-48 color codes, and terminfo mode may sometimes
-emit codes that less doesn't understand. You can work around this by
-either using ansi mode (or auto mode), or by using less -r (which will
-pass through all terminal control codes, not just color control
-codes).
-
-On some systems (such as MSYS in Windows), the terminal may support
-a different color mode than the pager (activated via the "pager"
-extension). It is possible to define separate modes depending on whether
-the pager is active::
-
-  [color]
-  mode = auto
-  pagermode = ansi
-
-If ``pagermode`` is not defined, the ``mode`` will be used.
+See :hg:`help color` for details.
 '''
 
 from __future__ import absolute_import
 
-from mercurial.i18n import _
-from mercurial import (
-    cmdutil,
-    color,
-    commands,
-    dispatch,
-    encoding,
-    extensions,
-    pycompat,
-    subrepo,
-    ui as uimod,
-    util,
-)
+from mercurial import color
 
-cmdtable = {}
-command = cmdutil.command(cmdtable)
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-# start and stop parameters for effects
-_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
-            'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
-            'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2,
-            'black_background': 40, 'red_background': 41,
-            'green_background': 42, 'yellow_background': 43,
-            'blue_background': 44, 'purple_background': 45,
-            'cyan_background': 46, 'white_background': 47}
-
-def _terminfosetup(ui, mode):
-    '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
-
-    # If we failed to load curses, we go ahead and return.
-    if not _terminfo_params:
-        return
-    # Otherwise, see what the config file says.
-    if mode not in ('auto', 'terminfo'):
-        return
-
-    _terminfo_params.update((key[6:], (False, int(val), ''))
-        for key, val in ui.configitems('color')
-        if key.startswith('color.'))
-    _terminfo_params.update((key[9:], (True, '', val.replace('\\E', '\x1b')))
-        for key, val in ui.configitems('color')
-        if key.startswith('terminfo.'))
-
-    try:
-        curses.setupterm()
-    except curses.error as e:
-        _terminfo_params.clear()
-        return
-
-    for key, (b, e, c) in _terminfo_params.items():
-        if not b:
-            continue
-        if not c and not curses.tigetstr(e):
-            # Most terminals don't support dim, invis, etc, so don't be
-            # noisy and use ui.debug().
-            ui.debug("no terminfo entry for %s\n" % e)
-            del _terminfo_params[key]
-    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
-        # Only warn about missing terminfo entries if we explicitly asked for
-        # terminfo mode.
-        if mode == "terminfo":
-            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
-              "ECMA-48 color\n"))
-        _terminfo_params.clear()
-
-def _modesetup(ui, coloropt):
-    if coloropt == 'debug':
-        return 'debug'
-
-    auto = (coloropt == 'auto')
-    always = not auto and util.parsebool(coloropt)
-    if not always and not auto:
-        return None
-
-    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
-                 and ui.formatted()))
-
-    mode = ui.config('color', 'mode', 'auto')
-
-    # If pager is active, color.pagermode overrides color.mode.
-    if getattr(ui, 'pageractive', False):
-        mode = ui.config('color', 'pagermode', mode)
-
-    realmode = mode
-    if mode == 'auto':
-        if pycompat.osname == 'nt':
-            term = encoding.environ.get('TERM')
-            # TERM won't be defined in a vanilla cmd.exe environment.
-
-            # UNIX-like environments on Windows such as Cygwin and MSYS will
-            # set TERM. They appear to make a best effort attempt at setting it
-            # to something appropriate. However, not all environments with TERM
-            # defined support ANSI. Since "ansi" could result in terminal
-            # gibberish, we error on the side of selecting "win32". However, if
-            # w32effects is not defined, we almost certainly don't support
-            # "win32", so don't even try.
-            if (term and 'xterm' in term) or not w32effects:
-                realmode = 'ansi'
-            else:
-                realmode = 'win32'
-        else:
-            realmode = 'ansi'
-
-    def modewarn():
-        # only warn if color.mode was explicitly set and we're in
-        # a formatted terminal
-        if mode == realmode and ui.formatted():
-            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
-
-    if realmode == 'win32':
-        _terminfo_params.clear()
-        if not w32effects:
-            modewarn()
-            return None
-        _effects.update(w32effects)
-    elif realmode == 'ansi':
-        _terminfo_params.clear()
-    elif realmode == 'terminfo':
-        _terminfosetup(ui, mode)
-        if not _terminfo_params:
-            ## FIXME Shouldn't we return None in this case too?
-            modewarn()
-            realmode = 'ansi'
-    else:
-        return None
-
-    if always or (auto and formatted):
-        return realmode
-    return None
-
-try:
-    import curses
-    # Mapping from effect name to terminfo attribute name (or raw code) or
-    # color number.  This will also force-load the curses module.
-    _terminfo_params = {'none': (True, 'sgr0', ''),
-                        'standout': (True, 'smso', ''),
-                        'underline': (True, 'smul', ''),
-                        'reverse': (True, 'rev', ''),
-                        'inverse': (True, 'rev', ''),
-                        'blink': (True, 'blink', ''),
-                        'dim': (True, 'dim', ''),
-                        'bold': (True, 'bold', ''),
-                        'invisible': (True, 'invis', ''),
-                        'italic': (True, 'sitm', ''),
-                        'black': (False, curses.COLOR_BLACK, ''),
-                        'red': (False, curses.COLOR_RED, ''),
-                        'green': (False, curses.COLOR_GREEN, ''),
-                        'yellow': (False, curses.COLOR_YELLOW, ''),
-                        'blue': (False, curses.COLOR_BLUE, ''),
-                        'magenta': (False, curses.COLOR_MAGENTA, ''),
-                        'cyan': (False, curses.COLOR_CYAN, ''),
-                        'white': (False, curses.COLOR_WHITE, '')}
-except ImportError:
-    _terminfo_params = {}
-
-def _effect_str(effect):
-    '''Helper function for render_effects().'''
-
-    bg = False
-    if effect.endswith('_background'):
-        bg = True
-        effect = effect[:-11]
-    try:
-        attr, val, termcode = _terminfo_params[effect]
-    except KeyError:
-        return ''
-    if attr:
-        if termcode:
-            return termcode
-        else:
-            return curses.tigetstr(val)
-    elif bg:
-        return curses.tparm(curses.tigetstr('setab'), val)
-    else:
-        return curses.tparm(curses.tigetstr('setaf'), val)
-
-def render_effects(text, effects):
-    'Wrap text in commands to turn on each effect.'
-    if not text:
-        return text
-    if not _terminfo_params:
-        start = [str(_effects[e]) for e in ['none'] + effects.split()]
-        start = '\033[' + ';'.join(start) + 'm'
-        stop = '\033[' + str(_effects['none']) + 'm'
-    else:
-        start = ''.join(_effect_str(effect)
-                        for effect in ['none'] + effects.split())
-        stop = _effect_str('none')
-    return ''.join([start, text, stop])
-
-def valideffect(effect):
-    'Determine if the effect is valid or not.'
-    good = False
-    if not _terminfo_params and effect in _effects:
-        good = True
-    elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
-        good = True
-    return good
-
-def configstyles(ui):
-    for status, cfgeffects in ui.configitems('color'):
-        if '.' not in status or status.startswith(('color.', 'terminfo.')):
-            continue
-        cfgeffects = ui.configlist('color', status)
-        if cfgeffects:
-            good = []
-            for e in cfgeffects:
-                if valideffect(e):
-                    good.append(e)
-                else:
-                    ui.warn(_("ignoring unknown color/effect %r "
-                              "(configured in color.%s)\n")
-                            % (e, status))
-            color._styles[status] = ' '.join(good)
-
-class colorui(uimod.ui):
-    _colormode = 'ansi'
-    def write(self, *args, **opts):
-        if self._colormode is None:
-            return super(colorui, self).write(*args, **opts)
-
-        label = opts.get('label', '')
-        if self._buffers and not opts.get('prompt', False):
-            if self._bufferapplylabels:
-                self._buffers[-1].extend(self.label(a, label) for a in args)
-            else:
-                self._buffers[-1].extend(args)
-        elif self._colormode == 'win32':
-            for a in args:
-                win32print(a, super(colorui, self).write, **opts)
-        else:
-            return super(colorui, self).write(
-                *[self.label(a, label) for a in args], **opts)
-
-    def write_err(self, *args, **opts):
-        if self._colormode is None:
-            return super(colorui, self).write_err(*args, **opts)
-
-        label = opts.get('label', '')
-        if self._bufferstates and self._bufferstates[-1][0]:
-            return self.write(*args, **opts)
-        if self._colormode == 'win32':
-            for a in args:
-                win32print(a, super(colorui, self).write_err, **opts)
-        else:
-            return super(colorui, self).write_err(
-                *[self.label(a, label) for a in args], **opts)
-
-    def showlabel(self, msg, label):
-        if label and msg:
-            if msg[-1] == '\n':
-                return "[%s|%s]\n" % (label, msg[:-1])
-            else:
-                return "[%s|%s]" % (label, msg)
-        else:
-            return msg
-
-    def label(self, msg, label):
-        if self._colormode is None:
-            return super(colorui, self).label(msg, label)
-
-        if self._colormode == 'debug':
-            return self.showlabel(msg, label)
-
-        effects = []
-        for l in label.split():
-            s = color._styles.get(l, '')
-            if s:
-                effects.append(s)
-            elif valideffect(l):
-                effects.append(l)
-        effects = ' '.join(effects)
-        if effects:
-            return '\n'.join([render_effects(line, effects)
-                              for line in msg.split('\n')])
-        return msg
-
-def uisetup(ui):
-    if ui.plain():
-        return
-    if not isinstance(ui, colorui):
-        colorui.__bases__ = (ui.__class__,)
-        ui.__class__ = colorui
-    def colorcmd(orig, ui_, opts, cmd, cmdfunc):
-        mode = _modesetup(ui_, opts['color'])
-        colorui._colormode = mode
-        if mode and mode != 'debug':
-            configstyles(ui_)
-        return orig(ui_, opts, cmd, cmdfunc)
-    def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
-        if gitsub.ui._colormode and len(commands) and commands[0] == "diff":
-                # insert the argument in the front,
-                # the end of git diff arguments is used for paths
-                commands.insert(1, '--color')
-        return orig(gitsub, commands, env, stream, cwd)
-    extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
-    extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit)
-
 def extsetup(ui):
-    commands.globalopts.append(
-        ('', 'color', 'auto',
-         # i18n: 'always', 'auto', 'never', and 'debug' are keywords
-         # and should not be translated
-         _("when to colorize (boolean, always, auto, never, or debug)"),
-         _('TYPE')))
-
-@command('debugcolor',
-        [('', 'style', None, _('show all configured styles'))],
-        'hg debugcolor')
-def debugcolor(ui, repo, **opts):
-    """show available color, effects or style"""
-    ui.write(('color mode: %s\n') % ui._colormode)
-    if opts.get('style'):
-        return _debugdisplaystyle(ui)
-    else:
-        return _debugdisplaycolor(ui)
-
-def _debugdisplaycolor(ui):
-    oldstyle = color._styles.copy()
-    try:
-        color._styles.clear()
-        for effect in _effects.keys():
-            color._styles[effect] = effect
-        if _terminfo_params:
-            for k, v in ui.configitems('color'):
-                if k.startswith('color.'):
-                    color._styles[k] = k[6:]
-                elif k.startswith('terminfo.'):
-                    color._styles[k] = k[9:]
-        ui.write(_('available colors:\n'))
-        # sort label with a '_' after the other to group '_background' entry.
-        items = sorted(color._styles.items(),
-                       key=lambda i: ('_' in i[0], i[0], i[1]))
-        for colorname, label in items:
-            ui.write(('%s\n') % colorname, label=label)
-    finally:
-        color._styles.clear()
-        color._styles.update(oldstyle)
-
-def _debugdisplaystyle(ui):
-    ui.write(_('available style:\n'))
-    width = max(len(s) for s in color._styles)
-    for label, effects in sorted(color._styles.items()):
-        ui.write('%s' % label, label=label)
-        if effects:
-            # 50
-            ui.write(': ')
-            ui.write(' ' * (max(0, width - len(label))))
-            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
-        ui.write('\n')
-
-if pycompat.osname != 'nt':
-    w32effects = None
-else:
-    import ctypes
-    import re
-
-    _kernel32 = ctypes.windll.kernel32
-
-    _WORD = ctypes.c_ushort
-
-    _INVALID_HANDLE_VALUE = -1
-
-    class _COORD(ctypes.Structure):
-        _fields_ = [('X', ctypes.c_short),
-                    ('Y', ctypes.c_short)]
-
-    class _SMALL_RECT(ctypes.Structure):
-        _fields_ = [('Left', ctypes.c_short),
-                    ('Top', ctypes.c_short),
-                    ('Right', ctypes.c_short),
-                    ('Bottom', ctypes.c_short)]
-
-    class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
-        _fields_ = [('dwSize', _COORD),
-                    ('dwCursorPosition', _COORD),
-                    ('wAttributes', _WORD),
-                    ('srWindow', _SMALL_RECT),
-                    ('dwMaximumWindowSize', _COORD)]
-
-    _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
-    _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
-
-    _FOREGROUND_BLUE = 0x0001
-    _FOREGROUND_GREEN = 0x0002
-    _FOREGROUND_RED = 0x0004
-    _FOREGROUND_INTENSITY = 0x0008
-
-    _BACKGROUND_BLUE = 0x0010
-    _BACKGROUND_GREEN = 0x0020
-    _BACKGROUND_RED = 0x0040
-    _BACKGROUND_INTENSITY = 0x0080
-
-    _COMMON_LVB_REVERSE_VIDEO = 0x4000
-    _COMMON_LVB_UNDERSCORE = 0x8000
-
-    # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
-    w32effects = {
-        'none': -1,
-        'black': 0,
-        'red': _FOREGROUND_RED,
-        'green': _FOREGROUND_GREEN,
-        'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
-        'blue': _FOREGROUND_BLUE,
-        'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
-        'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
-        'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
-        'bold': _FOREGROUND_INTENSITY,
-        'black_background': 0x100,                  # unused value > 0x0f
-        'red_background': _BACKGROUND_RED,
-        'green_background': _BACKGROUND_GREEN,
-        'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
-        'blue_background': _BACKGROUND_BLUE,
-        'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
-        'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
-        'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
-                             _BACKGROUND_BLUE),
-        'bold_background': _BACKGROUND_INTENSITY,
-        'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
-        'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
-    }
-
-    passthrough = set([_FOREGROUND_INTENSITY,
-                       _BACKGROUND_INTENSITY,
-                       _COMMON_LVB_UNDERSCORE,
-                       _COMMON_LVB_REVERSE_VIDEO])
-
-    stdout = _kernel32.GetStdHandle(
-                  _STD_OUTPUT_HANDLE)  # don't close the handle returned
-    if stdout is None or stdout == _INVALID_HANDLE_VALUE:
-        w32effects = None
-    else:
-        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
-        if not _kernel32.GetConsoleScreenBufferInfo(
-                    stdout, ctypes.byref(csbi)):
-            # stdout may not support GetConsoleScreenBufferInfo()
-            # when called from subprocess or redirected
-            w32effects = None
-        else:
-            origattr = csbi.wAttributes
-            ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
-                                re.MULTILINE | re.DOTALL)
-
-    def win32print(text, orig, **opts):
-        label = opts.get('label', '')
-        attr = origattr
-
-        def mapcolor(val, attr):
-            if val == -1:
-                return origattr
-            elif val in passthrough:
-                return attr | val
-            elif val > 0x0f:
-                return (val & 0x70) | (attr & 0x8f)
-            else:
-                return (val & 0x07) | (attr & 0xf8)
-
-        # determine console attributes based on labels
-        for l in label.split():
-            style = color._styles.get(l, '')
-            for effect in style.split():
-                try:
-                    attr = mapcolor(w32effects[effect], attr)
-                except KeyError:
-                    # w32effects could not have certain attributes so we skip
-                    # them if not found
-                    pass
-        # hack to ensure regexp finds data
-        if not text.startswith('\033['):
-            text = '\033[m' + text
-
-        # Look for ANSI-like codes embedded in text
-        m = re.match(ansire, text)
-
-        try:
-            while m:
-                for sattr in m.group(1).split(';'):
-                    if sattr:
-                        attr = mapcolor(int(sattr), attr)
-                _kernel32.SetConsoleTextAttribute(stdout, attr)
-                orig(m.group(2), **opts)
-                m = re.match(ansire, m.group(3))
-        finally:
-            # Explicitly reset original attributes
-            _kernel32.SetConsoleTextAttribute(stdout, origattr)
+    # change default color config
+    color._enabledbydefault = True
--- a/hgext/convert/cvsps.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/convert/cvsps.py	Tue Apr 18 12:24:34 2017 -0400
@@ -622,7 +622,7 @@
     # Sort changesets by date
 
     odd = set()
-    def cscmp(l, r, odd=odd):
+    def cscmp(l, r):
         d = sum(l.date) - sum(r.date)
         if d:
             return d
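
The dropped ``odd=odd`` default argument was only a definition-time
name-binding trick. A minimal sketch (not the cvsps code itself) of why
a plain closure suffices when the captured set is mutated in place and
never rebound::

    odd = set()

    def record(l, r):
        # 'odd' is resolved in the enclosing scope at call time; the set
        # object is mutated, never rebound, so no default-argument
        # snapshot is needed
        odd.add((l, r))

    record('a', 'b')
    assert ('a', 'b') in odd
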
--- a/hgext/convert/hg.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/convert/hg.py	Tue Apr 18 12:24:34 2017 -0400
@@ -90,10 +90,10 @@
             self.wlock.release()
 
     def revmapfile(self):
-        return self.repo.join("shamap")
+        return self.repo.vfs.join("shamap")
 
     def authorfile(self):
-        return self.repo.join("authormap")
+        return self.repo.vfs.join("authormap")
 
     def setbranch(self, branch, pbranches):
         if not self.clonebranches:
@@ -625,7 +625,7 @@
 
     def converted(self, rev, destrev):
         if self.convertfp is None:
-            self.convertfp = open(self.repo.join('shamap'), 'a')
+            self.convertfp = open(self.repo.vfs.join('shamap'), 'a')
         self.convertfp.write('%s %s\n' % (destrev, rev))
         self.convertfp.flush()
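
These hunks are part of the tree-wide ``repo.join()`` ->
``repo.vfs.join()`` migration. A hedged sketch of the surviving API,
assuming a Mercurial repo object and the caller's ``destrev``/``rev``
values are in scope::

    path = repo.vfs.join('shamap')    # resolves to <repo>/.hg/shamap
    fp = repo.vfs('shamap', 'a')      # open relative to the .hg/ vfs
    fp.write('%s %s\n' % (destrev, rev))
    fp.close()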
 
--- a/hgext/convert/p4.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/convert/p4.py	Tue Apr 18 12:24:34 2017 -0400
@@ -161,7 +161,12 @@
             d = self._fetch_revision(change)
             c = self._construct_commit(d, parents)
 
-            shortdesc = c.desc.splitlines(True)[0].rstrip('\r\n')
+            descarr = c.desc.splitlines(True)
+            if len(descarr) > 0:
+                shortdesc = descarr[0].rstrip('\r\n')
+            else:
+                shortdesc = '**empty changelist description**'
+
             t = '%s %s' % (c.rev, repr(shortdesc)[1:-1])
             ui.status(util.ellipsis(t, 80) + '\n')
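
The new guard matters because ``str.splitlines()`` on an empty string
returns an empty list, so the old ``[0]`` indexing raised IndexError on
empty changelist descriptions. A standalone illustration::

    assert ''.splitlines(True) == []

    desc = ''
    lines = desc.splitlines(True)
    shortdesc = (lines[0].rstrip('\r\n') if lines
                 else '**empty changelist description**')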
 
--- a/hgext/convert/subversion.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/convert/subversion.py	Tue Apr 18 12:24:34 2017 -0400
@@ -13,8 +13,8 @@
     encoding,
     error,
     pycompat,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 
 from . import common
@@ -1146,8 +1146,8 @@
             self.run0('checkout', path, wcpath)
 
             self.wc = wcpath
-        self.opener = scmutil.opener(self.wc)
-        self.wopener = scmutil.opener(self.wc)
+        self.opener = vfsmod.vfs(self.wc)
+        self.wopener = vfsmod.vfs(self.wc)
         self.childmap = mapfile(ui, self.join('hg-childmap'))
         if util.checkexec(self.wc):
             self.is_exec = util.isexec
@@ -1186,7 +1186,7 @@
                 # best bet is to assume they are in local
                 # encoding. They will be passed to command line calls
                 # later anyway, so they better be.
-                m.add(encoding.tolocal(name.encode('utf-8')))
+                m.add(encoding.unitolocal(name))
                 break
         return m
 
@@ -1306,7 +1306,7 @@
             self.setexec = []
 
         fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
-        fp = os.fdopen(fd, 'w')
+        fp = os.fdopen(fd, pycompat.sysstr('w'))
         fp.write(commit.desc)
         fp.close()
         try:
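
Two py3-portability idioms appear above. ``encoding.unitolocal()``
folds the explicit utf-8 encode into the helper; assuming the semantics
of mercurial.encoding, the rewrite is equivalent to the old spelling::

    from mercurial import encoding

    name = u'caf\xe9'
    assert encoding.unitolocal(name) == encoding.tolocal(name.encode('utf-8'))

``pycompat.sysstr('w')`` keeps the mode argument a native str on both
Python 2 and Python 3, which is what ``os.fdopen()`` expects.
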
--- a/hgext/eol.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/eol.py	Tue Apr 18 12:24:34 2017 -0400
@@ -101,6 +101,7 @@
     error,
     extensions,
     match,
+    pycompat,
     util,
 )
 
@@ -112,11 +113,6 @@
 
 # Matches a lone LF, i.e., one that is not part of CRLF.
 singlelf = re.compile('(^|[^\r])\n')
-# Matches a single EOL which can either be a CRLF where repeated CR
-# are removed or a LF. We do not care about old Macintosh files, so a
-# stray CR is an error.
-eolre = re.compile('\r*\n')
-
 
 def inconsistenteol(data):
     return '\r\n' in data and singlelf.search(data)
@@ -130,7 +126,7 @@
     if (ui.configbool('eol', 'fix-trailing-newline', False)
         and s and s[-1] != '\n'):
         s = s + '\n'
-    return eolre.sub('\n', s)
+    return util.tolf(s)
 
 def tocrlf(s, params, ui, **kwargs):
     """Filter to convert to CRLF EOLs."""
@@ -141,7 +137,7 @@
     if (ui.configbool('eol', 'fix-trailing-newline', False)
         and s and s[-1] != '\n'):
         s = s + '\n'
-    return eolre.sub('\r\n', s)
+    return util.tocrlf(s)
 
 def isbinary(s, params):
     """Filter to do nothing with the file."""
@@ -170,7 +166,7 @@
 
         isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
         self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
-        iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
+        iswdlf = ui.config('eol', 'native', pycompat.oslinesep) in ('LF', '\n')
         self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
 
         include = []
@@ -223,7 +219,7 @@
                 if node is None:
                     # Cannot use workingctx.data() since it would load
                     # and cache the filters before we configure them.
-                    data = repo.wfile('.hgeol').read()
+                    data = repo.wvfs('.hgeol').read()
                 else:
                     data = repo[node]['.hgeol'].data()
                 return eolfile(ui, repo.root, data)
@@ -314,7 +310,7 @@
 
             oldeol = None
             try:
-                cachemtime = os.path.getmtime(self.join("eol.cache"))
+                cachemtime = os.path.getmtime(self.vfs.join("eol.cache"))
             except OSError:
                 cachemtime = 0
             else:
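
The removed ``eolre`` regex moved into util as ``tolf()``/``tocrlf()``.
Rough equivalents reconstructed from the deleted pattern (a sketch, not
the util implementation)::

    import re
    eolre = re.compile('\r*\n')

    def tolf(s):
        # collapse CRLF (and any run of CRs before a LF) to a lone LF
        return eolre.sub('\n', s)

    def tocrlf(s):
        return eolre.sub('\r\n', s)

    assert tolf('a\r\nb\n') == 'a\nb\n'
    assert tocrlf('a\r\nb\n') == 'a\r\nb\r\n'
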
--- a/hgext/extdiff.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/extdiff.py	Tue Apr 18 12:24:34 2017 -0400
@@ -273,7 +273,7 @@
         cmdline = re.sub(regex, quote, cmdline)
 
         ui.debug('running %r in %s\n' % (cmdline, tmproot))
-        ui.system(cmdline, cwd=tmproot)
+        ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
 
         for copy_fn, working_fn, mtime in fns_and_mtime:
             if os.lstat(copy_fn).st_mtime != mtime:
@@ -342,7 +342,7 @@
     def __init__(self, path, cmdline):
         # We can't pass non-ASCII through docstrings (and path is
         # in an unknown encoding anyway)
-        docpath = path.encode("string-escape")
+        docpath = util.escapestr(path)
         self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
         self._cmdline = cmdline
 
--- a/hgext/fsmonitor/__init__.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/fsmonitor/__init__.py	Tue Apr 18 12:24:34 2017 -0400
@@ -91,14 +91,17 @@
 
 from __future__ import absolute_import
 
+import codecs
 import hashlib
 import os
 import stat
+import sys
 
 from mercurial.i18n import _
 from mercurial import (
     context,
     encoding,
+    error,
     extensions,
     localrepo,
     merge,
@@ -110,6 +113,7 @@
 from mercurial import match as matchmod
 
 from . import (
+    pywatchman,
     state,
     watchmanclient,
 )
@@ -159,6 +163,28 @@
     sha1.update('\0')
     return sha1.hexdigest()
 
+_watchmanencoding = pywatchman.encoding.get_local_encoding()
+_fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+_fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
+
+def _watchmantofsencoding(path):
+    """Fix path to match watchman and local filesystem encoding
+
+    watchman's path encoding can differ from the filesystem encoding. For
+    example, on Windows it is always utf-8.
+    """
+    try:
+        decoded = path.decode(_watchmanencoding)
+    except UnicodeDecodeError as e:
+        raise error.Abort(str(e), hint='watchman encoding error')
+
+    try:
+        encoded = decoded.encode(_fsencoding, 'strict')
+    except UnicodeEncodeError as e:
+        raise error.Abort(str(e))
+
+    return encoded
+
 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
     '''Replacement for dirstate.walk, hooking into Watchman.
 
@@ -303,6 +329,8 @@
     # for name case changes.
     for entry in result['files']:
         fname = entry['name']
+        if _fixencoding:
+            fname = _watchmantofsencoding(fname)
         if switch_slashes:
             fname = fname.replace('\\', '/')
         if normalize:
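
A hypothetical illustration of the transcoding step (the encodings here
are assumptions, not what any given host reports): watchman may hand
back utf-8 byte paths while the local filesystem uses another encoding,
so each result name is decoded and re-encoded before use::

    raw = 'caf\xc3\xa9'                       # utf-8 bytes from watchman
    fixed = raw.decode('utf-8').encode('latin-1')
    assert fixed == 'caf\xe9'
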
--- a/hgext/fsmonitor/state.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/fsmonitor/state.py	Tue Apr 18 12:24:34 2017 -0400
@@ -20,7 +20,7 @@
 
 class state(object):
     def __init__(self, repo):
-        self._opener = repo.opener
+        self._vfs = repo.vfs
         self._ui = repo.ui
         self._rootdir = pathutil.normasprefix(repo.root)
         self._lastclock = None
@@ -33,7 +33,7 @@
 
     def get(self):
         try:
-            file = self._opener('fsmonitor.state', 'rb')
+            file = self._vfs('fsmonitor.state', 'rb')
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
@@ -91,7 +91,7 @@
             return
 
         try:
-            file = self._opener('fsmonitor.state', 'wb', atomictemp=True)
+            file = self._vfs('fsmonitor.state', 'wb', atomictemp=True)
         except (IOError, OSError):
             self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
             return
--- a/hgext/gpg.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/gpg.py	Tue Apr 18 12:24:34 2017 -0400
@@ -18,6 +18,7 @@
     error,
     match,
     node as hgnode,
+    pycompat,
     util,
 )
 
@@ -44,11 +45,11 @@
         try:
             # create temporary files
             fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
-            fp = os.fdopen(fd, 'wb')
+            fp = os.fdopen(fd, pycompat.sysstr('wb'))
             fp.write(sig)
             fp.close()
             fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
-            fp = os.fdopen(fd, 'wb')
+            fp = os.fdopen(fd, pycompat.sysstr('wb'))
             fp.write(data)
             fp.close()
             gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
@@ -280,7 +281,7 @@
             raise error.Abort(_("working copy of .hgsigs is changed "),
                              hint=_("please commit .hgsigs manually"))
 
-    sigsfile = repo.wfile(".hgsigs", "ab")
+    sigsfile = repo.wvfs(".hgsigs", "ab")
     sigsfile.write(sigmessage)
     sigsfile.close()
 
--- a/hgext/hgk.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/hgk.py	Tue Apr 18 12:24:34 2017 -0400
@@ -71,8 +71,10 @@
     inferrepo=True)
 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
     """diff trees from two commits"""
-    def __difftree(repo, node1, node2, files=[]):
+    def __difftree(repo, node1, node2, files=None):
         assert node2 is not None
+        if files is None:
+            files = []
         mmap = repo[node1].manifest()
         mmap2 = repo[node2].manifest()
         m = scmutil.match(repo[node1], files)
@@ -345,4 +347,4 @@
 
     cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
     ui.debug("running %s\n" % cmd)
-    ui.system(cmd)
+    ui.system(cmd, blockedtag='hgk_view')
--- a/hgext/histedit.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/histedit.py	Tue Apr 18 12:24:34 2017 -0400
@@ -36,7 +36,7 @@
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
  #  f, fold = use commit, but combine it with the one above
- #  r, roll = like fold, but discard this commit's description
+ #  r, roll = like fold, but discard this commit's description and date
  #  d, drop = remove commit from history
  #  m, mess = edit commit message without changing commit content
  #
@@ -58,7 +58,7 @@
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
  #  f, fold = use commit, but combine it with the one above
- #  r, roll = like fold, but discard this commit's description
+ #  r, roll = like fold, but discard this commit's description and date
  #  d, drop = remove commit from history
  #  m, mess = edit commit message without changing commit content
  #
@@ -71,11 +71,11 @@
  ***
  Add delta
 
-Edit the commit message to your liking, then close the editor. For
-this example, let's assume that the commit message was changed to
-``Add beta and delta.`` After histedit has run and had a chance to
-remove any old or temporary revisions it needed, the history looks
-like this::
+Edit the commit message to your liking, then close the editor. The date used
+for the commit will be the later of the two commits' dates. For this example,
+let's assume that the commit message was changed to ``Add beta and delta.``
+After histedit has run and had a chance to remove any old or temporary
+revisions it needed, the history looks like this::
 
  @  2[tip]   989b4d060121   2009-04-27 18:04 -0500   durin42
  |    Add beta and delta.
@@ -97,9 +97,10 @@
 allowing you to edit files freely, or even use ``hg record`` to commit
 some changes as a separate commit. When you're done, any remaining
 uncommitted changes will be committed as well. When done, run ``hg
-histedit --continue`` to finish this step. You'll be prompted for a
-new commit message, but the default commit message will be the
-original message for the ``edit`` ed revision.
+histedit --continue`` to finish this step. If there are uncommitted
+changes, you'll be prompted for a new commit message, but the default
+commit message will be the original message for the ``edit`` ed
+revision, and the date of the original commit will be preserved.
 
 The ``message`` operation will give you a chance to revise a commit
 message without changing the contents. It's a shortcut for doing
@@ -167,6 +168,15 @@
   [histedit]
   dropmissing = True
 
+By default, histedit will close the transaction after each action. For
+performance purposes, you can configure histedit to use a single transaction
+across the entire histedit. WARNING: This setting introduces a significant risk
+of losing the work you've done in a histedit if the histedit aborts
+unexpectedly::
+
+  [histedit]
+  singletransaction = True
+
 """
 
 from __future__ import absolute_import
@@ -268,6 +278,7 @@
         self.lock = lock
         self.wlock = wlock
         self.backupfile = None
+        self.tr = None
         if replacements is None:
             self.replacements = []
         else:
@@ -299,8 +310,15 @@
         self.replacements = replacements
         self.backupfile = backupfile
 
-    def write(self):
-        fp = self.repo.vfs('histedit-state', 'w')
+    def write(self, tr=None):
+        if tr:
+            tr.addfilegenerator('histedit-state', ('histedit-state',),
+                                self._write, location='plain')
+        else:
+            with self.repo.vfs("histedit-state", "w") as f:
+                self._write(f)
+
+    def _write(self, fp):
         fp.write('v1\n')
         fp.write('%s\n' % node.hex(self.parentctxnode))
         fp.write('%s\n' % node.hex(self.topmost))
@@ -316,7 +334,6 @@
         if not backupfile:
             backupfile = ''
         fp.write('%s\n' % backupfile)
-        fp.close()
 
     def _load(self):
         fp = self.repo.vfs('histedit-state', 'r')
@@ -501,16 +518,12 @@
     """
     phasemin = src.phase()
     def commitfunc(**kwargs):
-        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
-        try:
-            repo.ui.setconfig('phases', 'new-commit', phasemin,
-                              'histedit')
+        overrides = {('phases', 'new-commit'): phasemin}
+        with repo.ui.configoverride(overrides, 'histedit'):
             extra = kwargs.get('extra', {}).copy()
             extra['histedit_source'] = src.hex()
             kwargs['extra'] = extra
             return repo.commit(**kwargs)
-        finally:
-            repo.ui.restoreconfig(phasebackup)
     return commitfunc
 
 def applychanges(ui, repo, ctx, opts):
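
``ui.configoverride()`` replaces the manual backup/set/restore dance
removed above. An illustrative reimplementation (not Mercurial's actual
code) built from the same primitives::

    from contextlib import contextmanager

    @contextmanager
    def configoverride(ui, overrides, source=''):
        backups = []
        try:
            for (section, name), value in overrides.items():
                backups.append(ui.backupconfig(section, name))
                ui.setconfig(section, name, value, source)
            yield
        finally:
            # restoration runs even if the body raises
            for backup in backups:
                ui.restoreconfig(backup)
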
@@ -724,6 +737,15 @@
         """
         return True
 
+    def firstdate(self):
+        """Returns true if the rule should preserve the date of the first
+        change.
+
+        This exists mainly so that 'rollup' rules can be a subclass of
+        'fold'.
+        """
+        return False
+
     def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
         parent = ctx.parents()[0].node()
         repo.ui.pushbuffer()
@@ -742,21 +764,21 @@
                 [oldctx.description()]) + '\n'
         commitopts['message'] = newmessage
         # date
-        commitopts['date'] = max(ctx.date(), oldctx.date())
+        if self.firstdate():
+            commitopts['date'] = ctx.date()
+        else:
+            commitopts['date'] = max(ctx.date(), oldctx.date())
         extra = ctx.extra().copy()
         # histedit_source
         # note: ctx is likely a temporary commit but that's the best we can do
         #       here. This is sufficient to solve issue3681 anyway.
         extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
         commitopts['extra'] = extra
-        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
-        try:
-            phasemin = max(ctx.phase(), oldctx.phase())
-            repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
+        phasemin = max(ctx.phase(), oldctx.phase())
+        overrides = {('phases', 'new-commit'): phasemin}
+        with repo.ui.configoverride(overrides, 'histedit'):
             n = collapse(repo, ctx, repo[newnode], commitopts,
                          skipprompt=self.skipprompt())
-        finally:
-            repo.ui.restoreconfig(phasebackup)
         if n is None:
             return ctx, []
         repo.ui.pushbuffer()
@@ -809,7 +831,7 @@
         return True
 
 @action(["roll", "r"],
-        _("like fold, but discard this commit's description"))
+        _("like fold, but discard this commit's description and date"))
 class rollup(fold):
     def mergedescs(self):
         return False
@@ -817,6 +839,9 @@
     def skipprompt(self):
         return True
 
+    def firstdate(self):
+        return True
+
 @action(["drop", "d"],
         _('remove commit from history'))
 class drop(histeditaction):
@@ -884,11 +909,11 @@
 
     - `mess` to reword the changeset commit message
 
-    - `fold` to combine it with the preceding changeset
+    - `fold` to combine it with the preceding changeset (using the later date)
 
-    - `roll` like fold, but discarding this commit's description
+    - `roll` like fold, but discarding this commit's description and date
 
-    - `edit` to edit this changeset
+    - `edit` to edit this changeset (preserving date)
 
     There are a number of ways to select the root changeset:
 
@@ -992,7 +1017,8 @@
 
 def _readfile(ui, path):
     if path == '-':
-        return ui.fin.read()
+        with ui.timeblockedsection('histedit'):
+            return ui.fin.read()
     else:
         with open(path, 'rb') as f:
             return f.read()
@@ -1082,17 +1108,45 @@
 
     total = len(state.actions)
     pos = 0
-    while state.actions:
-        state.write()
-        actobj = state.actions.pop(0)
-        pos += 1
-        ui.progress(_("editing"), pos, actobj.torule(),
-                    _('changes'), total)
-        ui.debug('histedit: processing %s %s\n' % (actobj.verb,\
-                                                   actobj.torule()))
-        parentctx, replacement_ = actobj.run()
-        state.parentctxnode = parentctx.node()
-        state.replacements.extend(replacement_)
+    state.tr = None
+
+    # Force an initial state file write, so the user can run --abort/continue
+    # even if there's an exception before the first transaction serialize.
+    state.write()
+    try:
+        # Don't use singletransaction by default since it rolls the entire
+        # transaction back if an unexpected exception happens (like a
+        # pretxncommit hook throws, or the user aborts the commit msg editor).
+        if ui.configbool("histedit", "singletransaction", False):
+            # Don't use a 'with' for the transaction, since actions may close
+            # and reopen a transaction. For example, if the action executes an
+            # external process it may choose to commit the transaction first.
+            state.tr = repo.transaction('histedit')
+
+        while state.actions:
+            state.write(tr=state.tr)
+            actobj = state.actions[0]
+            pos += 1
+            ui.progress(_("editing"), pos, actobj.torule(),
+                        _('changes'), total)
+            ui.debug('histedit: processing %s %s\n' % (actobj.verb,\
+                                                       actobj.torule()))
+            parentctx, replacement_ = actobj.run()
+            state.parentctxnode = parentctx.node()
+            state.replacements.extend(replacement_)
+            state.actions.pop(0)
+
+        if state.tr is not None:
+            state.tr.close()
+    except error.InterventionRequired:
+        if state.tr is not None:
+            state.tr.close()
+        raise
+    except Exception:
+        if state.tr is not None:
+            state.tr.abort()
+        raise
+
     state.write()
     ui.progress(_("editing"), None)
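
Condensed, the transaction discipline above is: close on success, close
on InterventionRequired as well (so the on-disk state survives for
``--continue``), and abort only on unexpected errors. A sketch where
``single`` and ``run_actions`` are hypothetical stand-ins for the
config check and the while loop::

    tr = repo.transaction('histedit') if single else None
    try:
        run_actions(state, tr)
        if tr is not None:
            tr.close()
    except error.InterventionRequired:
        if tr is not None:
            tr.close()
        raise
    except Exception:
        if tr is not None:
            tr.abort()
        raise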
 
@@ -1115,29 +1169,13 @@
                     for n in succs[1:]:
                         ui.debug(m % node.short(n))
 
-    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
-    if supportsmarkers:
-        # Only create markers if the temp nodes weren't already removed.
-        obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes)
-                                       if t in repo))
-    else:
-        cleanupnode(ui, repo, 'temp', tmpnodes)
+    safecleanupnode(ui, repo, 'temp', tmpnodes)
 
     if not state.keep:
         if mapping:
             movebookmarks(ui, repo, mapping, state.topmost, ntm)
             # TODO update mq state
-        if supportsmarkers:
-            markers = []
-            # sort by revision number because it sound "right"
-            for prec in sorted(mapping, key=repo.changelog.rev):
-                succs = mapping[prec]
-                markers.append((repo[prec],
-                                tuple(repo[s] for s in succs)))
-            if markers:
-                obsolete.createmarkers(repo, markers)
-        else:
-            cleanupnode(ui, repo, 'replaced', mapping)
+        safecleanupnode(ui, repo, 'replaced', mapping)
 
     state.clear()
     if os.path.exists(repo.sjoin('undo')):
@@ -1154,7 +1192,7 @@
 
         # Recover our old commits if necessary
         if not state.topmost in repo and state.backupfile:
-            backupfile = repo.join(state.backupfile)
+            backupfile = repo.vfs.join(state.backupfile)
             f = hg.openpath(ui, backupfile)
             gen = exchange.readbundle(ui, f, backupfile)
             with repo.transaction('histedit.abort') as tr:
@@ -1340,7 +1378,7 @@
     # Save edit rules in .hg/histedit-last-edit.txt in case
     # the user needs to ask for help after something
     # surprising happens.
-    f = open(repo.join('histedit-last-edit.txt'), 'w')
+    f = open(repo.vfs.join('histedit-last-edit.txt'), 'w')
     f.write(rules)
     f.close()
 
@@ -1564,6 +1602,34 @@
             # This would reduce bundle overhead
             repair.strip(ui, repo, c)
 
+def safecleanupnode(ui, repo, name, nodes):
+    """strip or obsolete nodes
+
+    nodes could be either a set or a dict mapping nodes to replacements.
+    nodes may be unknown (outside the repo).
+    """
+    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
+    if supportsmarkers:
+        if util.safehasattr(nodes, 'get'):
+            # nodes is a dict-like mapping
+            # use unfiltered repo for successors in case they are hidden
+            urepo = repo.unfiltered()
+            def getmarker(prec):
+                succs = tuple(urepo[n] for n in nodes.get(prec, ()))
+                return (repo[prec], succs)
+        else:
+            # nodes is a set-like
+            def getmarker(prec):
+                return (repo[prec], ())
+        # sort by revision number because it sounds "right"
+        sortednodes = sorted([n for n in nodes if n in repo],
+                             key=repo.changelog.rev)
+        markers = [getmarker(t) for t in sortednodes]
+        if markers:
+            obsolete.createmarkers(repo, markers)
+    else:
+        return cleanupnode(ui, repo, name, nodes)
+
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
@@ -1581,7 +1647,7 @@
 extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.join('histedit-state')):
+    if not os.path.exists(repo.vfs.join('histedit-state')):
         return
     state = histeditstate(repo)
     state.read()
--- a/hgext/journal.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/journal.py	Tue Apr 18 12:24:34 2017 -0400
@@ -4,7 +4,7 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-"""Track previous positions of bookmarks (EXPERIMENTAL)
+"""track previous positions of bookmarks (EXPERIMENTAL)
 
 This extension adds a new command: `hg journal`, which shows you where
 bookmarks were previously located.
@@ -163,7 +163,7 @@
             # to copy. move shared date over from source to destination but
             # move the local file first
             if repo.vfs.exists('namejournal'):
-                journalpath = repo.join('namejournal')
+                journalpath = repo.vfs.join('namejournal')
                 util.rename(journalpath, journalpath + '.bak')
             storage = repo.journal
             local = storage._open(
--- a/hgext/keyword.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/keyword.py	Tue Apr 18 12:24:34 2017 -0400
@@ -438,7 +438,7 @@
             # simulate hgrc parsing
             rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
             repo.vfs.write('hgrc', rcmaps)
-            ui.readconfig(repo.join('hgrc'))
+            ui.readconfig(repo.vfs.join('hgrc'))
         kwmaps = dict(ui.configitems('keywordmaps'))
     elif opts.get('default'):
         if svn:
--- a/hgext/largefiles/basestore.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/basestore.py	Tue Apr 18 12:24:34 2017 -0400
@@ -130,7 +130,7 @@
                     key = (filename, fctx.filenode())
                     if key not in verified:
                         verified.add(key)
-                        expectedhash = fctx.data()[0:40]
+                        expectedhash = lfutil.readasstandin(fctx)
                         filestocheck.append((cset, filename, expectedhash))
 
         failed = self._verifyfiles(contents, filestocheck)
--- a/hgext/largefiles/lfcommands.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/lfcommands.py	Tue Apr 18 12:24:34 2017 -0400
@@ -220,7 +220,8 @@
                 normalfiles.add(f)
 
         if f in lfiles:
-            dstfiles.append(lfutil.standin(f))
+            fstandin = lfutil.standin(f)
+            dstfiles.append(fstandin)
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
@@ -236,7 +237,7 @@
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                     executable = 'x' in ctx[f].flags()
-                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
+                    lfutil.writestandin(rdst, fstandin, hash,
                         executable)
                     lfiletohash[f] = hash
         else:
@@ -244,10 +245,10 @@
             dstfiles.append(f)
 
     def getfilectx(repo, memctx, f):
-        if lfutil.isstandin(f):
+        srcfname = lfutil.splitstandin(f)
+        if srcfname is not None:
             # if the file isn't in the manifest then it was removed
-            # or renamed, raise IOError to indicate this
-            srcfname = lfutil.splitstandin(f)
+            # or renamed, return None to indicate this
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
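
This is the recurring refactor across the largefiles hunks: call
``splitstandin()`` once and test its result against None, rather than
pairing ``isstandin()`` with a second ``splitstandin()`` call. A sketch
where ``handle_largefile`` is a hypothetical consumer::

    lfile = lfutil.splitstandin(f)    # '.hglf/foo' -> 'foo', else None
    if lfile is not None:
        handle_largefile(lfile)
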
@@ -402,9 +403,10 @@
         lfiles = set(lfiles) & set(filelist)
     toget = []
 
+    ctx = repo[node]
     for lfile in lfiles:
         try:
-            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
+            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
         except IOError as err:
             if err.errno == errno.ENOENT:
                 continue # node must be None and standin wasn't found in wctx
@@ -456,6 +458,7 @@
         update = {}
         updated, removed = 0, 0
         wvfs = repo.wvfs
+        wctx = repo[None]
         for lfile in lfiles:
             rellfile = lfile
             rellfileorig = os.path.relpath(
@@ -471,9 +474,9 @@
                     shutil.copyfile(wvfs.join(rellfile),
                                     wvfs.join(rellfileorig))
                     wvfs.unlinkpath(relstandinorig)
-                expecthash = lfutil.readstandin(repo, lfile)
+                expecthash = lfutil.readasstandin(wctx[relstandin])
                 if expecthash != '':
-                    if lfile not in repo[None]: # not switched to normal file
+                    if lfile not in wctx: # not switched to normal file
                         wvfs.unlinkpath(rellfile, ignoremissing=True)
                     # use normallookup() to allocate an entry in largefiles
                     # dirstate to prevent lfilesrepo.status() from reporting
@@ -486,7 +489,7 @@
                 # largefile is converted back to a normal file: the standin
                 # disappears, but a new (normal) file appears as the lfile.
                 if (wvfs.exists(rellfile) and
-                    repo.dirstate.normalize(lfile) not in repo[None]):
+                    repo.dirstate.normalize(lfile) not in wctx):
                     wvfs.unlinkpath(rellfile)
                     removed += 1
 
--- a/hgext/largefiles/lfutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/lfutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -27,6 +27,7 @@
     pycompat,
     scmutil,
     util,
+    vfs as vfsmod,
 )
 
 shortname = '.hglf'
@@ -144,7 +145,7 @@
     '''
     vfs = repo.vfs
     lfstoredir = longname
-    opener = scmutil.opener(vfs.join(lfstoredir))
+    opener = vfsmod.vfs(vfs.join(lfstoredir))
     lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                      repo.dirstate._validate)
 
@@ -164,16 +165,16 @@
     return lfdirstate
 
 def lfdirstatestatus(lfdirstate, repo):
-    wctx = repo['.']
+    pctx = repo['.']
     match = matchmod.always(repo.root, repo.getcwd())
     unsure, s = lfdirstate.status(match, [], False, False, False)
     modified, clean = s.modified, s.clean
     for lfile in unsure:
         try:
-            fctx = wctx[standin(lfile)]
+            fctx = pctx[standin(lfile)]
         except LookupError:
             fctx = None
-        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
+        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
             modified.append(lfile)
         else:
             clean.append(lfile)
@@ -201,7 +202,7 @@
     file with the given hash.'''
     if not forcelocal and repo.shared():
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
-    return repo.join(longname, hash)
+    return repo.vfs.join(longname, hash)
 
 def findstorepath(repo, hash):
     '''Search through the local store path(s) to find the file for the given
@@ -244,9 +245,9 @@
         return False
     return True
 
-def copytostore(repo, rev, file, uploaded=False):
+def copytostore(repo, ctx, file, fstandin):
     wvfs = repo.wvfs
-    hash = readstandin(repo, file, rev)
+    hash = readasstandin(ctx[fstandin])
     if instore(repo, hash):
         return
     if wvfs.exists(file):
@@ -260,9 +261,9 @@
 
     ctx = repo[node]
     for filename in ctx.files():
-        if isstandin(filename) and filename in ctx.manifest():
-            realfile = splitstandin(filename)
-            copytostore(repo, ctx.node(), realfile)
+        realfile = splitstandin(filename)
+        if realfile is not None and filename in ctx.manifest():
+            copytostore(repo, ctx, realfile, filename)
 
 def copytostoreabsolute(repo, file, hash):
     if inusercache(repo.ui, hash):
@@ -341,19 +342,24 @@
     else:
         return None
 
-def updatestandin(repo, standin):
-    file = repo.wjoin(splitstandin(standin))
-    if repo.wvfs.exists(splitstandin(standin)):
+def updatestandin(repo, lfile, standin):
+    """Re-calculate hash value of lfile and write it into standin
+
+    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
+    """
+    file = repo.wjoin(lfile)
+    if repo.wvfs.exists(lfile):
         hash = hashfile(file)
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
-        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
+        raise error.Abort(_('%s: file not found!') % lfile)
 
-def readstandin(repo, filename, node=None):
-    '''read hex hash from standin for filename at given node, or working
-    directory if no node is given'''
-    return repo[node][standin(filename)].data().strip()
+def readasstandin(fctx):
+    '''read hex hash from given filectx of standin file
+
+    This encapsulates how "standin" data is stored in the storage layer.'''
+    return fctx.data().strip()
 
 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
@@ -368,17 +374,11 @@
         outfile.write(data)
     return hasher.hexdigest()
 
-def hashrepofile(repo, file):
-    return hashfile(repo.wjoin(file))
-
 def hashfile(file):
     if not os.path.exists(file):
         return ''
-    hasher = hashlib.sha1('')
     with open(file, 'rb') as fd:
-        for data in util.filechunkiter(fd):
-            hasher.update(data)
-    return hasher.hexdigest()
+        return hexsha1(fd)
 
 def getexecutable(filename):
     mode = os.stat(filename).st_mode
@@ -399,11 +399,11 @@
         url = join(url, a)
     return url
 
-def hexsha1(data):
+def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object fileobj"""
     h = hashlib.sha1()
-    for chunk in util.filechunkiter(data):
+    for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return h.hexdigest()
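
After this refactor ``hashfile()`` simply opens the file and defers to
``hexsha1()``, which consumes the file object in bounded-memory chunks
via ``util.filechunkiter()``. A hedged usage sketch (``path`` assumed)::

    with open(path, 'rb') as fd:
        digest = hexsha1(fd)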
 
@@ -429,10 +429,11 @@
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
+    wctx = repo[None]
     for standin in repo.dirstate.walk(matcher, [], False, False):
         lfile = splitstandin(standin)
         try:
-            hash = readstandin(repo, lfile)
+            hash = readasstandin(wctx[standin])
         except IOError:
             hash = None
         standins.append((lfile, hash))
@@ -477,12 +478,17 @@
 
     lfdirstate = openlfdirstate(repo.ui, repo)
     for f in ctx.files():
-        if isstandin(f):
-            lfile = splitstandin(f)
+        lfile = splitstandin(f)
+        if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
     lfdirstate.write()
 
     # As part of committing, copy all of the largefiles into the cache.
+    #
+    # Using "node" instead of "ctx" implies additional "repo[node]"
+    # lookup while copyalltostore(), but can omit redundant check for
+    # files comming from the 2nd parent, which should exist in store
+    # at merging.
     copyalltostore(repo, node)
 
 def getlfilestoupdate(oldstandins, newstandins):
@@ -522,7 +528,7 @@
                     files.add(f)
         for fn in files:
             if isstandin(fn) and fn in ctx:
-                addfunc(fn, ctx[fn].data().strip())
+                addfunc(fn, readasstandin(ctx[fn]))
     repo.ui.progress(_('finding outgoing largefiles'), None)
 
 def updatestandinsbymatch(repo, match):
@@ -553,13 +559,13 @@
         # removed/renamed)
         for lfile in lfiles:
             if lfile in modifiedfiles:
-                if repo.wvfs.exists(standin(lfile)):
+                fstandin = standin(lfile)
+                if repo.wvfs.exists(fstandin):
                     # this handles the case where a rebase is being
                     # performed and the working copy is not updated
                     # yet.
                     if repo.wvfs.exists(lfile):
-                        updatestandin(repo,
-                            standin(lfile))
+                        updatestandin(repo, lfile, fstandin)
 
         return match
 
@@ -585,7 +591,7 @@
     for fstandin in standins:
         lfile = splitstandin(fstandin)
         if lfdirstate[lfile] != 'r':
-            updatestandin(repo, fstandin)
+            updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
     # standins corresponding to the big files requested by the
--- a/hgext/largefiles/overrides.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/overrides.py	Tue Apr 18 12:24:34 2017 -0400
@@ -22,8 +22,8 @@
     match as matchmod,
     pathutil,
     registrar,
-    revset,
     scmutil,
+    smartset,
     util,
 )
 
@@ -223,7 +223,7 @@
 
             if not opts.get('dry_run'):
                 if not after:
-                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+                    repo.wvfs.unlinkpath(f, ignoremissing=True)
 
         if opts.get('dry_run'):
             return result
@@ -233,7 +233,7 @@
         # function handle this.
         if not isaddremove:
             for f in remove:
-                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+                repo.wvfs.unlinkpath(f, ignoremissing=True)
         repo[None].forget(remove)
 
         for f in remove:
@@ -351,7 +351,7 @@
             pats.update(fixpats(f, tostandin) for f in p)
         else:
             def tostandin(f):
-                if lfutil.splitstandin(f):
+                if lfutil.isstandin(f):
                     return f
                 return lfutil.standin(f)
             pats.update(fixpats(f, tostandin) for f in p)
@@ -365,10 +365,9 @@
             # support naming a directory on the command line with only
             # largefiles.  The original directory is kept to support normal
             # files.
-            if standin in repo[ctx.node()]:
+            if standin in ctx:
                 m._files[i] = standin
-            elif m._files[i] not in repo[ctx.node()] \
-                    and repo.wvfs.isdir(standin):
+            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                 m._files.append(standin)
 
         m._fileroots = set(m._files)
@@ -554,9 +553,9 @@
         return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                       labels=labels)
 
-    ahash = fca.data().strip().lower()
-    dhash = fcd.data().strip().lower()
-    ohash = fco.data().strip().lower()
+    ahash = lfutil.readasstandin(fca).lower()
+    dhash = lfutil.readasstandin(fcd).lower()
+    ohash = lfutil.readasstandin(fco).lower()
     if (ohash != ahash and
         ohash != dhash and
         (dhash == ahash or
@@ -649,10 +648,13 @@
             m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
             m._fileroots = set(m._files)
             origmatchfn = m.matchfn
-            m.matchfn = lambda f: (lfutil.isstandin(f) and
-                                (f in manifest) and
-                                origmatchfn(lfutil.splitstandin(f)) or
-                                None)
+            def matchfn(f):
+                lfile = lfutil.splitstandin(f)
+                return (lfile is not None and
+                        (f in manifest) and
+                        origmatchfn(lfile) or
+                        None)
+            m.matchfn = matchfn
             return m
         oldmatch = installmatchfn(overridematch)
         listpats = []
@@ -694,7 +696,7 @@
 
                     # The file is gone, but this deletes any empty parent
                     # directories as a side-effect.
-                    util.unlinkpath(repo.wjoin(srclfile), True)
+                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                     lfdirstate.remove(srclfile)
                 else:
                     util.copyfile(repo.wjoin(srclfile),
@@ -734,10 +736,11 @@
         s = lfutil.lfdirstatestatus(lfdirstate, repo)
         lfdirstate.write()
         for lfile in s.modified:
-            lfutil.updatestandin(repo, lfutil.standin(lfile))
+            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
         for lfile in s.deleted:
-            if (repo.wvfs.exists(lfutil.standin(lfile))):
-                repo.wvfs.unlink(lfutil.standin(lfile))
+            fstandin = lfutil.standin(lfile)
+            if (repo.wvfs.exists(fstandin)):
+                repo.wvfs.unlink(fstandin)
 
         oldstandins = lfutil.getstandinsstate(repo)
 
@@ -755,20 +758,23 @@
             lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                                False)
 
-            def tostandin(f):
+            wctx = repo[None]
+            matchfiles = []
+            for f in m._files:
                 standin = lfutil.standin(f)
                 if standin in ctx or standin in mctx:
-                    return standin
-                elif standin in repo[None] or lfdirstate[f] == 'r':
-                    return None
-                return f
-            m._files = [tostandin(f) for f in m._files]
-            m._files = [f for f in m._files if f is not None]
+                    matchfiles.append(standin)
+                elif standin in wctx or lfdirstate[f] == 'r':
+                    continue
+                else:
+                    matchfiles.append(f)
+            m._files = matchfiles
             m._fileroots = set(m._files)
             origmatchfn = m.matchfn
             def matchfn(f):
-                if lfutil.isstandin(f):
-                    return (origmatchfn(lfutil.splitstandin(f)) and
+                lfile = lfutil.splitstandin(f)
+                if lfile is not None:
+                    return (origmatchfn(lfile) and
                             (f in ctx or f in mctx))
                 return origmatchfn(f)
             m.matchfn = matchfn
@@ -855,7 +861,7 @@
         firstpulled = repo.firstpulled
     except AttributeError:
         raise error.Abort(_("pulled() only available in --lfrev"))
-    return revset.baseset([r for r in subset if r >= firstpulled])
+    return smartset.baseset([r for r in subset if r >= firstpulled])
 
 def overrideclone(orig, ui, source, dest=None, **opts):
     d = dest
@@ -897,6 +903,14 @@
 
     return result
 
+def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
+    orig(sourcerepo, destrepo, bookmarks, defaultpath)
+
+    # If largefiles is required for this repo, permanently enable it locally
+    if 'largefiles' in destrepo.requirements:
+        with destrepo.vfs('hgrc', 'a+', text=True) as fp:
+            fp.write('\n[extensions]\nlargefiles=\n')
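
After sharing a repository that requires largefiles, the destination's
.hg/hgrc therefore ends with (assuming the wrapper above ran)::

    [extensions]
    largefiles=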
+
 def overriderebase(orig, ui, repo, **opts):
     if not util.safehasattr(repo, '_largefilesenabled'):
         return orig(ui, repo, **opts)
@@ -968,18 +982,19 @@
     for f in ctx:
         ff = ctx.flags(f)
         getdata = ctx[f].data
-        if lfutil.isstandin(f):
+        lfile = lfutil.splitstandin(f)
+        if lfile is not None:
             if node is not None:
                 path = lfutil.findfile(repo, getdata().strip())
 
                 if path is None:
                     raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
-                       % lfutil.splitstandin(f))
+                       % lfile)
             else:
-                path = lfutil.splitstandin(f)
+                path = lfile
 
-            f = lfutil.splitstandin(f)
+            f = lfile
 
             getdata = lambda: util.readfile(path)
         write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
@@ -993,9 +1008,9 @@
 
     archiver.done()
 
-def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
+def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
     if not repo._repo.lfstatus:
-        return orig(repo, archiver, prefix, match)
+        return orig(repo, archiver, prefix, match, decode)
 
     repo._get(repo._state + ('hg',))
     rev = repo._state[1]
@@ -1010,24 +1025,27 @@
         if match and not match(f):
             return
         data = getdata()
+        if decode:
+            data = repo._repo.wwritedata(name, data)
 
         archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
 
     for f in ctx:
         ff = ctx.flags(f)
         getdata = ctx[f].data
-        if lfutil.isstandin(f):
+        lfile = lfutil.splitstandin(f)
+        if lfile is not None:
             if ctx.node() is not None:
                 path = lfutil.findfile(repo._repo, getdata().strip())
 
                 if path is None:
                     raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
-                       % lfutil.splitstandin(f))
+                       % lfile)
             else:
-                path = lfutil.splitstandin(f)
+                path = lfile
 
-            f = lfutil.splitstandin(f)
+            f = lfile
 
             getdata = lambda: util.readfile(os.path.join(prefix, path))
 
@@ -1037,7 +1055,7 @@
         sub = ctx.workingsub(subpath)
         submatch = matchmod.subdirmatcher(subpath, match)
         sub._repo.lfstatus = True
-        sub.archive(archiver, prefix + repo._path + '/', submatch)
+        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
 
 # If a largefile is modified, the change is not reflected in its
 # standin until a commit. cmdutil.bailifchanged() raises an exception
@@ -1068,12 +1086,13 @@
         s = repo.status(match=m, clean=True)
     finally:
         repo.lfstatus = False
+    manifest = repo[None].manifest()
     forget = sorted(s.modified + s.added + s.deleted + s.clean)
-    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
+    forget = [f for f in forget if lfutil.standin(f) in manifest]
 
     for f in forget:
-        if lfutil.standin(f) not in repo.dirstate and not \
-                repo.wvfs.isdir(lfutil.standin(f)):
+        fstandin = lfutil.standin(f)
+        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
             ui.warn(_('not removing %s: file is already untracked\n')
                     % m.rel(f))
             bad.append(f)
@@ -1094,7 +1113,7 @@
         lfdirstate.write()
         standins = [lfutil.standin(f) for f in forget]
         for f in standins:
-            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+            repo.wvfs.unlinkpath(f, ignoremissing=True)
         rejected = repo[None].forget(standins)
 
     bad.extend(f for f in rejected if f in m.files())
@@ -1346,7 +1365,7 @@
                     data = repo.wwritedata(f, data)
                 fp.write(data)
             else:
-                hash = lfutil.readstandin(repo, lf, ctx.rev())
+                hash = lfutil.readasstandin(ctx[f])
                 if not lfutil.inusercache(repo.ui, hash):
                     store = storefactory.openstore(repo)
                     success, missing = store.get([(lf, hash)])
@@ -1388,19 +1407,25 @@
                                       [], False, True, False)
         oldclean = set(s.clean)
         pctx = repo['.']
+        dctx = repo[node]
         for lfile in unsure + s.modified:
             lfileabs = repo.wvfs.join(lfile)
             if not repo.wvfs.exists(lfileabs):
                 continue
-            lfhash = lfutil.hashrepofile(repo, lfile)
+            lfhash = lfutil.hashfile(lfileabs)
             standin = lfutil.standin(lfile)
             lfutil.writestandin(repo, standin, lfhash,
                                 lfutil.getexecutable(lfileabs))
             if (standin in pctx and
-                lfhash == lfutil.readstandin(repo, lfile, '.')):
+                lfhash == lfutil.readasstandin(pctx[standin])):
                 oldclean.add(lfile)
         for lfile in s.added:
-            lfutil.updatestandin(repo, lfutil.standin(lfile))
+            fstandin = lfutil.standin(lfile)
+            if fstandin not in dctx:
+                # in this case, content of standin file is meaningless
+                # (in dctx, lfile is unknown, or normal file)
+                continue
+            lfutil.updatestandin(repo, lfile, fstandin)
         # mark all clean largefiles as dirty, just in case the update gets
         # interrupted before largefiles and lfdirstate are synchronized
         for lfile in oldclean:
@@ -1431,7 +1456,11 @@
 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
     result = orig(repo, files, *args, **kwargs)
 
-    filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
+    filelist = []
+    for f in files:
+        lf = lfutil.splitstandin(f)
+        if lf is not None:
+            filelist.append(lf)
     if filelist:
         lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                 printmessage=False, normallookup=True)
--- a/hgext/largefiles/reposetup.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/reposetup.py	Tue Apr 18 12:24:34 2017 -0400
@@ -172,7 +172,7 @@
                             if standin not in ctx1:
                                 # from second parent
                                 modified.append(lfile)
-                            elif ctx1[standin].data().strip() \
+                            elif lfutil.readasstandin(ctx1[standin]) \
                                     != lfutil.hashfile(self.wjoin(lfile)):
                                 modified.append(lfile)
                             else:
@@ -188,7 +188,7 @@
                             standin = lfutil.standin(lfile)
                             if standin in ctx1:
                                 abslfile = self.wjoin(lfile)
-                                if ((ctx1[standin].data().strip() !=
+                                if ((lfutil.readasstandin(ctx1[standin]) !=
                                      lfutil.hashfile(abslfile)) or
                                     (checkexec and
                                      ('x' in ctx1.flags(standin)) !=
@@ -272,7 +272,9 @@
         # contents updated to reflect the hash of their largefile.
         # Do that here.
         def commit(self, text="", user=None, date=None, match=None,
-                force=False, editor=False, extra={}):
+                force=False, editor=False, extra=None):
+            if extra is None:
+                extra = {}
             orig = super(lfilesrepo, self).commit
 
             with self.wlock():
--- a/hgext/largefiles/uisetup.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/largefiles/uisetup.py	Tue Apr 18 12:24:34 2017 -0400
@@ -120,6 +120,7 @@
                  _('download all versions of all largefiles'))]
     entry[1].extend(cloneopt)
     entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
+    entry = extensions.wrapfunction(hg, 'postshare', overrides.hgpostshare)
 
     entry = extensions.wrapcommand(commands.table, 'cat',
                                    overrides.overridecat)
--- a/hgext/logtoprocess.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/logtoprocess.py	Tue Apr 18 12:24:34 2017 -0400
@@ -4,7 +4,7 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-"""Send ui.log() data to a subprocess (EXPERIMENTAL)
+"""send ui.log() data to a subprocess (EXPERIMENTAL)
 
 This extension lets you specify a shell command per ui.log() event,
 sending all remaining arguments as environment variables to that command.
--- a/hgext/mq.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/mq.py	Tue Apr 18 12:24:34 2017 -0400
@@ -14,7 +14,7 @@
 Known patches are represented as patch files in the .hg/patches
 directory. Applied patches are both patch files and changesets.
 
-Common tasks (use :hg:`help command` for more details)::
+Common tasks (use :hg:`help COMMAND` for more details)::
 
   create new patch                          qnew
   import existing patch                     qimport
@@ -89,10 +89,12 @@
     phases,
     pycompat,
     registrar,
-    revset,
+    revsetlang,
     scmutil,
+    smartset,
     subrepo,
     util,
+    vfs as vfsmod,
 )
 
 release = lockmod.release
@@ -403,18 +405,12 @@
     if phase is None:
         if repo.ui.configbool('mq', 'secret', False):
             phase = phases.secret
+    overrides = {('ui', 'allowemptycommit'): True}
     if phase is not None:
-        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
-    allowemptybackup = repo.ui.backupconfig('ui', 'allowemptycommit')
-    try:
-        if phase is not None:
-            repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
+        overrides[('phases', 'new-commit')] = phase
+    with repo.ui.configoverride(overrides, 'mq'):
         repo.ui.setconfig('ui', 'allowemptycommit', True)
         return repo.commit(*args, **kwargs)
-    finally:
-        repo.ui.restoreconfig(allowemptybackup)
-        if phase is not None:
-            repo.ui.restoreconfig(phasebackup)
 
 class AbortNoCleanup(error.Abort):
     pass
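The hunk above swaps manual backupconfig()/restoreconfig() bookkeeping for the
ui.configoverride() context manager. A self-contained sketch of that pattern
(a hypothetical minimal ui class, not Mercurial's real one)::

    import contextlib

    _unset = object()

    class miniui(object):
        def __init__(self):
            self._config = {}

        def setconfig(self, section, name, value, source=''):
            self._config[(section, name)] = value

        @contextlib.contextmanager
        def configoverride(self, overrides, source=''):
            # remember the old values, install the overrides, and restore
            # the originals even if the body raises
            backups = dict((k, self._config.get(k, _unset)) for k in overrides)
            try:
                for (section, name), value in overrides.items():
                    self.setconfig(section, name, value, source)
                yield
            finally:
                for key, old in backups.items():
                    if old is _unset:
                        self._config.pop(key, None)
                    else:
                        self._config[key] = old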
@@ -433,7 +429,7 @@
         except IOError:
             curpath = os.path.join(path, 'patches')
         self.path = patchdir or curpath
-        self.opener = scmutil.opener(self.path)
+        self.opener = vfsmod.vfs(self.path)
         self.ui = ui
         self.baseui = baseui
         self.applieddirty = False
@@ -719,7 +715,9 @@
                     util.rename(absf, absorig)
 
     def printdiff(self, repo, diffopts, node1, node2=None, files=None,
-                  fp=None, changes=None, opts={}):
+                  fp=None, changes=None, opts=None):
+        if opts is None:
+            opts = {}
         stat = opts.get('stat')
         m = scmutil.match(repo[node1], files, opts)
         cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2,  m,
@@ -1118,6 +1116,10 @@
         if name in self._reserved:
             raise error.Abort(_('"%s" cannot be used as the name of a patch')
                              % name)
+        if name != name.strip():
+            # whitespace is stripped by parseseries()
+            raise error.Abort(_('patch name cannot begin or end with '
+                                'whitespace'))
         for prefix in ('.hg', '.mq'):
             if name.startswith(prefix):
                 raise error.Abort(_('patch name cannot begin with "%s"')
@@ -1477,7 +1479,7 @@
                 # created while patching
                 for f in all_files:
                     if f not in repo.dirstate:
-                        util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                 self.ui.warn(_('done\n'))
                 raise
 
@@ -1580,7 +1582,7 @@
                 self.backup(repo, tobackup)
                 repo.dirstate.beginparentchange()
                 for f in a:
-                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                     repo.dirstate.drop(f)
                 for f in m + r:
                     fctx = ctx[f]
@@ -2675,6 +2677,7 @@
 
     Returns 0 on success.
     """
+    ui.pager('qdiff')
     repo.mq.diff(repo, pats, opts)
     return 0
 
@@ -2917,7 +2920,7 @@
     opts = fixkeepchangesopts(ui, opts)
     if opts.get('merge'):
         if opts.get('name'):
-            newpath = repo.join(opts.get('name'))
+            newpath = repo.vfs.join(opts.get('name'))
         else:
             newpath, i = lastsavename(q.path)
         if not newpath:
@@ -2957,7 +2960,7 @@
     opts = fixkeepchangesopts(ui, opts)
     localupdate = True
     if opts.get('name'):
-        q = queue(ui, repo.baseui, repo.path, repo.join(opts.get('name')))
+        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get('name')))
         ui.warn(_('using patch queue: %s\n') % q.path)
         localupdate = False
     else:
@@ -3311,9 +3314,9 @@
 
     def _queuedir(name):
         if name == 'patches':
-            return repo.join('patches')
+            return repo.vfs.join('patches')
         else:
-            return repo.join('patches-' + name)
+            return repo.vfs.join('patches-' + name)
 
     def _validname(name):
         for n in name:
@@ -3336,7 +3339,7 @@
                 continue
             fh.write('%s\n' % (queue,))
         fh.close()
-        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
+        repo.vfs.rename('patches.queues.new', _allqueues)
 
     if not name or opts.get('list') or opts.get('active'):
         current = _getcurrent()
@@ -3389,7 +3392,7 @@
                 else:
                     fh.write('%s\n' % (queue,))
             fh.close()
-            util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
+            repo.vfs.rename('patches.queues.new', _allqueues)
             _setactivenocheck(name)
         elif opts.get('delete'):
             _delete(name)
@@ -3435,7 +3438,9 @@
                     raise error.Abort(errmsg)
 
         def commit(self, text="", user=None, date=None, match=None,
-                   force=False, editor=False, extra={}):
+                   force=False, editor=False, extra=None):
+            if extra is None:
+                extra = {}
             self.abortifwdirpatched(
                 _('cannot commit over an applied mq patch'),
                 force)
@@ -3567,9 +3572,9 @@
 def revsetmq(repo, subset, x):
     """Changesets managed by MQ.
     """
-    revset.getargs(x, 0, 0, _("mq takes no arguments"))
+    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
     applied = set([repo[r.node].rev() for r in repo.mq.applied])
-    return revset.baseset([r for r in subset if r in applied])
+    return smartset.baseset([r for r in subset if r in applied])
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]
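For reference, the predicate defined above is what backs queries such as
(usage example, assuming the mq extension is enabled)::

    hg log -r "mq()"          # changesets currently managed by mq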
--- a/hgext/pager.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/pager.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,68 +12,22 @@
 #
 # Run 'hg help pager' to get info on configuration.
 
-'''browse command output with an external pager
-
-To set the pager that should be used, set the application variable::
-
-  [pager]
-  pager = less -FRX
-
-If no pager is set, the pager extensions uses the environment variable
-$PAGER. If neither pager.pager, nor $PAGER is set, no pager is used.
-
-You can disable the pager for certain commands by adding them to the
-pager.ignore list::
+'''browse command output with an external pager (DEPRECATED)
 
-  [pager]
-  ignore = version, help, update
-
-You can also enable the pager only for certain commands using
-pager.attend. Below is the default list of commands to be paged::
-
-  [pager]
-  attend = annotate, cat, diff, export, glog, log, qdiff
-
-Setting pager.attend to an empty value will cause all commands to be
-paged.
-
-If pager.attend is present, pager.ignore will be ignored.
-
-Lastly, you can enable and disable paging for individual commands with
-the attend-<command> option. This setting takes precedence over
-existing attend and ignore options and defaults::
+Use the attend-<command> option to forcibly enable or disable paging
+for individual commands that would not otherwise request it. This
+setting takes precedence over the ignore list and the defaults::
 
   [pager]
   attend-cat = false
-
-To ignore global commands like :hg:`version` or :hg:`help`, you have
-to specify them in your user configuration file.
-
-To control whether the pager is used at all for an individual command,
-you can use --pager=<value>::
-
-  - use as needed: `auto`.
-  - require the pager: `yes` or `on`.
-  - suppress the pager: `no` or `off` (any unrecognized value
-  will also work).
-
 '''
 from __future__ import absolute_import
 
-import atexit
-import os
-import signal
-import subprocess
-import sys
-
-from mercurial.i18n import _
 from mercurial import (
     cmdutil,
     commands,
     dispatch,
-    encoding,
     extensions,
-    util,
     )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -82,58 +36,12 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
-def _runpager(ui, p):
-    pager = subprocess.Popen(p, shell=True, bufsize=-1,
-                             close_fds=util.closefds, stdin=subprocess.PIPE,
-                             stdout=util.stdout, stderr=util.stderr)
-
-    # back up original file objects and descriptors
-    olduifout = ui.fout
-    oldstdout = util.stdout
-    stdoutfd = os.dup(util.stdout.fileno())
-    stderrfd = os.dup(util.stderr.fileno())
-
-    # create new line-buffered stdout so that output can show up immediately
-    ui.fout = util.stdout = newstdout = os.fdopen(util.stdout.fileno(), 'wb', 1)
-    os.dup2(pager.stdin.fileno(), util.stdout.fileno())
-    if ui._isatty(util.stderr):
-        os.dup2(pager.stdin.fileno(), util.stderr.fileno())
-
-    @atexit.register
-    def killpager():
-        if util.safehasattr(signal, "SIGINT"):
-            signal.signal(signal.SIGINT, signal.SIG_IGN)
-        pager.stdin.close()
-        ui.fout = olduifout
-        util.stdout = oldstdout
-        # close new stdout while it's associated with pager; otherwise stdout
-        # fd would be closed when newstdout is deleted
-        newstdout.close()
-        # restore original fds: stdout is open again
-        os.dup2(stdoutfd, util.stdout.fileno())
-        os.dup2(stderrfd, util.stderr.fileno())
-        pager.wait()
-
 def uisetup(ui):
-    class pagerui(ui.__class__):
-        def _runpager(self, pagercmd):
-            _runpager(self, pagercmd)
-
-    ui.__class__ = pagerui
 
     def pagecmd(orig, ui, options, cmd, cmdfunc):
-        p = ui.config("pager", "pager", encoding.environ.get("PAGER"))
-        usepager = False
-        always = util.parsebool(options['pager'])
         auto = options['pager'] == 'auto'
-
-        if not p or '--debugger' in sys.argv or not ui.formatted():
-            pass
-        elif always:
-            usepager = True
-        elif not auto:
+        if auto and not ui.pageractive:
             usepager = False
-        else:
             attend = ui.configlist('pager', 'attend', attended)
             ignore = ui.configlist('pager', 'ignore')
             cmds, _ = cmdutil.findcmd(cmd, commands.table)
@@ -148,27 +56,18 @@
                     usepager = True
                     break
 
-        setattr(ui, 'pageractive', usepager)
-
-        if usepager:
-            ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
-            ui.setconfig('ui', 'interactive', False, 'pager')
-            if util.safehasattr(signal, "SIGPIPE"):
-                signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-            ui._runpager(p)
+            if usepager:
+                # Slight hack: the attend list is supposed to override
+                # the ignore list for the pager extension, but the
+                # core code doesn't know about attend, so we have to
+                # lobotomize the ignore list so that the extension's
+                # behavior is preserved.
+                ui.setconfig('pager', 'ignore', '', 'pager')
+                ui.pager('extension-via-attend-' + cmd)
+            else:
+                ui.disablepager()
         return orig(ui, options, cmd, cmdfunc)
 
-    # Wrap dispatch._runcommand after color is loaded so color can see
-    # ui.pageractive. Otherwise, if we loaded first, color's wrapped
-    # dispatch._runcommand would run without having access to ui.pageractive.
-    def afterloaded(loaded):
-        extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
-    extensions.afterloaded('color', afterloaded)
-
-def extsetup(ui):
-    commands.globalopts.append(
-        ('', 'pager', 'auto',
-         _("when to paginate (boolean, always, auto, or never)"),
-         _('TYPE')))
+    extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
 
 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
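A standalone sketch of the attend/ignore precedence the rewritten pagecmd()
preserves (shouldpage is an invented helper; the config is represented as a
plain dict rather than a real ui object)::

    ATTENDED = ('annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff')

    def shouldpage(cfg, cmd):
        # attend-<command> beats everything else
        percmd = cfg.get('attend-' + cmd)
        if percmd is not None:
            return percmd
        attend = cfg.get('attend', list(ATTENDED))
        ignore = cfg.get('ignore', [])
        # page commands on the attend list, or, when attend was explicitly
        # emptied, anything not on the ignore list
        return cmd in attend or (not attend and cmd not in ignore)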
--- a/hgext/patchbomb.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/patchbomb.py	Tue Apr 18 12:24:34 2017 -0400
@@ -60,6 +60,14 @@
   intro=never  # never include an introduction message
   intro=always # always include an introduction message
 
+You can specify a template for flags to be added to subject prefixes. Flags
+specified by the --flag option are exported as the ``{flags}`` keyword::
+
+  [patchbomb]
+  flagtemplate = "{separate(' ',
+                            ifeq(branch, 'default', '', branch|upper),
+                            flags)}"
+
 You can set patchbomb to always ask for confirmation by setting
 ``patchbomb.confirm`` to true.
 '''
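To illustrate what the template above produces, here is a plain-Python
approximation of its two template functions (this mimics, rather than uses,
Mercurial's templater)::

    def separate(sep, *parts):
        # like the templater's separate(): skip empty values when joining
        return sep.join(p for p in parts if p)

    def flagstr(branch, flags):
        branchflag = '' if branch == 'default' else branch.upper()
        return separate(' ', branchflag, ' '.join(flags))

    flagstr('default', ['V2'])   # -> 'V2'      => subject '[PATCH V2] ...'
    flagstr('stable', [])        # -> 'STABLE'  => subject '[PATCH STABLE] ...'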
@@ -75,13 +83,14 @@
 from mercurial import (
     cmdutil,
     commands,
-    encoding,
     error,
+    formatter,
     hg,
     mail,
     node as nodemod,
     patch,
     scmutil,
+    templater,
     util,
 )
 stringio = util.stringio
@@ -135,7 +144,32 @@
         intro = 1 < number
     return intro
 
-def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
+def _formatflags(ui, repo, rev, flags):
+    """build flag string optionally by template"""
+    tmpl = ui.config('patchbomb', 'flagtemplate')
+    if not tmpl:
+        return ' '.join(flags)
+    out = util.stringio()
+    opts = {'template': templater.unquotestring(tmpl)}
+    with formatter.templateformatter(ui, out, 'patchbombflag', opts) as fm:
+        fm.startitem()
+        fm.context(ctx=repo[rev])
+        fm.write('flags', '%s', fm.formatlist(flags, name='flag'))
+    return out.getvalue()
+
+def _formatprefix(ui, repo, rev, flags, idx, total, numbered):
+    """build prefix to patch subject"""
+    flag = _formatflags(ui, repo, rev, flags)
+    if flag:
+        flag = ' ' + flag
+
+    if not numbered:
+        return '[PATCH%s]' % flag
+    else:
+        tlen = len(str(total))
+        return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
+
+def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered,
               patchname=None):
 
     desc = []
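The numbered branch of _formatprefix() pads the index to the width of the
series total, so subjects align and sort naturally. For example (a plain-Python
illustration of the format string above)::

    total, idx, flag = 12, 3, ' V2'
    tlen = len(str(total))
    '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag)
    # -> '[PATCH 03 of 12 V2]'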
@@ -202,16 +236,13 @@
     else:
         msg = mail.mimetextpatch(body, display=opts.get('test'))
 
-    flag = ' '.join(opts.get('flag'))
-    if flag:
-        flag = ' ' + flag
-
+    prefix = _formatprefix(ui, repo, rev, opts.get('flag'), idx, total,
+                           numbered)
     subj = desc[0].strip().rstrip('. ')
     if not numbered:
-        subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
+        subj = ' '.join([prefix, opts.get('subject') or subj])
     else:
-        tlen = len(str(total))
-        subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj)
+        subj = ' '.join([prefix, subj])
     msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
     msg['X-Mercurial-Node'] = node
     msg['X-Mercurial-Series-Index'] = '%i' % idx
@@ -303,19 +334,16 @@
     msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
     return [(msg, subj, None)]
 
-def _makeintro(repo, sender, patches, **opts):
+def _makeintro(repo, sender, revs, patches, **opts):
     """make an introduction email, asking the user for content if needed
 
     email is returned as (subject, body, cumulative-diffstat)"""
     ui = repo.ui
     _charsets = mail._charsets(ui)
-    tlen = len(str(len(patches)))
 
-    flag = opts.get('flag') or ''
-    if flag:
-        flag = ' ' + ' '.join(flag)
-    prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
-
+    # use the last revision which is likely to be a bookmarked head
+    prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
+                           0, len(patches), numbered=True)
     subj = (opts.get('subject') or
             prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
     if not subj:
@@ -337,7 +365,7 @@
                                      opts.get('test'))
     return (msg, subj, diffstat)
 
-def _getpatchmsgs(repo, sender, patches, patchnames=None, **opts):
+def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
     """return a list of emails from a list of patches
 
     This involves introduction message creation if necessary.
@@ -346,6 +374,7 @@
     """
     ui = repo.ui
     _charsets = mail._charsets(ui)
+    patches = list(_getpatches(repo, revs, **opts))
     msgs = []
 
     ui.write(_('this patch series consists of %d patches.\n\n')
@@ -353,7 +382,7 @@
 
     # build the intro message, or skip it if the user declines
     if introwanted(ui, opts, len(patches)):
-        msg = _makeintro(repo, sender, patches, **opts)
+        msg = _makeintro(repo, sender, revs, patches, **opts)
         if msg:
             msgs.append(msg)
 
@@ -362,10 +391,11 @@
 
     # now generate the actual patch messages
     name = None
-    for i, p in enumerate(patches):
+    assert len(revs) == len(patches)
+    for i, (r, p) in enumerate(zip(revs, patches)):
         if patchnames:
             name = patchnames[i]
-        msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
+        msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
                         len(patches), numbered, name)
         msgs.append(msg)
 
@@ -467,9 +497,7 @@
     With -n/--test, all steps will run, but mail will not be sent.
     You will be prompted for an email recipient address, a subject and
     an introductory message describing the patches of your patchbomb.
-    Then when all is done, patchbomb messages are displayed. If the
-    PAGER environment variable is set, your pager will be fired up once
-    for each patchbomb message, so you can verify everything is alright.
+    Then when all is done, patchbomb messages are displayed.
 
     In case email sending fails, you will find a backup of your series
     introductory message in ``.hg/last-email.txt``.
@@ -511,14 +539,12 @@
     mbox = opts.get('mbox')
     outgoing = opts.get('outgoing')
     rev = opts.get('rev')
-    # internal option used by pbranches
-    patches = opts.get('patches')
 
     if not (opts.get('test') or mbox):
         # really sending
         mail.validateconfig(ui)
 
-    if not (revs or rev or outgoing or bundle or patches):
+    if not (revs or rev or outgoing or bundle):
         raise error.Abort(_('specify at least one changeset with -r or -o'))
 
     if outgoing and bundle:
@@ -590,17 +616,13 @@
               ui.config('patchbomb', 'from') or
               prompt(ui, 'From', ui.username()))
 
-    if patches:
-        msgs = _getpatchmsgs(repo, sender, patches, opts.get('patchnames'),
-                             **opts)
-    elif bundle:
+    if bundle:
         bundledata = _getbundle(repo, dest, **opts)
         bundleopts = opts.copy()
         bundleopts.pop('bundle', None)  # already processed
         msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
     else:
-        _patches = list(_getpatches(repo, revs, **opts))
-        msgs = _getpatchmsgs(repo, sender, _patches, **opts)
+        msgs = _getpatchmsgs(repo, sender, revs, **opts)
 
     showaddrs = []
 
@@ -693,20 +715,14 @@
             m['Reply-To'] = ', '.join(replyto)
         if opts.get('test'):
             ui.status(_('displaying '), subj, ' ...\n')
-            ui.flush()
-            if 'PAGER' in encoding.environ and not ui.plain():
-                fp = util.popen(encoding.environ['PAGER'], 'w')
-            else:
-                fp = ui
-            generator = emailmod.Generator.Generator(fp, mangle_from_=False)
+            ui.pager('email')
+            generator = emailmod.Generator.Generator(ui, mangle_from_=False)
             try:
                 generator.flatten(m, 0)
-                fp.write('\n')
+                ui.write('\n')
             except IOError as inst:
                 if inst.errno != errno.EPIPE:
                     raise
-            if fp is not ui:
-                fp.close()
         else:
             if not sendmail:
                 sendmail = mail.connect(ui, mbox=mbox)
--- a/hgext/rebase.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/rebase.py	Tue Apr 18 12:24:34 2017 -0400
@@ -47,6 +47,7 @@
     repoview,
     revset,
     scmutil,
+    smartset,
     util,
 )
 
@@ -118,8 +119,8 @@
     # i18n: "_rebasedefaultdest" is a keyword
     sourceset = None
     if x is not None:
-        sourceset = revset.getset(repo, revset.fullreposet(repo), x)
-    return subset & revset.baseset([_destrebase(repo, sourceset)])
+        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
+    return subset & smartset.baseset([_destrebase(repo, sourceset)])
 
 class rebaseruntime(object):
     """This class is a container for rebase runtime state"""
@@ -158,6 +159,37 @@
         self.keepopen = opts.get('keepopen', False)
         self.obsoletenotrebased = {}
 
+    def storestatus(self, tr=None):
+        """Store the current status to allow recovery"""
+        if tr:
+            tr.addfilegenerator('rebasestate', ('rebasestate',),
+                                self._writestatus, location='plain')
+        else:
+            with self.repo.vfs("rebasestate", "w") as f:
+                self._writestatus(f)
+
+    def _writestatus(self, f):
+        repo = self.repo.unfiltered()
+        f.write(repo[self.originalwd].hex() + '\n')
+        f.write(repo[self.target].hex() + '\n')
+        f.write(repo[self.external].hex() + '\n')
+        f.write('%d\n' % int(self.collapsef))
+        f.write('%d\n' % int(self.keepf))
+        f.write('%d\n' % int(self.keepbranchesf))
+        f.write('%s\n' % (self.activebookmark or ''))
+        for d, v in self.state.iteritems():
+            oldrev = repo[d].hex()
+            if v >= 0:
+                newrev = repo[v].hex()
+            elif v == revtodo:
+                # To maintain format compatibility, we have to use nullid.
+                # Please do remove this special case when upgrading the format.
+                newrev = hex(nullid)
+            else:
+                newrev = v
+            f.write("%s:%s\n" % (oldrev, newrev))
+        repo.ui.debug('rebase status stored\n')
+
     def restorestatus(self):
         """Restore a previously stored status"""
         repo = self.repo
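The file layout written by _writestatus() above is line-oriented; a minimal
sketch of a reader for it (a hypothetical helper mirroring restorestatus(),
not code from this patch)::

    def parserebasestate(data):
        lines = data.splitlines()
        return {
            'originalwd':     lines[0],            # hex node
            'target':         lines[1],            # hex node
            'external':       lines[2],            # hex node
            'collapse':       bool(int(lines[3])),
            'keep':           bool(int(lines[4])),
            'keepbranches':   bool(int(lines[5])),
            'activebookmark': lines[6] or None,
            # remaining lines map old revision to new revision,
            # one "oldhex:newhex" pair per line
            'revmap': dict(l.split(':', 1) for l in lines[7:]),
        }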
@@ -218,7 +250,7 @@
         repo.ui.debug('computed skipped revs: %s\n' %
                         (' '.join(str(r) for r in sorted(skipped)) or None))
         repo.ui.debug('rebase status resumed\n')
-        _setrebasesetvisibility(repo, state.keys())
+        _setrebasesetvisibility(repo, set(state.keys()) | set([originalwd]))
 
         self.originalwd = originalwd
         self.target = target
@@ -251,7 +283,7 @@
     def _prepareabortorcontinue(self, isabort):
         try:
             self.restorestatus()
-            self.collapsemsg = restorecollapsemsg(self.repo)
+            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
         except error.RepoLookupError:
             if isabort:
                 clearstatus(self.repo)
@@ -294,11 +326,11 @@
             self.ui.status(_('nothing to rebase\n'))
             return _nothingtorebase()
 
-        root = min(rebaseset)
-        if not self.keepf and not self.repo[root].mutable():
-            raise error.Abort(_("can't rebase public changeset %s")
-                             % self.repo[root],
-                             hint=_("see 'hg help phases' for details"))
+        for root in self.repo.set('roots(%ld)', rebaseset):
+            if not self.keepf and not root.mutable():
+                raise error.Abort(_("can't rebase public changeset %s")
+                                  % root,
+                                  hint=_("see 'hg help phases' for details"))
 
         (self.originalwd, self.target, self.state) = result
         if self.collapsef:
@@ -311,7 +343,7 @@
         if dest.closesbranch() and not self.keepbranchesf:
             self.ui.status(_('reopening closed branch head %s\n') % dest)
 
-    def _performrebase(self):
+    def _performrebase(self, tr):
         repo, ui, opts = self.repo, self.ui, self.opts
         if self.keepbranchesf:
             # insert _savebranch at the start of extrafns so if
@@ -337,6 +369,10 @@
         if self.activebookmark:
             bookmarks.deactivate(repo)
 
+        # Store the state before we begin so users can run 'hg rebase --abort'
+        # if we fail before the transaction closes.
+        self.storestatus()
+
         sortedrevs = repo.revs('sort(%ld, -topo)', self.state)
         cands = [k for k, v in self.state.iteritems() if v == revtodo]
         total = len(cands)
@@ -357,10 +393,7 @@
                                              self.state,
                                              self.targetancestors,
                                              self.obsoletenotrebased)
-                storestatus(repo, self.originalwd, self.target,
-                            self.state, self.collapsef, self.keepf,
-                            self.keepbranchesf, self.external,
-                            self.activebookmark)
+                self.storestatus(tr=tr)
                 storecollapsemsg(repo, self.collapsemsg)
                 if len(repo[None].parents()) == 2:
                     repo.ui.debug('resuming interrupted rebase\n')
@@ -442,12 +475,24 @@
                 editopt = True
             editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
             revtoreuse = max(self.state)
-            newnode = concludenode(repo, revtoreuse, p1, self.external,
-                                   commitmsg=commitmsg,
-                                   extrafn=_makeextrafn(self.extrafns),
-                                   editor=editor,
-                                   keepbranches=self.keepbranchesf,
-                                   date=self.date)
+            dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+            try:
+                newnode = concludenode(repo, revtoreuse, p1, self.external,
+                                       commitmsg=commitmsg,
+                                       extrafn=_makeextrafn(self.extrafns),
+                                       editor=editor,
+                                       keepbranches=self.keepbranchesf,
+                                       date=self.date)
+                dsguard.close()
+                release(dsguard)
+            except error.InterventionRequired:
+                dsguard.close()
+                release(dsguard)
+                raise
+            except Exception:
+                release(dsguard)
+                raise
+
             if newnode is None:
                 newrev = self.target
             else:
@@ -617,6 +662,16 @@
 
           hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
 
+    Configuration Options:
+
+    You can make rebase require a destination if you set the following config
+    option::
+
+      [commands]
+      rebase.requiredest = True
+
+    Return Values:
+
     Returns 0 on success, 1 if nothing to rebase or there are
     unresolved conflicts.
 
@@ -678,15 +733,31 @@
             if retcode is not None:
                 return retcode
 
-        rbsrt._performrebase()
+        with repo.transaction('rebase') as tr:
+            dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+            try:
+                rbsrt._performrebase(tr)
+                dsguard.close()
+                release(dsguard)
+            except error.InterventionRequired:
+                dsguard.close()
+                release(dsguard)
+                tr.close()
+                raise
+            except Exception:
+                release(dsguard)
+                raise
         rbsrt._finishrebase()
     finally:
         release(lock, wlock)
 
-def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=[],
+def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=None,
                 destspace=None):
     """use revisions argument to define destination and rebase set
     """
+    if revf is None:
+        revf = []
+
     # destspace is here to work around issues with `hg pull --rebase` see
     # issue5214 for details
     if srcf and basef:
@@ -699,6 +770,10 @@
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
 
+    if ui.configbool('commands', 'rebase.requiredest') and not destf:
+        raise error.Abort(_('you must specify a destination'),
+                          hint=_('use: hg rebase -d REV'))
+
     if destf:
         dest = scmutil.revsingle(repo, destf)
 
@@ -799,36 +874,28 @@
     '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
     but also store useful information in extra.
     Return node of committed revision.'''
-    dsguard = dirstateguard.dirstateguard(repo, 'rebase')
-    try:
-        repo.setparents(repo[p1].node(), repo[p2].node())
-        ctx = repo[rev]
-        if commitmsg is None:
-            commitmsg = ctx.description()
-        keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
-        extra = {'rebase_source': ctx.hex()}
-        if extrafn:
-            extrafn(ctx, extra)
+    repo.setparents(repo[p1].node(), repo[p2].node())
+    ctx = repo[rev]
+    if commitmsg is None:
+        commitmsg = ctx.description()
+    keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
+    extra = {'rebase_source': ctx.hex()}
+    if extrafn:
+        extrafn(ctx, extra)
 
-        backup = repo.ui.backupconfig('phases', 'new-commit')
-        try:
-            targetphase = max(ctx.phase(), phases.draft)
-            repo.ui.setconfig('phases', 'new-commit', targetphase, 'rebase')
-            if keepbranch:
-                repo.ui.setconfig('ui', 'allowemptycommit', True)
-            # Commit might fail if unresolved files exist
-            if date is None:
-                date = ctx.date()
-            newnode = repo.commit(text=commitmsg, user=ctx.user(),
-                                  date=date, extra=extra, editor=editor)
-        finally:
-            repo.ui.restoreconfig(backup)
+    targetphase = max(ctx.phase(), phases.draft)
+    overrides = {('phases', 'new-commit'): targetphase}
+    with repo.ui.configoverride(overrides, 'rebase'):
+        if keepbranch:
+            repo.ui.setconfig('ui', 'allowemptycommit', True)
+        # Commit might fail if unresolved files exist
+        if date is None:
+            date = ctx.date()
+        newnode = repo.commit(text=commitmsg, user=ctx.user(),
+                              date=date, extra=extra, editor=editor)
 
-        repo.dirstate.setbranch(repo[newnode].branch())
-        dsguard.close()
-        return newnode
-    finally:
-        release(dsguard)
+    repo.dirstate.setbranch(repo[newnode].branch())
+    return newnode
 
 def rebasenode(repo, rev, p1, base, state, collapse, target):
     'Rebase a single revision rev on top of p1 using base as merge ancestor'
@@ -1061,9 +1128,9 @@
 
 def clearcollapsemsg(repo):
     'Remove collapse message file'
-    util.unlinkpath(repo.join("last-message.txt"), ignoremissing=True)
+    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)
 
-def restorecollapsemsg(repo):
+def restorecollapsemsg(repo, isabort):
     'Restore previously stored collapse message'
     try:
         f = repo.vfs("last-message.txt")
@@ -1072,38 +1139,17 @@
     except IOError as err:
         if err.errno != errno.ENOENT:
             raise
-        raise error.Abort(_('no rebase in progress'))
+        if isabort:
+            # Oh well, just abort like normal
+            collapsemsg = ''
+        else:
+            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
     return collapsemsg
 
-def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
-                external, activebookmark):
-    'Store the current status to allow recovery'
-    f = repo.vfs("rebasestate", "w")
-    f.write(repo[originalwd].hex() + '\n')
-    f.write(repo[target].hex() + '\n')
-    f.write(repo[external].hex() + '\n')
-    f.write('%d\n' % int(collapse))
-    f.write('%d\n' % int(keep))
-    f.write('%d\n' % int(keepbranches))
-    f.write('%s\n' % (activebookmark or ''))
-    for d, v in state.iteritems():
-        oldrev = repo[d].hex()
-        if v >= 0:
-            newrev = repo[v].hex()
-        elif v == revtodo:
-            # To maintain format compatibility, we have to use nullid.
-            # Please do remove this special case when upgrading the format.
-            newrev = hex(nullid)
-        else:
-            newrev = v
-        f.write("%s:%s\n" % (oldrev, newrev))
-    f.close()
-    repo.ui.debug('rebase status stored\n')
-
 def clearstatus(repo):
     'Remove the status files'
     _clearrebasesetvisibiliy(repo)
-    util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
+    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)
 
 def needupdate(repo, state):
     '''check whether we should `update --clean` away from a merge, or if
@@ -1155,8 +1201,11 @@
             if rebased:
                 strippoints = [
                         c.node() for c in repo.set('roots(%ld)', rebased)]
-                shouldupdate = len([
-                        c.node() for c in repo.set('. & (%ld)', rebased)]) > 0
+
+            updateifonnodes = set(rebased)
+            updateifonnodes.add(target)
+            updateifonnodes.add(originalwd)
+            shouldupdate = repo['.'].rev() in updateifonnodes
 
             # Update away from the rebase if necessary
             if shouldupdate or needupdate(repo, state):
@@ -1183,7 +1232,8 @@
     dest: context
     rebaseset: set of rev
     '''
-    _setrebasesetvisibility(repo, rebaseset)
+    originalwd = repo['.'].rev()
+    _setrebasesetvisibility(repo, set(rebaseset) | set([originalwd]))
 
     # This check isn't strictly necessary, since mq detects commits over an
     # applied patch. But it prevents messing up the working directory when
@@ -1203,7 +1253,12 @@
         if commonbase == root:
             raise error.Abort(_('source is ancestor of destination'))
         if commonbase == dest:
-            samebranch = root.branch() == dest.branch()
+            wctx = repo[None]
+            if dest == wctx.p1():
+                # when rebasing to '.', it will use the current wd branch name
+                samebranch = root.branch() == wctx.branch()
+            else:
+                samebranch = root.branch() == dest.branch()
             if not collapse and samebranch and root in dest.children():
                 repo.ui.debug('source is a child of destination\n')
                 return None
@@ -1268,7 +1323,7 @@
             state[r] = revpruned
         else:
             state[r] = revprecursor
-    return repo['.'].rev(), dest.rev(), state
+    return originalwd, dest.rev(), state
 
 def clearrebased(ui, repo, state, skipped, collapsedas=None):
     """dispose of rebased revision at the end of the rebase
@@ -1307,6 +1362,11 @@
     'Call rebase after pull if the latter has been invoked with --rebase'
     ret = None
     if opts.get('rebase'):
+        if ui.configbool('commands', 'rebase.requiredest'):
+            msg = _('rebase destination required by configuration')
+            hint = _('use hg pull followed by hg rebase -d DEST')
+            raise error.Abort(msg, hint=hint)
+
         wlock = lock = None
         try:
             wlock = repo.wlock()
@@ -1367,9 +1427,8 @@
     """store the currently rebased set on the repo object
 
     This is used by another function to prevent rebased revisions from becoming
-    hidden (see issue4505)"""
+    hidden (see issue4504)"""
     repo = repo.unfiltered()
-    revs = set(revs)
     repo._rebaseset = revs
     # invalidate cache if visibility changes
     hiddens = repo.filteredrevcache.get('visible', set())
@@ -1383,7 +1442,7 @@
         del repo._rebaseset
 
 def _rebasedvisible(orig, repo):
-    """ensure rebased revs stay visible (see issue4505)"""
+    """ensure rebased revs stay visible (see issue4504)"""
     blockers = orig(repo)
     blockers.update(getattr(repo, '_rebaseset', ()))
     return blockers
--- a/hgext/record.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/record.py	Tue Apr 18 12:24:34 2017 -0400
@@ -68,12 +68,9 @@
                          'commit')
 
     opts["interactive"] = True
-    backup = ui.backupconfig('experimental', 'crecord')
-    try:
-        ui.setconfig('experimental', 'crecord', False, 'record')
+    overrides = {('experimental', 'crecord'): False}
+    with ui.configoverride(overrides, 'record'):
         return commands.commit(ui, repo, *pats, **opts)
-    finally:
-        ui.restoreconfig(backup)
 
 def qrefresh(origfn, ui, repo, *pats, **opts):
     if not opts['interactive']:
@@ -117,13 +114,10 @@
         opts['checkname'] = False
         mq.new(ui, repo, patch, *pats, **opts)
 
-    backup = ui.backupconfig('experimental', 'crecord')
-    try:
-        ui.setconfig('experimental', 'crecord', False, 'record')
+    overrides = {('experimental', 'crecord'): False}
+    with ui.configoverride(overrides, 'record'):
         cmdutil.dorecord(ui, repo, committomq, cmdsuggest, False,
                          cmdutil.recordfilter, *pats, **opts)
-    finally:
-        ui.restoreconfig(backup)
 
 def qnew(origfn, ui, repo, patch, *args, **opts):
     if opts['interactive']:
--- a/hgext/schemes.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/schemes.py	Tue Apr 18 12:24:34 2017 -0400
@@ -63,6 +63,7 @@
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
+_partre = re.compile(br'\{(\d+)\}')
 
 class ShortRepository(object):
     def __init__(self, url, scheme, templater):
@@ -70,7 +71,7 @@
         self.templater = templater
         self.url = url
         try:
-            self.parts = max(map(int, re.findall(r'\{(\d+)\}', self.url)))
+            self.parts = max(map(int, _partre.findall(self.url)))
         except ValueError:
             self.parts = 0
 
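Hoisting the pattern to module level avoids recompiling it per repository; its
job is to find the highest positional placeholder in a scheme template. For
example (a str variant of _partre, for brevity)::

    import re

    partre = re.compile(r'\{(\d+)\}')

    url = 'ssh://server/{1}/{2}'
    max(map(int, partre.findall(url)))   # -> 2: the scheme consumes two parts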
--- a/hgext/share.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/share.py	Tue Apr 18 12:24:34 2017 -0400
@@ -48,6 +48,7 @@
     error,
     extensions,
     hg,
+    txnutil,
     util,
 )
 
@@ -64,10 +65,14 @@
 
 @command('share',
     [('U', 'noupdate', None, _('do not create a working directory')),
-     ('B', 'bookmarks', None, _('also share bookmarks'))],
+     ('B', 'bookmarks', None, _('also share bookmarks')),
+     ('', 'relative', None, _('point to source using a relative path '
+                              '(EXPERIMENTAL)')),
+    ],
     _('[-U] [-B] SOURCE [DEST]'),
     norepo=True)
-def share(ui, source, dest=None, noupdate=False, bookmarks=False):
+def share(ui, source, dest=None, noupdate=False, bookmarks=False,
+          relative=False):
     """create a new shared repository
 
     Initialize a new repository and working directory that shares its
@@ -86,7 +91,7 @@
     """
 
     return hg.share(ui, source, dest=dest, update=not noupdate,
-                    bookmarks=bookmarks)
+                    bookmarks=bookmarks, relative=relative)
 
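With the new flag, a share records its source as a relative path, which keeps
the pair relocatable as a unit. Usage example (the paths are illustrative)::

    hg share --relative ../main working-copy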
 @command('unshare', [], '')
 def unshare(ui, repo):
@@ -108,10 +113,11 @@
 
         destlock = hg.copystore(ui, repo, repo.path)
 
-        sharefile = repo.join('sharedpath')
+        sharefile = repo.vfs.join('sharedpath')
         util.rename(sharefile, sharefile + '.old')
 
-        repo.requirements.discard('sharedpath')
+        repo.requirements.discard('shared')
+        repo.requirements.discard('relshared')
         repo._writerequirements()
     finally:
         destlock and destlock.release()
@@ -171,7 +177,28 @@
     if _hassharedbookmarks(repo):
         srcrepo = _getsrcrepo(repo)
         if srcrepo is not None:
+            # just orig(srcrepo) doesn't work as expected, because
+            # HG_PENDING refers to repo.root.
+            try:
+                fp, pending = txnutil.trypending(repo.root, repo.vfs,
+                                                 'bookmarks')
+                if pending:
+                    # only in this case, bookmark information in repo
+                    # is up-to-date.
+                    return fp
+                fp.close()
+            except IOError as inst:
+                if inst.errno != errno.ENOENT:
+                    raise
+
+            # otherwise, we should read bookmarks from srcrepo,
+            # because .hg/bookmarks in srcrepo might be already
+            # changed via another sharing repo
             repo = srcrepo
+
+            # TODO: Pending changes in repo are still invisible in
+            # srcrepo, because bookmarks.pending is written only into repo.
+            # See also https://www.mercurial-scm.org/wiki/SharedRepository
     return orig(repo)
 
 def recordchange(orig, self, tr):
--- a/hgext/shelve.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/shelve.py	Tue Apr 18 12:24:34 2017 -0400
@@ -28,6 +28,7 @@
 
 from mercurial.i18n import _
 from mercurial import (
+    bookmarks,
     bundle2,
     bundlerepo,
     changegroup,
@@ -46,6 +47,7 @@
     scmutil,
     templatefilters,
     util,
+    vfs as vfsmod,
 )
 
 from . import (
@@ -62,7 +64,7 @@
 
 backupdir = 'shelve-backup'
 shelvedir = 'shelved'
-shelvefileextensions = ['hg', 'patch']
+shelvefileextensions = ['hg', 'patch', 'oshelve']
 # universal extension is present in all types of shelves
 patchextension = 'patch'
 
@@ -78,8 +80,8 @@
     def __init__(self, repo, name, filetype=None):
         self.repo = repo
         self.name = name
-        self.vfs = scmutil.vfs(repo.join(shelvedir))
-        self.backupvfs = scmutil.vfs(repo.join(backupdir))
+        self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
+        self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
         self.ui = self.repo.ui
         if filetype:
             self.fname = name + '.' + filetype
@@ -153,6 +155,12 @@
         bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                                 compression=compression)
 
+    def writeobsshelveinfo(self, info):
+        scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
+
+    def readobsshelveinfo(self):
+        return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
+
 class shelvedstate(object):
     """Handle persistence during unshelving operations.
 
@@ -163,6 +171,8 @@
     _filename = 'shelvedstate'
     _keep = 'keep'
     _nokeep = 'nokeep'
+    # colon is essential to differentiate from a real bookmark name
+    _noactivebook = ':no-active-bookmark'
 
     @classmethod
     def load(cls, repo):
@@ -177,9 +187,10 @@
             wctx = nodemod.bin(fp.readline().strip())
             pendingctx = nodemod.bin(fp.readline().strip())
             parents = [nodemod.bin(h) for h in fp.readline().split()]
-            stripnodes = [nodemod.bin(h) for h in fp.readline().split()]
+            nodestoremove = [nodemod.bin(h) for h in fp.readline().split()]
             branchtorestore = fp.readline().strip()
             keep = fp.readline().strip() == cls._keep
+            activebook = fp.readline().strip()
         except (ValueError, TypeError) as err:
             raise error.CorruptedState(str(err))
         finally:
@@ -191,17 +202,20 @@
             obj.wctx = repo[wctx]
             obj.pendingctx = repo[pendingctx]
             obj.parents = parents
-            obj.stripnodes = stripnodes
+            obj.nodestoremove = nodestoremove
             obj.branchtorestore = branchtorestore
             obj.keep = keep
+            obj.activebookmark = ''
+            if activebook != cls._noactivebook:
+                obj.activebookmark = activebook
         except error.RepoLookupError as err:
             raise error.CorruptedState(str(err))
 
         return obj
 
     @classmethod
-    def save(cls, repo, name, originalwctx, pendingctx, stripnodes,
-             branchtorestore, keep=False):
+    def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
+             branchtorestore, keep=False, activebook=''):
         fp = repo.vfs(cls._filename, 'wb')
         fp.write('%i\n' % cls._version)
         fp.write('%s\n' % name)
@@ -210,17 +224,18 @@
         fp.write('%s\n' %
                  ' '.join([nodemod.hex(p) for p in repo.dirstate.parents()]))
         fp.write('%s\n' %
-                 ' '.join([nodemod.hex(n) for n in stripnodes]))
+                 ' '.join([nodemod.hex(n) for n in nodestoremove]))
         fp.write('%s\n' % branchtorestore)
         fp.write('%s\n' % (cls._keep if keep else cls._nokeep))
+        fp.write('%s\n' % (activebook or cls._noactivebook))
         fp.close()
 
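save() above writes one field per line, and load() reads them back in the same
order. A compact sketch of that record layout (a hypothetical reader; the
field order is taken from load())::

    def parseshelvedstate(data):
        lines = data.splitlines()
        keep, activebook = lines[7], lines[8]
        return {
            'version':         int(lines[0]),
            'name':            lines[1],
            'wctx':            lines[2],           # hex node
            'pendingctx':      lines[3],           # hex node
            'parents':         lines[4].split(),   # hex nodes
            'nodestoremove':   lines[5].split(),   # hex nodes
            'branchtorestore': lines[6],
            'keep':            keep == 'keep',
            # ':no-active-bookmark' is the sentinel for "none"
            'activebookmark':  '' if activebook == ':no-active-bookmark'
                               else activebook,
        }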
     @classmethod
     def clear(cls, repo):
-        util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
+        repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
 
 def cleanupoldbackups(repo):
-    vfs = scmutil.vfs(repo.join(backupdir))
+    vfs = vfsmod.vfs(repo.vfs.join(backupdir))
     maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
     hgfiles = [f for f in vfs.listdir()
                if f.endswith('.' + patchextension)]
@@ -235,11 +250,17 @@
             continue
         base = f[:-(1 + len(patchextension))]
         for ext in shelvefileextensions:
-            try:
-                vfs.unlink(base + '.' + ext)
-            except OSError as err:
-                if err.errno != errno.ENOENT:
-                    raise
+            vfs.tryunlink(base + '.' + ext)
+
+def _backupactivebookmark(repo):
+    activebookmark = repo._activebookmark
+    if activebookmark:
+        bookmarks.deactivate(repo)
+    return activebookmark
+
+def _restoreactivebookmark(repo, mark):
+    if mark:
+        bookmarks.activate(repo, mark)
 
 def _aborttransaction(repo):
     '''Abort current transaction for shelve/unshelve, but keep dirstate
@@ -313,17 +334,16 @@
         hasmq = util.safehasattr(repo, 'mq')
         if hasmq:
             saved, repo.mq.checkapplied = repo.mq.checkapplied, False
-        backup = repo.ui.backupconfig('phases', 'new-commit')
+        overrides = {('phases', 'new-commit'): phases.secret}
         try:
-            repo.ui.setconfig('phases', 'new-commit', phases.secret)
             editor_ = False
             if editor:
                 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                   **opts)
-            return repo.commit(message, shelveuser, opts.get('date'), match,
-                               editor=editor_, extra=extra)
+            with repo.ui.configoverride(overrides):
+                return repo.commit(message, shelveuser, opts.get('date'),
+                                   match, editor=editor_, extra=extra)
         finally:
-            repo.ui.restoreconfig(backup)
             if hasmq:
                 repo.mq.checkapplied = saved
 
@@ -375,7 +395,7 @@
     if not opts.get('message'):
         opts['message'] = desc
 
-    lock = tr = None
+    lock = tr = activebookmark = None
     try:
         lock = repo.lock()
 
@@ -388,6 +408,7 @@
                           not opts.get('addremove', False))
 
         name = getshelvename(repo, parent, opts)
+        activebookmark = _backupactivebookmark(repo)
         extra = {}
         if includeunknown:
             _includeunknownfiles(repo, pats, opts, extra)
@@ -402,7 +423,8 @@
             node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
         else:
             node = cmdutil.dorecord(ui, repo, commitfunc, None,
-                                    False, cmdutil.recordfilter, *pats, **opts)
+                                    False, cmdutil.recordfilter, *pats,
+                                    **opts)
         if not node:
             _nothingtoshelvemessaging(ui, repo, pats, opts)
             return 1
@@ -418,6 +440,7 @@
 
         _finishshelve(repo)
     finally:
+        _restoreactivebookmark(repo, activebookmark)
         lockmod.release(tr, lock)
 
 def _isbareshelve(pats, opts):
@@ -485,6 +508,7 @@
     if not ui.plain():
         width = ui.termwidth()
     namelabel = 'shelve.newest'
+    ui.pager('shelve')
     for mtime, name in listshelves(repo):
         sname = util.split(name)[1]
         if pats and sname not in pats:
@@ -549,19 +573,17 @@
         try:
             checkparents(repo, state)
 
-            util.rename(repo.join('unshelverebasestate'),
-                        repo.join('rebasestate'))
+            repo.vfs.rename('unshelverebasestate', 'rebasestate')
             try:
                 rebase.rebase(ui, repo, **{
                     'abort' : True
                 })
             except Exception:
-                util.rename(repo.join('rebasestate'),
-                            repo.join('unshelverebasestate'))
+                repo.vfs.rename('rebasestate', 'unshelverebasestate')
                 raise
 
             mergefiles(ui, repo, state.wctx, state.pendingctx)
-            repair.strip(ui, repo, state.stripnodes, backup=False,
+            repair.strip(ui, repo, state.nodestoremove, backup=False,
                          topic='shelve')
         finally:
             shelvedstate.clear(repo)
@@ -570,9 +592,7 @@
 def mergefiles(ui, repo, wctx, shelvectx):
     """updates to wctx and merges the changes from shelvectx into the
     dirstate."""
-    oldquiet = ui.quiet
-    try:
-        ui.quiet = True
+    with ui.configoverride({('ui', 'quiet'): True}):
         hg.update(repo, wctx.node())
         files = []
         files.extend(shelvectx.files())
@@ -587,8 +607,6 @@
                        *pathtofiles(repo, files),
                        **{'no_backup': True})
         ui.popbuffer()
-    finally:
-        ui.quiet = oldquiet
 
 def restorebranch(ui, repo, branchtorestore):
     if branchtorestore and branchtorestore != repo.dirstate.branch():
@@ -617,15 +635,13 @@
                 _("unresolved conflicts, can't continue"),
                 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
 
-        util.rename(repo.join('unshelverebasestate'),
-                    repo.join('rebasestate'))
+        repo.vfs.rename('unshelverebasestate', 'rebasestate')
         try:
             rebase.rebase(ui, repo, **{
                 'continue' : True
             })
         except Exception:
-            util.rename(repo.join('rebasestate'),
-                        repo.join('unshelverebasestate'))
+            repo.vfs.rename('rebasestate', 'unshelverebasestate')
             raise
 
         shelvectx = repo['tip']
@@ -634,12 +650,14 @@
             shelvectx = state.pendingctx
         else:
             # only strip the shelvectx if the rebase produced it
-            state.stripnodes.append(shelvectx.node())
+            state.nodestoremove.append(shelvectx.node())
 
         mergefiles(ui, repo, state.wctx, shelvectx)
         restorebranch(ui, repo, state.branchtorestore)
 
-        repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve')
+        repair.strip(ui, repo, state.nodestoremove, backup=False,
+                     topic='shelve')
+        _restoreactivebookmark(repo, state.activebookmark)
         shelvedstate.clear(repo)
         unshelvecleanup(ui, repo, state.name, opts)
         ui.status(_("unshelve of '%s' complete\n") % state.name)
@@ -659,21 +677,21 @@
     tempopts = {}
     tempopts['message'] = "pending changes temporary commit"
     tempopts['date'] = opts.get('date')
-    ui.quiet = True
-    node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
+    with ui.configoverride({('ui', 'quiet'): True}):
+        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
     tmpwctx = repo[node]
     return tmpwctx, addedbefore
 
-def _unshelverestorecommit(ui, repo, basename, oldquiet):
+def _unshelverestorecommit(ui, repo, basename):
     """Recreate commit in the repository during the unshelve"""
-    ui.quiet = True
-    shelvedfile(repo, basename, 'hg').applybundle()
-    shelvectx = repo['tip']
-    ui.quiet = oldquiet
+    with ui.configoverride({('ui', 'quiet'): True}):
+        shelvedfile(repo, basename, 'hg').applybundle()
+        shelvectx = repo['tip']
     return repo, shelvectx
 
 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
-                          tmpwctx, shelvectx, branchtorestore):
+                          tmpwctx, shelvectx, branchtorestore,
+                          activebookmark):
     """Rebase restored commit from its original location to a destination"""
     # If the shelve is not immediately on top of the commit
     # we'll be merging with, rebase it to be on top.
@@ -691,13 +709,12 @@
     except error.InterventionRequired:
         tr.close()
 
-        stripnodes = [repo.changelog.node(rev)
-                      for rev in xrange(oldtiprev, len(repo))]
-        shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes,
-                          branchtorestore, opts.get('keep'))
+        nodestoremove = [repo.changelog.node(rev)
+                         for rev in xrange(oldtiprev, len(repo))]
+        shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
+                          branchtorestore, opts.get('keep'), activebookmark)
 
-        util.rename(repo.join('rebasestate'),
-                    repo.join('unshelverebasestate'))
+        repo.vfs.rename('rebasestate', 'unshelverebasestate')
         raise error.InterventionRequired(
             _("unresolved conflicts (see 'hg resolve', then "
               "'hg unshelve --continue')"))
@@ -721,7 +738,8 @@
     toforget = (addedafter & shelveunknown) - addedbefore
     repo[None].forget(toforget)
 
-def _finishunshelve(repo, oldtiprev, tr):
+def _finishunshelve(repo, oldtiprev, tr, activebookmark):
+    _restoreactivebookmark(repo, activebookmark)
     # The transaction aborting will strip all the commits for us,
     # but it doesn't update the inmemory structures, so addchangegroup
     # hooks still fire and try to operate on the missing commits.
@@ -747,10 +765,12 @@
            _('continue an incomplete unshelve operation')),
           ('k', 'keep', None,
            _('keep shelve after unshelving')),
+          ('n', 'name', '',
+           _('restore shelved change with given name'), _('NAME')),
           ('t', 'tool', '', _('specify merge tool')),
           ('', 'date', '',
            _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
-         _('hg unshelve [SHELVED]'))
+         _('hg unshelve [[-n] SHELVED]'))
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
 
@@ -795,6 +815,9 @@
     continuef = opts.get('continue')
     if not abortf and not continuef:
         cmdutil.checkunfinished(repo)
+    shelved = list(shelved)
+    if opts.get("name"):
+        shelved.append(opts["name"])
 
     if abortf or continuef:
         if abortf and continuef:
@@ -846,13 +869,9 @@
     if not shelvedfile(repo, basename, patchextension).exists():
         raise error.Abort(_("shelved change '%s' not found") % basename)
 
-    oldquiet = ui.quiet
     lock = tr = None
-    forcemerge = ui.backupconfig('ui', 'forcemerge')
     try:
-        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'unshelve')
         lock = repo.lock()
-
         tr = repo.transaction('unshelve', report=lambda x: None)
         oldtiprev = len(repo)
 
@@ -864,31 +883,32 @@
         # and shelvectx is the unshelved changes. Then we merge it all down
         # to the original pctx.
 
-        tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
-                                                         tmpwctx)
-
-        repo, shelvectx = _unshelverestorecommit(ui, repo, basename, oldquiet)
-        _checkunshelveuntrackedproblems(ui, repo, shelvectx)
-        branchtorestore = ''
-        if shelvectx.branch() != shelvectx.p1().branch():
-            branchtorestore = shelvectx.branch()
+        activebookmark = _backupactivebookmark(repo)
+        overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+        with ui.configoverride(overrides, 'unshelve'):
+            tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
+                                                             tmpwctx)
+            repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
+            _checkunshelveuntrackedproblems(ui, repo, shelvectx)
+            branchtorestore = ''
+            if shelvectx.branch() != shelvectx.p1().branch():
+                branchtorestore = shelvectx.branch()
 
-        shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
-                                          basename, pctx, tmpwctx, shelvectx,
-                                          branchtorestore)
-        mergefiles(ui, repo, pctx, shelvectx)
-        restorebranch(ui, repo, branchtorestore)
-        _forgetunknownfiles(repo, shelvectx, addedbefore)
+            shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
+                                              basename, pctx, tmpwctx,
+                                              shelvectx, branchtorestore,
+                                              activebookmark)
+            mergefiles(ui, repo, pctx, shelvectx)
+            restorebranch(ui, repo, branchtorestore)
+            _forgetunknownfiles(repo, shelvectx, addedbefore)
 
-        shelvedstate.clear(repo)
-        _finishunshelve(repo, oldtiprev, tr)
-        unshelvecleanup(ui, repo, basename, opts)
+            shelvedstate.clear(repo)
+            _finishunshelve(repo, oldtiprev, tr, activebookmark)
+            unshelvecleanup(ui, repo, basename, opts)
     finally:
-        ui.quiet = oldquiet
         if tr:
             tr.release()
         lockmod.release(lock)
-        ui.restoreconfig(forcemerge)
 
 @command('shelve',
          [('A', 'addremove', None,
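The recurring change in this file swaps manual save/restore of ui state (the
oldquiet dance, backupconfig/restoreconfig) for the ui.configoverride context
manager. A minimal sketch of that pattern, built only from the
backupconfig/restoreconfig primitives visible in the removed lines
(illustrative, not the actual mercurial/ui.py implementation):

    import contextlib

    @contextlib.contextmanager
    def configoverride(ui, overrides, source=''):
        # apply each (section, name) -> value pair, undoing them on exit
        backups = []
        try:
            for (section, name), value in overrides.items():
                backups.append(ui.backupconfig(section, name))
                ui.setconfig(section, name, value, source)
            yield
        finally:
            for backup in backups:
                ui.restoreconfig(backup)

    # usage, as in mergefiles() above:
    #   with configoverride(ui, {('ui', 'quiet'): True}):
    #       hg.update(repo, wctx.node())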
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/show.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,222 @@
+# show.py - Extension implementing `hg show`
+#
+# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""unified command to show various repository information (EXPERIMENTAL)
+
+This extension provides the :hg:`show` command, which provides a central
+command for displaying commonly-accessed repository data and views of that
+data.
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial.node import nullrev
+from mercurial import (
+    cmdutil,
+    error,
+    formatter,
+    graphmod,
+    pycompat,
+    registrar,
+    revset,
+    revsetlang,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+revsetpredicate = registrar.revsetpredicate()
+
+class showcmdfunc(registrar._funcregistrarbase):
+    """Register a function to be invoked for an `hg show <thing>`."""
+
+    # Used by _formatdoc().
+    _docformat = '%s -- %s'
+
+    def _extrasetup(self, name, func, fmtopic=None):
+        """Called with decorator arguments to register a show view.
+
+        ``name`` is the sub-command name.
+
+        ``func`` is the function being decorated.
+
+        ``fmtopic`` is the topic in the style that will be rendered for
+        this view.
+        """
+        func._fmtopic = fmtopic
+
+showview = showcmdfunc()
+
+@command('show', [
+    # TODO: Switch this template flag to use commands.formatteropts if
+    # 'hg show' becomes stable before --template/-T is stable. For now,
+    # we are putting it here without the '(EXPERIMENTAL)' flag because it
+    # is an important part of the 'hg show' user experience and the entire
+    # 'hg show' experience is experimental.
+    ('T', 'template', '', ('display with template'), _('TEMPLATE')),
+    ], _('VIEW'))
+def show(ui, repo, view=None, template=None):
+    """show various repository information
+
+    A requested view of repository data is displayed.
+
+    If no view is requested, the list of available views is shown and the
+    command aborts.
+
+    .. note::
+
+       There are no backwards compatibility guarantees for the output of this
+       command. Output may change in any future Mercurial release.
+
+       Consumers wanting stable command output should specify a template via
+       ``-T/--template``.
+
+    List of available views:
+    """
+    if ui.plain() and not template:
+        hint = _('invoke with -T/--template to control output format')
+        raise error.Abort(_('must specify a template in plain mode'), hint=hint)
+
+    views = showview._table
+
+    if not view:
+        ui.pager('show')
+        # TODO consider using formatter here so available views can be
+        # rendered to custom format.
+        ui.write(_('available views:\n'))
+        ui.write('\n')
+
+        for name, func in sorted(views.items()):
+            ui.write(('%s\n') % func.__doc__)
+
+        ui.write('\n')
+        raise error.Abort(_('no view requested'),
+                          hint=_('use "hg show VIEW" to choose a view'))
+
+    # TODO use same logic as dispatch to perform prefix matching.
+    if view not in views:
+        raise error.Abort(_('unknown view: %s') % view,
+                          hint=_('run "hg show" to see available views'))
+
+    template = template or 'show'
+    fmtopic = 'show%s' % views[view]._fmtopic
+
+    ui.pager('show')
+    with ui.formatter(fmtopic, {'template': template}) as fm:
+        return views[view](ui, repo, fm)
+
+@showview('bookmarks', fmtopic='bookmarks')
+def showbookmarks(ui, repo, fm):
+    """bookmarks and their associated changeset"""
+    marks = repo._bookmarks
+    if not len(marks):
+        # This is a bit hacky. Ideally, templates would have a way to
+        # specify an empty output, but we shouldn't corrupt JSON while
+        # waiting for this functionality.
+        if not isinstance(fm, formatter.jsonformatter):
+            ui.write(_('(no bookmarks set)\n'))
+        return
+
+    active = repo._activebookmark
+    longestname = max(len(b) for b in marks)
+    # TODO consider exposing longest shortest(node).
+
+    for bm, node in sorted(marks.items()):
+        fm.startitem()
+        fm.context(ctx=repo[node])
+        fm.write('bookmark', '%s', bm)
+        fm.write('node', fm.hexfunc(node), fm.hexfunc(node))
+        fm.data(active=bm == active,
+                longestbookmarklen=longestname)
+
+@revsetpredicate('_underway([commitage[, headage]])')
+def underwayrevset(repo, subset, x):
+    args = revset.getargsdict(x, 'underway', 'commitage headage')
+    if 'commitage' not in args:
+        args['commitage'] = None
+    if 'headage' not in args:
+        args['headage'] = None
+
+    # We assume callers of this revset add a topological sort on the
+    # result. This means there is no benefit to making the revset lazy
+    # since the topological sort needs to consume all revs.
+    #
+    # With this in mind, we build up the set manually instead of constructing
+    # a complex revset. This enables faster execution.
+
+    # Mutable changesets (non-public) are the most important changesets
+    # to return. ``not public()`` will also pull in obsolete changesets if
+    # there is a non-obsolete changeset with obsolete ancestors. This is
+    # why we exclude obsolete changesets from this query.
+    rs = 'not public() and not obsolete()'
+    rsargs = []
+    if args['commitage']:
+        rs += ' and date(%s)'
+        rsargs.append(revsetlang.getstring(args['commitage'],
+                                           _('commitage requires a string')))
+
+    mutable = repo.revs(rs, *rsargs)
+    relevant = revset.baseset(mutable)
+
+    # Add parents of mutable changesets to provide context.
+    relevant += repo.revs('parents(%ld)', mutable)
+
+    # We also pull in (public) heads if they a) aren't closing a branch
+    # and b) are recent.
+    rs = 'head() and not closed()'
+    rsargs = []
+    if args['headage']:
+        rs += ' and date(%s)'
+        rsargs.append(revsetlang.getstring(args['headage'],
+                                           _('headage requires a string')))
+
+    relevant += repo.revs(rs, *rsargs)
+
+    # Add working directory parent.
+    wdirrev = repo['.'].rev()
+    if wdirrev != nullrev:
+        relevant += revset.baseset(set([wdirrev]))
+
+    return subset & relevant
+
+@showview('underway', fmtopic='underway')
+def showunderway(ui, repo, fm):
+    """changesets that aren't finished"""
+    # TODO support date-based limiting when calling revset.
+    revs = repo.revs('sort(_underway(), topo)')
+
+    revdag = graphmod.dagwalker(repo, revs)
+    displayer = cmdutil.changeset_templater(ui, repo, None, None,
+                                            tmpl=fm._t.load(fm._topic),
+                                            mapfile=None, buffered=True)
+
+    ui.setconfig('experimental', 'graphshorten', True)
+    cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges)
+
+# Adjust the docstring of the show command so it shows all registered views.
+# This is a bit hacky because it runs at the end of module load. When moved
+# into core or when another extension wants to provide a view, we'll need
+# to do this more robustly.
+# TODO make this more robust.
+def _updatedocstring():
+    longest = max(map(len, showview._table.keys()))
+    entries = []
+    for key in sorted(showview._table.keys()):
+        entries.append(pycompat.sysstr('    %s   %s' % (
+            key.ljust(longest), showview._table[key]._origdoc)))
+
+    cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n    ') % (
+        cmdtable['show'][0].__doc__.rstrip(),
+        pycompat.sysstr('\n\n').join(entries))
+
+_updatedocstring()
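Given the registrar-based table above, adding a view is a matter of decorating
a function taking (ui, repo, fm). A hypothetical extra view follows; the
'tags' name and its body are illustrative only, and rendering it would also
require a matching 'showtags' entry in the show template style:

    @showview('tags', fmtopic='tags')
    def showtags(ui, repo, fm):
        """tags and their associated changeset"""
        for tag, node in sorted(repo.tags().items()):
            fm.startitem()
            fm.write('tag', '%s', tag)
            fm.write('node', '%s', fm.hexfunc(node))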
--- a/hgext/transplant.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/transplant.py	Tue Apr 18 12:24:34 2017 -0400
@@ -28,11 +28,14 @@
     merge,
     node as nodemod,
     patch,
+    pycompat,
     registrar,
     revlog,
     revset,
     scmutil,
+    smartset,
     util,
+    vfs as vfsmod,
 )
 
 class TransplantError(error.Abort):
@@ -58,7 +61,7 @@
         self.opener = opener
 
         if not opener:
-            self.opener = scmutil.opener(self.path)
+            self.opener = vfsmod.vfs(self.path)
         self.transplants = {}
         self.dirty = False
         self.read()
@@ -100,8 +103,8 @@
 class transplanter(object):
     def __init__(self, ui, repo, opts):
         self.ui = ui
-        self.path = repo.join('transplant')
-        self.opener = scmutil.opener(self.path)
+        self.path = repo.vfs.join('transplant')
+        self.opener = vfsmod.vfs(self.path)
         self.transplants = transplants(self.path, 'transplants',
                                        opener=self.opener)
         def getcommiteditor():
@@ -197,7 +200,7 @@
                     patchfile = None
                 else:
                     fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
-                    fp = os.fdopen(fd, 'w')
+                    fp = os.fdopen(fd, pycompat.sysstr('w'))
                     gen = patch.diff(source, parent, node, opts=diffopts)
                     for chunk in gen:
                         fp.write(chunk)
@@ -245,7 +248,7 @@
         self.ui.status(_('filtering %s\n') % patchfile)
         user, date, msg = (changelog[1], changelog[2], changelog[4])
         fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
-        fp = os.fdopen(fd, 'w')
+        fp = os.fdopen(fd, pycompat.sysstr('w'))
         fp.write("# HG changeset patch\n")
         fp.write("# User %s\n" % user)
         fp.write("# Date %d %d\n" % date)
@@ -258,7 +261,8 @@
                            environ={'HGUSER': changelog[1],
                                     'HGREVISION': nodemod.hex(node),
                                     },
-                           onerr=error.Abort, errprefix=_('filter failed'))
+                           onerr=error.Abort, errprefix=_('filter failed'),
+                           blockedtag='transplant_filter')
             user, date, msg = self.parselog(file(headerfile))[1:4]
         finally:
             os.unlink(headerfile)
@@ -722,7 +726,7 @@
         s = revset.getset(repo, subset, x)
     else:
         s = subset
-    return revset.baseset([r for r in s if
+    return smartset.baseset([r for r in s if
         repo[r].extra().get('transplant_source')])
 
 templatekeyword = registrar.templatekeyword()
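scmutil.opener was renamed to vfs and moved into its own module, so the
substitutions in this file are mechanical. Roughly, a vfs provides file access
rooted at a base directory; a sketch, assuming a scratch path:

    from mercurial import vfs as vfsmod

    opener = vfsmod.vfs('/tmp/xplant')   # all paths resolve under /tmp/xplant
    fp = opener('transplants', 'w')      # opens /tmp/xplant/transplants
    fp.write('1234:5678\n')
    fp.close()
    # rename stays inside the root, as with repo.vfs.rename in shelve above
    opener.rename('transplants', 'transplants.bak')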
--- a/hgext/win32text.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/win32text.py	Tue Apr 18 12:24:34 2017 -0400
@@ -74,7 +74,7 @@
                   'and does not need EOL conversion by the win32text plugin.\n'
                   'Before your next commit, please reconsider your '
                   'encode/decode settings in \nMercurial.ini or %s.\n') %
-                (filename, newlinestr[newline], repo.join('hgrc')))
+                (filename, newlinestr[newline], repo.vfs.join('hgrc')))
 
 def dumbdecode(s, cmd, **kwargs):
     checknewline(s, '\r\n', **kwargs)
--- a/hgext/zeroconf/__init__.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/hgext/zeroconf/__init__.py	Tue Apr 18 12:24:34 2017 -0400
@@ -64,7 +64,9 @@
     # Generic method, sometimes gives useless results
     try:
         dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
-        if not dumbip.startswith('127.') and ':' not in dumbip:
+        if ':' in dumbip:
+            dumbip = '127.0.0.1'
+        if not dumbip.startswith('127.'):
             return dumbip
     except (socket.gaierror, socket.herror):
         dumbip = '127.0.0.1'
--- a/mercurial/__init__.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/__init__.py	Tue Apr 18 12:24:34 2017 -0400
@@ -68,7 +68,7 @@
                 # indicates the type of module. So just assume what we found
                 # is OK (even though it could be a pure Python module).
             except ImportError:
-                if modulepolicy == 'c':
+                if modulepolicy == b'c':
                     raise
                 zl = ziploader('mercurial', 'pure')
                 mod = zl.load_module(name)
@@ -106,7 +106,7 @@
                                   'version should exist' % name)
 
         except ImportError:
-            if modulepolicy == 'c':
+            if modulepolicy == b'c':
                 raise
 
             # Could not load the C extension and pure Python is allowed. So
@@ -137,6 +137,9 @@
             # Only handle Mercurial-related modules.
             if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
                 return None
+            # zstd is already dual-version clean, don't try to mangle it
+            if fullname.startswith('mercurial.zstd'):
+                return None
 
             # This assumes Python 3 doesn't support loading C modules.
             if fullname in _dualmodules:
@@ -280,7 +283,8 @@
                     continue
                 r, c = t.start
                 l = (b'; from mercurial.pycompat import '
-                     b'delattr, getattr, hasattr, setattr, xrange\n')
+                     b'delattr, getattr, hasattr, setattr, xrange, '
+                     b'open, unicode\n')
                 for u in tokenize.tokenize(io.BytesIO(l).readline):
                     if u.type in (tokenize.ENCODING, token.ENDMARKER):
                         continue
@@ -307,17 +311,10 @@
                         if argidx is not None:
                             _ensureunicode(argidx)
 
-                # Bare open call (not an attribute on something else), the
-                # second argument (mode) must be a string, not bytes
-                elif fn == 'open' and not _isop(i - 1, '.'):
-                    arg1idx = _findargnofcall(1)
-                    if arg1idx is not None:
-                        _ensureunicode(arg1idx)
-
-                # It changes iteritems to items as iteritems is not
+                # It changes iteritems/values to items/values as they are not
                 # present in Python 3 world.
-                elif fn == 'iteritems':
-                    yield t._replace(string='items')
+                elif fn in ('iteritems', 'itervalues'):
+                    yield t._replace(string=fn[4:])
                     continue
 
             # Emit unmodified token.
@@ -327,7 +324,7 @@
     # ``replacetoken`` or any mechanism that changes semantics of module
     # loading is changed. Otherwise cached bytecode may get loaded without
     # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x06'
+    BYTECODEHEADER = b'HG\x00\x0a'
 
     class hgloader(importlib.machinery.SourceFileLoader):
         """Custom module loader that transforms source code.
--- a/mercurial/ancestor.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/ancestor.py	Tue Apr 18 12:24:34 2017 -0400
@@ -296,6 +296,8 @@
         except StopIteration:
             return False
 
+    __bool__ = __nonzero__
+
     def __iter__(self):
         """Generate the ancestors of _initrevs in reverse topological order.
 
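Python 3 consults __bool__ where Python 2 used __nonzero__, so a class
defining only the latter is unconditionally truthy on Python 3; the one-line
alias keeps both protocols in sync. The shape of the fix:

    class lazyancestors(object):        # as in ancestor.py
        def __nonzero__(self):          # Python 2 truth protocol
            ...
        __bool__ = __nonzero__          # Python 3 looks here instead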
--- a/mercurial/archival.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/archival.py	Tue Apr 18 12:24:34 2017 -0400
@@ -22,8 +22,8 @@
     encoding,
     error,
     match as matchmod,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 stringio = util.stringio
 
@@ -249,7 +249,7 @@
 
     def __init__(self, name, mtime):
         self.basedir = name
-        self.opener = scmutil.opener(self.basedir)
+        self.opener = vfsmod.vfs(self.basedir)
 
     def addfile(self, name, mode, islink, data):
         if islink:
@@ -331,7 +331,7 @@
         for subpath in sorted(ctx.substate):
             sub = ctx.workingsub(subpath)
             submatch = matchmod.subdirmatcher(subpath, matchfn)
-            total += sub.archive(archiver, prefix, submatch)
+            total += sub.archive(archiver, prefix, submatch, decode)
 
     if total == 0:
         raise error.Abort(_('no files match the archive pattern'))
--- a/mercurial/bdiff_module.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/bdiff_module.c	Tue Apr 18 12:24:34 2017 -0400
@@ -158,7 +158,7 @@
 	r = PyBytes_AsString(s);
 	rlen = PyBytes_Size(s);
 
-	w = (char *)malloc(rlen ? rlen : 1);
+	w = (char *)PyMem_Malloc(rlen ? rlen : 1);
 	if (!w)
 		goto nomem;
 
@@ -178,7 +178,7 @@
 	result = PyBytes_FromStringAndSize(w, wlen);
 
 nomem:
-	free(w);
+	PyMem_Free(w);
 	return result ? result : PyErr_NoMemory();
 }
 
--- a/mercurial/bookmarks.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/bookmarks.py	Tue Apr 18 12:24:34 2017 -0400
@@ -19,6 +19,7 @@
     error,
     lock as lockmod,
     obsolete,
+    txnutil,
     util,
 )
 
@@ -29,17 +30,8 @@
     bookmarks or the committed ones. Other extensions (like share)
     may need to tweak this behavior further.
     """
-    bkfile = None
-    if 'HG_PENDING' in encoding.environ:
-        try:
-            bkfile = repo.vfs('bookmarks.pending')
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-    if bkfile is None:
-        bkfile = repo.vfs('bookmarks')
-    return bkfile
-
+    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
+    return fp
 
 class bmstore(dict):
     """Storage for bookmarks.
@@ -139,11 +131,7 @@
                 finally:
                     f.close()
             else:
-                try:
-                    self._repo.vfs.unlink('bookmarks.current')
-                except OSError as inst:
-                    if inst.errno != errno.ENOENT:
-                        raise
+                self._repo.vfs.tryunlink('bookmarks.current')
         self._aclean = True
 
     def _write(self, fp):
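txnutil.trypending factors out the HG_PENDING handling that the removed block
open-coded: it opens the '.pending' variant of a file when a transaction for
this repo is in flight, and the normal file otherwise. Its contract, as used
above (a sketch of the call, not the helper itself):

    from mercurial import txnutil

    fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
    try:
        # pending is True when 'bookmarks.pending' was opened instead
        data = fp.read()
    finally:
        fp.close()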
--- a/mercurial/branchmap.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/branchmap.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,9 +7,7 @@
 
 from __future__ import absolute_import
 
-import array
 import struct
-import time
 
 from .node import (
     bin,
@@ -21,12 +19,12 @@
     encoding,
     error,
     scmutil,
+    util,
 )
 
-array = array.array
 calcsize = struct.calcsize
-pack = struct.pack
-unpack = struct.unpack
+pack_into = struct.pack_into
+unpack_from = struct.unpack_from
 
 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
@@ -233,7 +231,7 @@
     def write(self, repo):
         try:
             f = repo.vfs(_filename(repo), "w", atomictemp=True)
-            cachekey = [hex(self.tipnode), str(self.tiprev)]
+            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
@@ -261,7 +259,7 @@
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
-        starttime = time.time()
+        starttime = util.timer()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
@@ -314,7 +312,7 @@
                     self.tiprev = tiprev
         self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
 
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)
 
@@ -357,12 +355,14 @@
         assert repo.filtername is None
         self._repo = repo
         self._names = [] # branch names in local encoding with static index
-        self._rbcrevs = array('c') # structs of type _rbcrecfmt
+        self._rbcrevs = bytearray()
         self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
         try:
             bndata = repo.vfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata) # for verification before writing
-            self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
+            if bndata:
+                self._names = [encoding.tolocal(bn)
+                               for bn in bndata.split('\0')]
         except (IOError, OSError):
             if readonly:
                 # don't try to use cache - fall back to the slow path
@@ -371,7 +371,7 @@
         if self._names:
             try:
                 data = repo.vfs.read(_rbcrevs)
-                self._rbcrevs.fromstring(data)
+                self._rbcrevs[:] = data
             except (IOError, OSError) as inst:
                 repo.ui.debug("couldn't read revision branch cache: %s\n" %
                               inst)
@@ -390,8 +390,7 @@
         self._rbcnamescount = 0
         self._namesreverse.clear()
         self._rbcrevslen = len(self._repo.changelog)
-        self._rbcrevs = array('c')
-        self._rbcrevs.fromstring('\0' * (self._rbcrevslen * _rbcrecsize))
+        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
 
     def branchinfo(self, rev):
         """Return branch name and close flag for rev, using and updating
@@ -409,8 +408,8 @@
 
         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
-        cachenode, branchidx = unpack(
-            _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
+        cachenode, branchidx = unpack_from(
+            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
@@ -427,7 +426,7 @@
         else:
             # rev/node map has changed, invalidate the cache from here up
             self._repo.ui.debug("history modification detected - truncating "
-                "revision branch cache to revision %s\n" % rev)
+                "revision branch cache to revision %d\n" % rev)
             truncate = rbcrevidx + _rbcrecsize
             del self._rbcrevs[truncate:]
             self._rbcrevslen = min(self._rbcrevslen, truncate)
@@ -453,14 +452,14 @@
 
     def _setcachedata(self, rev, node, branchidx):
         """Writes the node's branch data to the in-memory cache data."""
+        if rev == nullrev:
+            return
         rbcrevidx = rev * _rbcrecsize
-        rec = array('c')
-        rec.fromstring(pack(_rbcrecfmt, node, branchidx))
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             self._rbcrevs.extend('\0' *
                                  (len(self._repo.changelog) * _rbcrecsize -
                                   len(self._rbcrevs)))
-        self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
+        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
         self._rbcrevslen = min(self._rbcrevslen, rev)
 
         tr = self._repo.currenttransaction()
@@ -504,7 +503,7 @@
                            len(self._rbcrevs) // _rbcrecsize)
                 f = repo.vfs.open(_rbcrevs, 'ab')
                 if f.tell() != start:
-                    repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
+                    repo.ui.debug("truncating %s to %d\n" % (_rbcrevs, start))
                     f.seek(start)
                     if f.tell() != start:
                         start = 0
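Replacing array('c') with bytearray lets the record cache use
struct.pack_into/unpack_from against one mutable buffer instead of building
throwaway arrays per record. In isolation, assuming the cache's '>4sI' record
layout (a 4-byte node prefix plus a branch index):

    import struct

    recfmt = '>4sI'                       # node prefix + branch index
    recsize = struct.calcsize(recfmt)     # 8 bytes per revision
    rbcrevs = bytearray(3 * recsize)      # zero-filled, indexed by rev

    struct.pack_into(recfmt, rbcrevs, 1 * recsize, b'\xde\xad\xbe\xef', 42)
    node, branchidx = struct.unpack_from(recfmt, rbcrevs, 1 * recsize)
    assert (node, branchidx) == (b'\xde\xad\xbe\xef', 42)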
--- a/mercurial/bundle2.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/bundle2.py	Tue Apr 18 12:24:34 2017 -0400
@@ -271,6 +271,8 @@
     def __nonzero__(self):
         return bool(self._sequences)
 
+    __bool__ = __nonzero__
+
 class bundleoperation(object):
     """an object that represents a single bundling process
 
@@ -320,9 +322,6 @@
     It iterates over each part then searches for and uses the proper handling
     code to process the part. Parts are processed in order.
 
-    This is very early version of this function that will be strongly reworked
-    before final usage.
-
     Unknown Mandatory part will abort the process.
 
     It is temporarily possible to provide a prebuilt bundleoperation to the
@@ -355,9 +354,19 @@
         for nbpart, part in iterparts:
             _processpart(op, part)
     except Exception as exc:
-        for nbpart, part in iterparts:
-            # consume the bundle content
-            part.seek(0, 2)
+        # Any exceptions seeking to the end of the bundle at this point are
+        # almost certainly related to the underlying stream being bad.
+        # And, chances are that the exception we're handling is related to
+        # getting in that bad state. So, we swallow the seeking error and
+        # re-raise the original error.
+        seekerror = False
+        try:
+            for nbpart, part in iterparts:
+                # consume the bundle content
+                part.seek(0, 2)
+        except Exception:
+            seekerror = True
+
         # Small hack to let caller code distinguish exceptions from bundle2
         # processing from processing the old format. This is mostly
         # needed to handle different return codes to unbundle according to the
@@ -371,7 +380,13 @@
             replycaps = op.reply.capabilities
         exc._replycaps = replycaps
         exc._bundle2salvagedoutput = salvaged
-        raise
+
+        # Re-raising from a variable loses the original stack. So only use
+        # that form if we need to.
+        if seekerror:
+            raise exc
+        else:
+            raise
     finally:
         repo.ui.debug('bundle2-input-bundle: %i parts total\n' % nbpart)
 
@@ -618,41 +633,27 @@
 
     def __init__(self, fp):
         self._fp = fp
-        self._seekable = (util.safehasattr(fp, 'seek') and
-                          util.safehasattr(fp, 'tell'))
 
     def _unpack(self, format):
-        """unpack this struct format from the stream"""
+        """unpack this struct format from the stream
+
+        This method is meant for internal usage by the bundle2 protocol only.
+        It directly manipulates the low-level stream, including bundle2-level
+        instructions.
+
+        Do not use it to implement higher-level logic or methods."""
         data = self._readexact(struct.calcsize(format))
         return _unpack(format, data)
 
     def _readexact(self, size):
-        """read exactly <size> bytes from the stream"""
-        return changegroup.readexactly(self._fp, size)
-
-    def seek(self, offset, whence=0):
-        """move the underlying file pointer"""
-        if self._seekable:
-            return self._fp.seek(offset, whence)
-        else:
-            raise NotImplementedError(_('File pointer is not seekable'))
+        """read exactly <size> bytes from the stream
 
-    def tell(self):
-        """return the file offset, or None if file is not seekable"""
-        if self._seekable:
-            try:
-                return self._fp.tell()
-            except IOError as e:
-                if e.errno == errno.ESPIPE:
-                    self._seekable = False
-                else:
-                    raise
-        return None
+        This method is meant for internal usage by the bundle2 protocol only.
+        It directly manipulates the low-level stream, including bundle2-level
+        instructions.
 
-    def close(self):
-        """close underlying file"""
-        if util.safehasattr(self._fp, 'close'):
-            return self._fp.close()
+        Do not use it to implement higher-level logic or methods."""
+        return changegroup.readexactly(self._fp, size)
 
 def getunbundler(ui, fp, magicstring=None):
     """return a valid unbundler object for a given magicstring"""
@@ -806,6 +807,11 @@
         self.params # load params
         return self._compressed
 
+    def close(self):
+        """close underlying file"""
+        if util.safehasattr(self._fp, 'close'):
+            return self._fp.close()
+
 formatmap = {'20': unbundle20}
 
 b2streamparamsmap = {}
@@ -856,7 +862,7 @@
         self._seenparams = set()
         for pname, __ in self._mandatoryparams + self._advisoryparams:
             if pname in self._seenparams:
-                raise RuntimeError('duplicated params: %s' % pname)
+                raise error.ProgrammingError('duplicated params: %s' % pname)
             self._seenparams.add(pname)
         # status of the part's generation:
         # - None: not started,
@@ -865,6 +871,11 @@
         self._generated = None
         self.mandatory = mandatory
 
+    def __repr__(self):
+        cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
+        return ('<%s object at %x; id: %s; type: %s; mandatory: %s>'
+                % (cls, id(self), self.id, self.type, self.mandatory))
+
     def copy(self):
         """return a copy of the part
 
@@ -896,6 +907,13 @@
         return tuple(self._advisoryparams)
 
     def addparam(self, name, value='', mandatory=True):
+        """add a parameter to the part
+
+        If 'mandatory' is set to True, the remote handler must claim support
+        for this parameter or the unbundling will be aborted.
+
+        The 'name' and 'value' cannot exceed 255 bytes each.
+        """
         if self._generated is not None:
             raise error.ReadOnlyPartError('part is being generated')
         if name in self._seenparams:
@@ -909,7 +927,7 @@
     # methods used to generates the bundle2 stream
     def getchunks(self, ui):
         if self._generated is not None:
-            raise RuntimeError('part can only be consumed once')
+            raise error.ProgrammingError('part can only be consumed once')
         self._generated = False
 
         if ui.debugflag:
@@ -1078,7 +1096,7 @@
 
     @property
     def repo(self):
-        raise RuntimeError('no repo access from stream interruption')
+        raise error.ProgrammingError('no repo access from stream interruption')
 
     def gettransaction(self):
         raise TransactionUnavailable('no repo access from stream interruption')
@@ -1088,6 +1106,8 @@
 
     def __init__(self, ui, header, fp):
         super(unbundlepart, self).__init__(fp)
+        self._seekable = (util.safehasattr(fp, 'seek') and
+                          util.safehasattr(fp, 'tell'))
         self.ui = ui
         # unbundle state attr
         self._headerdata = header
@@ -1135,11 +1155,11 @@
         '''seek to specified chunk and start yielding data'''
         if len(self._chunkindex) == 0:
             assert chunknum == 0, 'Must start with chunk 0'
-            self._chunkindex.append((0, super(unbundlepart, self).tell()))
+            self._chunkindex.append((0, self._tellfp()))
         else:
             assert chunknum < len(self._chunkindex), \
                    'Unknown chunk %d' % chunknum
-            super(unbundlepart, self).seek(self._chunkindex[chunknum][1])
+            self._seekfp(self._chunkindex[chunknum][1])
 
         pos = self._chunkindex[chunknum][0]
         payloadsize = self._unpack(_fpayloadsize)[0]
@@ -1157,8 +1177,7 @@
                 chunknum += 1
                 pos += payloadsize
                 if chunknum == len(self._chunkindex):
-                    self._chunkindex.append((pos,
-                                             super(unbundlepart, self).tell()))
+                    self._chunkindex.append((pos, self._tellfp()))
                 yield result
             payloadsize = self._unpack(_fpayloadsize)[0]
             indebug(self.ui, 'payload chunk size: %i' % payloadsize)
@@ -1251,6 +1270,37 @@
                 raise error.Abort(_('Seek failed\n'))
             self._pos = newpos
 
+    def _seekfp(self, offset, whence=0):
+        """move the underlying file pointer
+
+        This method is meant for internal usage by the bundle2 protocol only.
+        It directly manipulates the low-level stream, including bundle2-level
+        instructions.
+
+        Do not use it to implement higher-level logic or methods."""
+        if self._seekable:
+            return self._fp.seek(offset, whence)
+        else:
+            raise NotImplementedError(_('File pointer is not seekable'))
+
+    def _tellfp(self):
+        """return the file offset, or None if file is not seekable
+
+        This method is meant for internal usage by the bundle2 protocol only.
+        It directly manipulates the low-level stream, including bundle2-level
+        instructions.
+
+        Do not use it to implement higher-level logic or methods."""
+        if self._seekable:
+            try:
+                return self._fp.tell()
+            except IOError as e:
+                if e.errno == errno.ESPIPE:
+                    self._seekable = False
+                else:
+                    raise
+        return None
+
 # These are only the static capabilities.
 # Check the 'getrepocaps' function for the rest.
 capabilities = {'HG20': (),
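The seekerror flag above exists because, on Python 2, a bare raise re-raises
whatever sys.exc_info() currently holds, and the inner except clause that
swallows the seek failure clobbers it. Compressed to its essentials (a sketch
of the control flow; processparts()/drainbundle() are hypothetical stand-ins
for the part iteration above):

    try:
        processparts()                  # original failure
    except Exception as exc:
        seekerror = False
        try:
            drainbundle()               # best-effort cleanup
        except Exception:
            seekerror = True            # clobbers exc_info on Python 2
        if seekerror:
            raise exc                   # explicit re-raise: loses the stack
        else:
            raise                       # bare re-raise: keeps the stack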
--- a/mercurial/bundlerepo.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/bundlerepo.py	Tue Apr 18 12:24:34 2017 -0400
@@ -37,8 +37,8 @@
     phases,
     pycompat,
     revlog,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 
 class bundlerevlog(revlog.revlog):
@@ -50,7 +50,7 @@
         #
         # To differentiate a rev in the bundle from a rev in the revlog, we
         # check revision against repotiprev.
-        opener = scmutil.readonlyvfs(opener)
+        opener = vfsmod.readonlyvfs(opener)
         revlog.revlog.__init__(self, opener, indexfile)
         self.bundle = bundle
         n = len(self)
@@ -65,6 +65,7 @@
             cs = chunkdata['cs']
             deltabase = chunkdata['deltabase']
             delta = chunkdata['delta']
+            flags = chunkdata['flags']
 
             size = len(delta)
             start = bundle.tell() - size
@@ -87,7 +88,7 @@
 
             baserev = self.rev(deltabase)
             # start, size, full unc. size, base (unused), link, p1, p2, node
-            e = (revlog.offset_type(start, 0), size, -1, baserev, link,
+            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                  self.rev(p1), self.rev(p2), node)
             self.index.insert(-1, e)
             self.nodemap[node] = n
@@ -114,8 +115,8 @@
         elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
             return revlog.revlog.revdiff(self, rev1, rev2)
 
-        return mdiff.textdiff(self.revision(self.node(rev1)),
-                              self.revision(self.node(rev2)))
+        return mdiff.textdiff(self.revision(rev1, raw=True),
+                              self.revision(rev2, raw=True))
 
     def revision(self, nodeorrev, raw=False):
         """return an uncompressed revision of a given node or revision
@@ -131,35 +132,35 @@
         if node == nullid:
             return ""
 
-        text = None
+        rawtext = None
         chain = []
         iterrev = rev
         # reconstruct the revision if it is from a changegroup
         while iterrev > self.repotiprev:
             if self._cache and self._cache[1] == iterrev:
-                text = self._cache[2]
+                rawtext = self._cache[2]
                 break
             chain.append(iterrev)
             iterrev = self.index[iterrev][3]
-        if text is None:
-            text = self.baserevision(iterrev)
+        if rawtext is None:
+            rawtext = self.baserevision(iterrev)
 
         while chain:
             delta = self._chunk(chain.pop())
-            text = mdiff.patches(text, [delta])
+            rawtext = mdiff.patches(rawtext, [delta])
 
-        text, validatehash = self._processflags(text, self.flags(rev),
+        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                 'read', raw=raw)
         if validatehash:
             self.checkhash(text, node, rev=rev)
-        self._cache = (node, rev, text)
+        self._cache = (node, rev, rawtext)
         return text
 
     def baserevision(self, nodeorrev):
         # Revlog subclasses may override 'revision' method to modify format of
         # content retrieved from revlog. To use bundlerevlog with such class one
         # needs to override 'baserevision' and make more specific call here.
-        return revlog.revlog.revision(self, nodeorrev)
+        return revlog.revlog.revision(self, nodeorrev, raw=True)
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
@@ -187,7 +188,7 @@
         oldfilter = self.filteredrevs
         try:
             self.filteredrevs = ()
-            return changelog.changelog.revision(self, nodeorrev)
+            return changelog.changelog.revision(self, nodeorrev, raw=True)
         finally:
             self.filteredrevs = oldfilter
 
@@ -209,9 +210,9 @@
             node = self.node(node)
 
         if node in self.fulltextcache:
-            result = self.fulltextcache[node].tostring()
+            result = '%s' % self.fulltextcache[node]
         else:
-            result = manifest.manifestrevlog.revision(self, nodeorrev)
+            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
         return result
 
     def dirlog(self, d):
@@ -229,7 +230,7 @@
                               linkmapper)
 
     def baserevision(self, nodeorrev):
-        return filelog.filelog.revision(self, nodeorrev)
+        return filelog.filelog.revision(self, nodeorrev, raw=True)
 
 class bundlepeer(localrepo.localpeer):
     def canpush(self):
@@ -239,7 +240,7 @@
     def __init__(self, *args, **kwargs):
         super(bundlephasecache, self).__init__(*args, **kwargs)
         if util.safehasattr(self, 'opener'):
-            self.opener = scmutil.readonlyvfs(self.opener)
+            self.opener = vfsmod.readonlyvfs(self.opener)
 
     def write(self):
         raise NotImplementedError
@@ -272,7 +273,7 @@
                                             suffix=".hg10un")
             self.tempfile = temp
 
-            with os.fdopen(fdtemp, 'wb') as fptemp:
+            with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
                 fptemp.write(header)
                 while True:
                     chunk = read(2**18)
--- a/mercurial/changegroup.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/changegroup.py	Tue Apr 18 12:24:34 2017 -0400
@@ -26,6 +26,7 @@
     error,
     mdiff,
     phases,
+    pycompat,
     util,
 )
 
@@ -98,7 +99,7 @@
                 fh = open(filename, "wb", 131072)
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
-            fh = os.fdopen(fd, "wb")
+            fh = os.fdopen(fd, pycompat.sysstr("wb"))
         cleanup = filename
         for c in chunks:
             fh.write(c)
--- a/mercurial/changelog.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/changelog.py	Tue Apr 18 12:24:34 2017 -0400
@@ -32,7 +32,7 @@
     >>> s
     'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
     >>> res = _string_escape(s)
-    >>> s == res.decode('string_escape')
+    >>> s == util.unescapestr(res)
     True
     """
     # subset of the string_escape codec
@@ -57,7 +57,7 @@
                 l = l.replace('\\\\', '\\\\\n')
                 l = l.replace('\\0', '\0')
                 l = l.replace('\n', '')
-            k, v = l.decode('string_escape').split(':', 1)
+            k, v = util.unescapestr(l).split(':', 1)
             extra[k] = v
     return extra
 
@@ -120,7 +120,7 @@
         return ret
 
     def write(self, s):
-        self.data.append(str(s))
+        self.data.append(bytes(s))
         self.offset += len(s)
         self._end += len(s)
 
--- a/mercurial/chgserver.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/chgserver.py	Tue Apr 18 12:24:34 2017 -0400
@@ -31,13 +31,15 @@
 ::
 
   [chgserver]
-  idletimeout = 3600 # seconds, after which an idle server will exit
-  skiphash = False   # whether to skip config or env change checks
+  # how long (in seconds) an idle chg server waits before exiting
+  idletimeout = 3600
+
+  # whether to skip config or env change checks
+  skiphash = False
 """
 
 from __future__ import absolute_import
 
-import errno
 import hashlib
 import inspect
 import os
@@ -176,30 +178,22 @@
             else:
                 self._csystem = csystem
 
-        def system(self, cmd, environ=None, cwd=None, onerr=None,
-                   errprefix=None):
+        def _runsystem(self, cmd, environ, cwd, out):
             # fallback to the original system method if the output needs to be
             # captured (to self._buffers), or the output stream is not stdout
             # (e.g. stderr, cStringIO), because the chg client is not aware of
             # these situations and will behave differently (write to stdout).
-            if (any(s[1] for s in self._bufferstates)
+            if (out is not self.fout
                 or not util.safehasattr(self.fout, 'fileno')
                 or self.fout.fileno() != util.stdout.fileno()):
-                return super(chgui, self).system(cmd, environ, cwd, onerr,
-                                                 errprefix)
+                return util.system(cmd, environ=environ, cwd=cwd, out=out)
             self.flush()
-            rc = self._csystem(cmd, util.shellenviron(environ), cwd)
-            if rc and onerr:
-                errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
-                                    util.explainexit(rc)[0])
-                if errprefix:
-                    errmsg = '%s: %s' % (errprefix, errmsg)
-                raise onerr(errmsg)
-            return rc
+            return self._csystem(cmd, util.shellenviron(environ), cwd)
 
-        def _runpager(self, cmd):
-            self._csystem(cmd, util.shellenviron(), type='pager',
+        def _runpager(self, cmd, env=None):
+            self._csystem(cmd, util.shellenviron(env), type='pager',
                           cmdtable={'attachio': attachio})
+            return True
 
     return chgui(srcui)
 
@@ -219,8 +213,8 @@
     # stolen from tortoisehg.util.copydynamicconfig()
     for section, name, value in srcui.walkconfig():
         source = srcui.configsource(section, name)
-        if ':' in source or source == '--config':
-            # path:line or command line
+        if ':' in source or source == '--config' or source.startswith('$'):
+            # path:line or command line, or environ
             continue
         newui.setconfig(section, name, value, source)
 
@@ -287,9 +281,9 @@
 
 _iochannels = [
     # server.ch, ui.fp, mode
-    ('cin', 'fin', 'rb'),
-    ('cout', 'fout', 'wb'),
-    ('cerr', 'ferr', 'wb'),
+    ('cin', 'fin', pycompat.sysstr('rb')),
+    ('cout', 'fout', pycompat.sysstr('wb')),
+    ('cerr', 'ferr', pycompat.sysstr('wb')),
 ]
 
 class chgcmdserver(commandserver.server):
@@ -549,11 +543,7 @@
         # remove another server's socket file. but that's okay
         # since that server will detect and exit automatically and
         # the client will start a new server on demand.
-        try:
-            os.unlink(self._realaddress)
-        except OSError as exc:
-            if exc.errno != errno.ENOENT:
-                raise
+        util.tryunlink(self._realaddress)
 
     def printbanner(self, address):
         # no "listening at" message should be printed to simulate hg behavior
--- a/mercurial/cmdutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/cmdutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import errno
+import itertools
 import os
 import re
 import tempfile
@@ -26,14 +27,12 @@
     changelog,
     copies,
     crecord as crecordmod,
-    dirstateguard as dirstateguardmod,
     encoding,
     error,
     formatter,
     graphmod,
     lock as lockmod,
     match as matchmod,
-    mergeutil,
     obsolete,
     patch,
     pathutil,
@@ -43,9 +42,11 @@
     revlog,
     revset,
     scmutil,
+    smartset,
     templatekw,
     templater,
     util,
+    vfs as vfsmod,
 )
 stringio = util.stringio
 
@@ -201,7 +202,7 @@
                     newlyaddedandmodifiedfiles]
         backups = {}
         if tobackup:
-            backupdir = repo.join('record-backups')
+            backupdir = repo.vfs.join('record-backups')
             try:
                 os.mkdir(backupdir)
             except OSError as err:
@@ -584,7 +585,7 @@
             raise error.CommandError(cmd, _('invalid arguments'))
         if not os.path.isfile(file_):
             raise error.Abort(_("revlog '%s' not found") % file_)
-        r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False),
+        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                           file_[:-2] + ".i")
     return r
 
@@ -728,7 +729,7 @@
                              dryrun=dryrun, cwd=cwd)
         if rename and not dryrun:
             if not after and srcexists and not samefile:
-                util.unlinkpath(repo.wjoin(abssrc))
+                repo.wvfs.unlinkpath(abssrc)
             wctx.forget([abssrc])
 
     # pat: ossep
@@ -971,20 +972,18 @@
                     editor = None
                 else:
                     editor = getcommiteditor(editform=editform, **opts)
-                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                 extra = {}
                 for idfunc in extrapreimport:
                     extrapreimportmap[idfunc](repo, extractdata, extra, opts)
-                try:
-                    if partial:
-                        repo.ui.setconfig('ui', 'allowemptycommit', True)
+                overrides = {}
+                if partial:
+                    overrides[('ui', 'allowemptycommit')] = True
+                with repo.ui.configoverride(overrides, 'import'):
                     n = repo.commit(message, user,
                                     date, match=m,
                                     editor=editor, extra=extra)
                     for idfunc in extrapostimport:
                         extrapostimportmap[idfunc](repo[n])
-                finally:
-                    repo.ui.restoreconfig(allowemptyback)
         else:
             if opts.get('exact') or importbranch:
                 branch = branch or 'default'
@@ -1157,6 +1156,8 @@
 
 def _changesetlabels(ctx):
     labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
+    if ctx.obsolete():
+        labels.append('changeset.obsolete')
     if ctx.troubled():
         labels.append('changeset.troubled')
         for trouble in ctx.troubles():
@@ -1300,7 +1301,7 @@
             for key, value in sorted(extra.items()):
                 # i18n: column positioning for "hg log"
                 self.ui.write(_("extra:       %s=%s\n")
-                              % (key, value.encode('string_escape')),
+                              % (key, util.escapestr(value)),
                               label='ui.debug log.extra')
 
         description = ctx.description().strip()
@@ -1443,26 +1444,16 @@
 
     def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
         changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
-        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
-        filters = {'formatnode': formatnode}
-        defaulttempl = {
-            'parent': '{rev}:{node|formatnode} ',
-            'manifest': '{rev}:{node|formatnode}',
-            'file_copy': '{name} ({source})',
-            'envvar': '{key}={value}',
-            'extra': '{key}={value|stringescape}'
-            }
-        # filecopy is preserved for compatibility reasons
-        defaulttempl['filecopy'] = defaulttempl['file_copy']
         assert not (tmpl and mapfile)
+        defaulttempl = templatekw.defaulttempl
         if mapfile:
-            self.t = templater.templater.frommapfile(mapfile, filters=filters,
+            self.t = templater.templater.frommapfile(mapfile,
                                                      cache=defaulttempl)
         else:
             self.t = formatter.maketemplater(ui, 'changeset', tmpl,
-                                             filters=filters,
                                              cache=defaulttempl)
 
+        self._counter = itertools.count()
         self.cache = {}
 
         # find correct templates for current mode
@@ -1501,6 +1492,7 @@
         props['ctx'] = ctx
         props['repo'] = self.repo
         props['ui'] = self.repo.ui
+        props['index'] = next(self._counter)
         props['revcache'] = {'copies': copies}
         props['cache'] = self.cache
 
@@ -2092,11 +2084,11 @@
     if opts.get('rev'):
         revs = scmutil.revrange(repo, opts['rev'])
     elif follow and repo.dirstate.p1() == nullid:
-        revs = revset.baseset()
+        revs = smartset.baseset()
     elif follow:
         revs = repo.revs('reverse(:.)')
     else:
-        revs = revset.spanset(repo)
+        revs = smartset.spanset(repo)
         revs.reverse()
     return revs
 
@@ -2111,7 +2103,7 @@
     limit = loglimit(opts)
     revs = _logrevs(repo, opts)
     if not revs:
-        return revset.baseset(), None, None
+        return smartset.baseset(), None, None
     expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if opts.get('rev'):
         # User-specified revs might be unsorted, but don't sort before
@@ -2127,7 +2119,7 @@
             if idx >= limit:
                 break
             limitedrevs.append(rev)
-        revs = revset.baseset(limitedrevs)
+        revs = smartset.baseset(limitedrevs)
 
     return revs, expr, filematcher
 
@@ -2142,7 +2134,7 @@
     limit = loglimit(opts)
     revs = _logrevs(repo, opts)
     if not revs:
-        return revset.baseset([]), None, None
+        return smartset.baseset([]), None, None
     expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if expr:
         matcher = revset.match(repo.ui, expr, order=revset.followorder)
@@ -2153,7 +2145,7 @@
             if limit <= idx:
                 break
             limitedrevs.append(r)
-        revs = revset.baseset(limitedrevs)
+        revs = smartset.baseset(limitedrevs)
 
     return revs, expr, filematcher
 
@@ -2162,6 +2154,7 @@
     if not spec:
         return templatekw.showgraphnode  # fast path for "{graphnode}"
 
+    spec = templater.unquotestring(spec)
     templ = formatter.gettemplater(ui, 'graphnode', spec)
     cache = {}
     if isinstance(displayer, changeset_templater):
@@ -2225,7 +2218,7 @@
             graphmod.ascii(ui, state, type, char, lines, coldata)
     displayer.close()
 
-def graphlog(ui, repo, *pats, **opts):
+def graphlog(ui, repo, pats, opts):
     # Parameters are identical to log command ones
     revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
     revdag = graphmod.dagwalker(repo, revs)
@@ -2236,6 +2229,8 @@
         if opts.get('rev'):
             endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
+
+    ui.pager('log')
     displayer = show_changeset(ui, repo, opts, buffered=True)
     displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                  filematcher)
@@ -2296,6 +2291,15 @@
         bad.extend(f for f in rejected if f in match.files())
     return bad
 
+def addwebdirpath(repo, serverpath, webconf):
+    webconf[serverpath] = repo.root
+    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
+
+    for r in repo.revs('filelog("path:.hgsub")'):
+        ctx = repo[r]
+        for subpath in ctx.substate:
+            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
+
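addwebdirpath registers a repository, and recursively any subrepositories
recorded in .hgsub anywhere in its history, in an hgweb-style mapping of
server paths to on-disk roots. Roughly (paths below are purely hypothetical,
and the joined subrepo path comes from each subrepo type's addwebdirpath):

    webconf = {}
    addwebdirpath(repo, 'projects/main', webconf)
    # webconf now resembles:
    # {'projects/main': '/srv/hg/main',
    #  'projects/main/vendor/lib': '/srv/hg/main/vendor/lib'}
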
 def forget(ui, repo, match, prefix, explicitonly):
     join = lambda f: os.path.join(prefix, f)
     bad = []
@@ -2483,7 +2487,7 @@
             for f in list:
                 if f in added:
                     continue # we never unlink added files on remove
-                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
+                repo.wvfs.unlinkpath(f, ignoremissing=True)
         repo[None].forget(list)
 
     if warn:
@@ -2764,6 +2768,7 @@
     while forms:
         tmpl = repo.ui.config('committemplate', '.'.join(forms))
         if tmpl:
+            tmpl = templater.unquotestring(tmpl)
             templatetext = committext = buildcommittemplate(
                 repo, ctx, subs, extramsg, tmpl)
             break
@@ -2975,13 +2980,6 @@
         clean    = set(changes.clean)
         modadded = set()
 
-        # split between files known in target manifest and the others
-        smf = set(mf)
-
-        # determine the exact nature of the deleted changesets
-        deladded = _deleted - smf
-        deleted = _deleted - deladded
-
         # We need to account for the state of the file in the dirstate,
         # even when we revert against something else than parent. This will
         # slightly alter the behavior of revert (doing back up or not, delete
@@ -3023,7 +3021,10 @@
         # in case of merge, files that are actually added can be reported as
         # modified, we need to post process the result
         if p2 != nullid:
-            mergeadd = dsmodified - smf
+            mergeadd = set(dsmodified)
+            for path in dsmodified:
+                if path in mf:
+                    mergeadd.remove(path)
             dsadded |= mergeadd
             dsmodified -= mergeadd
 
@@ -3036,6 +3037,13 @@
                 dsremoved.add(src)
                 names[src] = (repo.pathto(src, cwd), True)
 
+        # determine the exact nature of the deleted changesets
+        deladded = set(_deleted)
+        for path in _deleted:
+            if path in mf:
+                deladded.remove(path)
+        deleted = _deleted - deladded
+
         # distinguish between file to forget and the other
         added = set()
         for abs in dsadded:
@@ -3205,7 +3213,7 @@
 
     def doremove(f):
         try:
-            util.unlinkpath(repo.wjoin(f))
+            repo.wvfs.unlinkpath(f)
         except OSError:
             pass
         repo.dirstate.remove(f)
@@ -3254,15 +3262,18 @@
         diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
         diffopts.nodates = True
         diffopts.git = True
-        reversehunks = repo.ui.configbool('experimental',
-                                          'revertalternateinteractivemode',
-                                          True)
+        operation = 'discard'
+        reversehunks = True
+        if node != parent:
+            operation = 'revert'
+            reversehunks = repo.ui.configbool('experimental',
+                                              'revertalternateinteractivemode',
+                                              True)
         if reversehunks:
             diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
         else:
             diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
         originalchunks = patch.parsepatch(diff)
-        operation = 'discard' if node == parent else 'revert'
 
         try:
 
@@ -3366,11 +3377,6 @@
 
     return cmd
 
-def checkunresolved(ms):
-    ms._repo.ui.deprecwarn('checkunresolved moved from cmdutil to mergeutil',
-                           '4.1')
-    return mergeutil.checkunresolved(ms)
-
 # a list of (ui, repo, otherpeer, opts, missing) functions called by
 # commands.outgoing.  "missing" is "missing" of the result of
 # "findcommonoutgoing()"
@@ -3420,7 +3426,7 @@
             raise error.Abort(msg, hint=hint)
     for f, clearable, allowcommit, msg, hint in unfinishedstates:
         if clearable and repo.vfs.exists(f):
-            util.unlink(repo.join(f))
+            util.unlink(repo.vfs.join(f))
 
 afterresolvedstates = [
     ('graftstate',
@@ -3477,10 +3483,3 @@
     if after[1]:
         hint = after[0]
     raise error.Abort(_('no %s in progress') % task, hint=hint)
-
-class dirstateguard(dirstateguardmod.dirstateguard):
-    def __init__(self, repo, name):
-        dirstateguardmod.dirstateguard.__init__(self, repo, name)
-        repo.ui.deprecwarn(
-            'dirstateguard has moved from cmdutil to dirstateguard',
-            '4.1')
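
With these 4.1-era deprecation shims removed, any remaining callers must use
the new homes directly. A hedged sketch of the replacements:

    from mercurial import dirstateguard, mergeutil

    mergeutil.checkunresolved(ms)       # was cmdutil.checkunresolved(ms)
    guard = dirstateguard.dirstateguard(repo, 'backupname')
    # ... mutate the dirstate ...
    guard.close()                       # or release() to restore on error
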
--- a/mercurial/color.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/color.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,59 +7,499 @@
 
 from __future__ import absolute_import
 
-_styles = {'grep.match': 'red bold',
-           'grep.linenumber': 'green',
-           'grep.rev': 'green',
-           'grep.change': 'green',
-           'grep.sep': 'cyan',
-           'grep.filename': 'magenta',
-           'grep.user': 'magenta',
-           'grep.date': 'magenta',
-           'bookmarks.active': 'green',
-           'branches.active': 'none',
-           'branches.closed': 'black bold',
-           'branches.current': 'green',
-           'branches.inactive': 'none',
-           'diff.changed': 'white',
-           'diff.deleted': 'red',
-           'diff.diffline': 'bold',
-           'diff.extended': 'cyan bold',
-           'diff.file_a': 'red bold',
-           'diff.file_b': 'green bold',
-           'diff.hunk': 'magenta',
-           'diff.inserted': 'green',
-           'diff.tab': '',
-           'diff.trailingwhitespace': 'bold red_background',
-           'changeset.public' : '',
-           'changeset.draft' : '',
-           'changeset.secret' : '',
-           'diffstat.deleted': 'red',
-           'diffstat.inserted': 'green',
-           'histedit.remaining': 'red bold',
-           'ui.prompt': 'yellow',
-           'log.changeset': 'yellow',
-           'patchbomb.finalsummary': '',
-           'patchbomb.from': 'magenta',
-           'patchbomb.to': 'cyan',
-           'patchbomb.subject': 'green',
-           'patchbomb.diffstats': '',
-           'rebase.rebased': 'blue',
-           'rebase.remaining': 'red bold',
-           'resolve.resolved': 'green bold',
-           'resolve.unresolved': 'red bold',
-           'shelve.age': 'cyan',
-           'shelve.newest': 'green bold',
-           'shelve.name': 'blue bold',
-           'status.added': 'green bold',
-           'status.clean': 'none',
-           'status.copied': 'none',
-           'status.deleted': 'cyan bold underline',
-           'status.ignored': 'black bold',
-           'status.modified': 'blue bold',
-           'status.removed': 'red bold',
-           'status.unknown': 'magenta bold underline',
-           'tags.normal': 'green',
-           'tags.local': 'black bold'}
+import re
+
+from .i18n import _
+
+from . import (
+    encoding,
+    pycompat,
+    util
+)
+
+try:
+    import curses
+    # Mapping from effect name to terminfo attribute name (or raw code) or
+    # color number.  This will also force-load the curses module.
+    _baseterminfoparams = {
+        'none': (True, 'sgr0', ''),
+        'standout': (True, 'smso', ''),
+        'underline': (True, 'smul', ''),
+        'reverse': (True, 'rev', ''),
+        'inverse': (True, 'rev', ''),
+        'blink': (True, 'blink', ''),
+        'dim': (True, 'dim', ''),
+        'bold': (True, 'bold', ''),
+        'invisible': (True, 'invis', ''),
+        'italic': (True, 'sitm', ''),
+        'black': (False, curses.COLOR_BLACK, ''),
+        'red': (False, curses.COLOR_RED, ''),
+        'green': (False, curses.COLOR_GREEN, ''),
+        'yellow': (False, curses.COLOR_YELLOW, ''),
+        'blue': (False, curses.COLOR_BLUE, ''),
+        'magenta': (False, curses.COLOR_MAGENTA, ''),
+        'cyan': (False, curses.COLOR_CYAN, ''),
+        'white': (False, curses.COLOR_WHITE, ''),
+    }
+except ImportError:
+    curses = None
+    _baseterminfoparams = {}
+
+_enabledbydefault = True
+
+# start and stop parameters for effects
+_effects = {
+    'none': 0,
+    'black': 30,
+    'red': 31,
+    'green': 32,
+    'yellow': 33,
+    'blue': 34,
+    'magenta': 35,
+    'cyan': 36,
+    'white': 37,
+    'bold': 1,
+    'italic': 3,
+    'underline': 4,
+    'inverse': 7,
+    'dim': 2,
+    'black_background': 40,
+    'red_background': 41,
+    'green_background': 42,
+    'yellow_background': 43,
+    'blue_background': 44,
+    'purple_background': 45,
+    'cyan_background': 46,
+    'white_background': 47,
+    }
+
+_defaultstyles = {
+    'grep.match': 'red bold',
+    'grep.linenumber': 'green',
+    'grep.rev': 'green',
+    'grep.change': 'green',
+    'grep.sep': 'cyan',
+    'grep.filename': 'magenta',
+    'grep.user': 'magenta',
+    'grep.date': 'magenta',
+    'bookmarks.active': 'green',
+    'branches.active': 'none',
+    'branches.closed': 'black bold',
+    'branches.current': 'green',
+    'branches.inactive': 'none',
+    'diff.changed': 'white',
+    'diff.deleted': 'red',
+    'diff.diffline': 'bold',
+    'diff.extended': 'cyan bold',
+    'diff.file_a': 'red bold',
+    'diff.file_b': 'green bold',
+    'diff.hunk': 'magenta',
+    'diff.inserted': 'green',
+    'diff.tab': '',
+    'diff.trailingwhitespace': 'bold red_background',
+    'changeset.public' : '',
+    'changeset.draft' : '',
+    'changeset.secret' : '',
+    'diffstat.deleted': 'red',
+    'diffstat.inserted': 'green',
+    'histedit.remaining': 'red bold',
+    'ui.prompt': 'yellow',
+    'log.changeset': 'yellow',
+    'patchbomb.finalsummary': '',
+    'patchbomb.from': 'magenta',
+    'patchbomb.to': 'cyan',
+    'patchbomb.subject': 'green',
+    'patchbomb.diffstats': '',
+    'rebase.rebased': 'blue',
+    'rebase.remaining': 'red bold',
+    'resolve.resolved': 'green bold',
+    'resolve.unresolved': 'red bold',
+    'shelve.age': 'cyan',
+    'shelve.newest': 'green bold',
+    'shelve.name': 'blue bold',
+    'status.added': 'green bold',
+    'status.clean': 'none',
+    'status.copied': 'none',
+    'status.deleted': 'cyan bold underline',
+    'status.ignored': 'black bold',
+    'status.modified': 'blue bold',
+    'status.removed': 'red bold',
+    'status.unknown': 'magenta bold underline',
+    'tags.normal': 'green',
+    'tags.local': 'black bold',
+}
 
 def loadcolortable(ui, extname, colortable):
-    _styles.update(colortable)
+    _defaultstyles.update(colortable)
+
+def _terminfosetup(ui, mode):
+    '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
+
+    # If curses failed to load, there is nothing to set up; just return.
+    if curses is None:
+        return
+    # Otherwise, see what the config file says.
+    if mode not in ('auto', 'terminfo'):
+        return
+    ui._terminfoparams.update(_baseterminfoparams)
+
+    for key, val in ui.configitems('color'):
+        if key.startswith('color.'):
+            newval = (False, int(val), '')
+            ui._terminfoparams[key[6:]] = newval
+        elif key.startswith('terminfo.'):
+            newval = (True, '', val.replace('\\E', '\x1b'))
+            ui._terminfoparams[key[9:]] = newval
+    try:
+        curses.setupterm()
+    except curses.error as e:
+        ui._terminfoparams.clear()
+        return
+
+    for key, (b, e, c) in ui._terminfoparams.items():
+        if not b:
+            continue
+        if not c and not curses.tigetstr(e):
+            # Most terminals don't support dim, invis, etc., so don't be
+            # noisy; report the missing entry via ui.debug().
+            ui.debug("no terminfo entry for %s\n" % e)
+            del ui._terminfoparams[key]
+    if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
+        # Only warn about missing terminfo entries if we explicitly asked for
+        # terminfo mode.
+        if mode == "terminfo":
+            ui.warn(_("no terminfo entry for setab/setaf: reverting to "
+              "ECMA-48 color\n"))
+        ui._terminfoparams.clear()
+
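As the parsing loop above shows, [color] keys prefixed with 'color.' map an
effect name to a terminal color number, while 'terminfo.' keys supply a raw
escape sequence in which \E stands for ESC. For example, in an hgrc:

    [color]
    color.brightred = 9        # effect 'brightred' is color number 9
    terminfo.dim = \E[2m       # raw code for 'dim'; \E becomes \x1b
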
+def setup(ui):
+    """configure color on a ui
+
+    This function both sets the colormode on the ui object and reads
+    the configuration, looking for custom colors and effect definitions."""
+    mode = _modesetup(ui)
+    ui._colormode = mode
+    if mode and mode != 'debug':
+        configstyles(ui)
+
+def _modesetup(ui):
+    if ui.plain():
+        return None
+    default = 'never'
+    if _enabledbydefault:
+        default = 'auto'
+    config = ui.config('ui', 'color', default)
+    if config == 'debug':
+        return 'debug'
+
+    auto = (config == 'auto')
+    always = not auto and util.parsebool(config)
+    if not always and not auto:
+        return None
+
+    formatted = (always or (encoding.environ.get('TERM') != 'dumb'
+                 and ui.formatted()))
+
+    mode = ui.config('color', 'mode', 'auto')
+
+    # If pager is active, color.pagermode overrides color.mode.
+    if getattr(ui, 'pageractive', False):
+        mode = ui.config('color', 'pagermode', mode)
+
+    realmode = mode
+    if mode == 'auto':
+        if pycompat.osname == 'nt':
+            term = encoding.environ.get('TERM')
+            # TERM won't be defined in a vanilla cmd.exe environment.
+
+            # UNIX-like environments on Windows such as Cygwin and MSYS will
+            # set TERM. They appear to make a best-effort attempt at setting
+            # it to something appropriate. However, not all environments with
+            # TERM defined support ANSI. Since "ansi" could result in terminal
+            # gibberish, we err on the side of selecting "win32". However, if
+            # w32effects is not defined, we almost certainly don't support
+            # "win32", so don't even try.
+            if (term and 'xterm' in term) or not w32effects:
+                realmode = 'ansi'
+            else:
+                realmode = 'win32'
+        else:
+            realmode = 'ansi'
+
+    def modewarn():
+        # only warn if color.mode was explicitly set and we're in
+        # a formatted terminal
+        if mode == realmode and ui.formatted():
+            ui.warn(_('warning: failed to set color mode to %s\n') % mode)
+
+    if realmode == 'win32':
+        ui._terminfoparams.clear()
+        if not w32effects:
+            modewarn()
+            return None
+    elif realmode == 'ansi':
+        ui._terminfoparams.clear()
+    elif realmode == 'terminfo':
+        _terminfosetup(ui, mode)
+        if not ui._terminfoparams:
+            ## FIXME Shouldn't we return None in this case too?
+            modewarn()
+            realmode = 'ansi'
+    else:
+        return None
+
+    if always or (auto and formatted):
+        return realmode
+    return None
+
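One plausible configuration exercising the precedence implemented above,
with ui.color selecting whether to colorize and color.mode/pagermode
selecting how:

    [ui]
    color = auto          # never, auto, always, or debug
    [color]
    mode = auto           # ansi, win32, or terminfo; auto picks per platform
    pagermode = ansi      # overrides color.mode while the pager is active
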
+def configstyles(ui):
+    ui._styles.update(_defaultstyles)
+    for status, cfgeffects in ui.configitems('color'):
+        if '.' not in status or status.startswith(('color.', 'terminfo.')):
+            continue
+        cfgeffects = ui.configlist('color', status)
+        if cfgeffects:
+            good = []
+            for e in cfgeffects:
+                if valideffect(ui, e):
+                    good.append(e)
+                else:
+                    ui.warn(_("ignoring unknown color/effect %r "
+                              "(configured in color.%s)\n")
+                            % (e, status))
+            ui._styles[status] = ' '.join(good)
+
+def _activeeffects(ui):
+    '''Return the effects map for the color mode set on the ui.'''
+    if ui._colormode == 'win32':
+        return w32effects
+    elif ui._colormode is not None:
+        return _effects
+    return {}
+
+def valideffect(ui, effect):
+    'Determine whether the effect is valid.'
+    return ((not ui._terminfoparams and effect in _activeeffects(ui))
+             or (effect in ui._terminfoparams
+                 or effect[:-11] in ui._terminfoparams))
+
+def _effect_str(ui, effect):
+    '''Helper function for render_effects().'''
+
+    bg = False
+    if effect.endswith('_background'):
+        bg = True
+        effect = effect[:-11]
+    try:
+        attr, val, termcode = ui._terminfoparams[effect]
+    except KeyError:
+        return ''
+    if attr:
+        if termcode:
+            return termcode
+        else:
+            return curses.tigetstr(val)
+    elif bg:
+        return curses.tparm(curses.tigetstr('setab'), val)
+    else:
+        return curses.tparm(curses.tigetstr('setaf'), val)
+
+def _mergeeffects(text, start, stop):
+    """Insert start sequence at every occurrence of stop sequence
+
+    >>> s = _mergeeffects('cyan', '[C]', '|')
+    >>> s = _mergeeffects(s + 'yellow', '[Y]', '|')
+    >>> s = _mergeeffects('ma' + s + 'genta', '[M]', '|')
+    >>> s = _mergeeffects('red' + s, '[R]', '|')
+    >>> s
+    '[R]red[M]ma[Y][C]cyan|[R][M][Y]yellow|[R][M]genta|'
+    """
+    parts = []
+    for t in text.split(stop):
+        if not t:
+            continue
+        parts.extend([start, t, stop])
+    return ''.join(parts)
+
+def _render_effects(ui, text, effects):
+    'Wrap text in commands to turn on each effect.'
+    if not text:
+        return text
+    if ui._terminfoparams:
+        start = ''.join(_effect_str(ui, effect)
+                        for effect in ['none'] + effects.split())
+        stop = _effect_str(ui, 'none')
+    else:
+        activeeffects = _activeeffects(ui)
+        start = [pycompat.bytestr(activeeffects[e])
+                 for e in ['none'] + effects.split()]
+        start = '\033[' + ';'.join(start) + 'm'
+        stop = '\033[' + pycompat.bytestr(activeeffects['none']) + 'm'
+    return _mergeeffects(text, start, stop)
+
+_ansieffectre = re.compile(br'\x1b\[[0-9;]*m')
+
+def stripeffects(text):
+    """Strip ANSI control codes which could be inserted by colorlabel()"""
+    return _ansieffectre.sub('', text)
+
+def colorlabel(ui, msg, label):
+    """add color control code according to the mode"""
+    if ui._colormode == 'debug':
+        if label and msg:
+            if msg[-1] == '\n':
+                msg = "[%s|%s]\n" % (label, msg[:-1])
+            else:
+                msg = "[%s|%s]" % (label, msg)
+    elif ui._colormode is not None:
+        effects = []
+        for l in label.split():
+            s = ui._styles.get(l, '')
+            if s:
+                effects.append(s)
+            elif valideffect(ui, l):
+                effects.append(l)
+        effects = ' '.join(effects)
+        if effects:
+            msg = '\n'.join([_render_effects(ui, line, effects)
+                             for line in msg.split('\n')])
+    return msg
+
+w32effects = None
+if pycompat.osname == 'nt':
+    import ctypes
+
+    _kernel32 = ctypes.windll.kernel32
+
+    _WORD = ctypes.c_ushort
+
+    _INVALID_HANDLE_VALUE = -1
+
+    class _COORD(ctypes.Structure):
+        _fields_ = [('X', ctypes.c_short),
+                    ('Y', ctypes.c_short)]
+
+    class _SMALL_RECT(ctypes.Structure):
+        _fields_ = [('Left', ctypes.c_short),
+                    ('Top', ctypes.c_short),
+                    ('Right', ctypes.c_short),
+                    ('Bottom', ctypes.c_short)]
+
+    class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
+        _fields_ = [('dwSize', _COORD),
+                    ('dwCursorPosition', _COORD),
+                    ('wAttributes', _WORD),
+                    ('srWindow', _SMALL_RECT),
+                    ('dwMaximumWindowSize', _COORD)]
+
+    _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
+    _STD_ERROR_HANDLE = 0xfffffff4  # (DWORD)-12
+
+    _FOREGROUND_BLUE = 0x0001
+    _FOREGROUND_GREEN = 0x0002
+    _FOREGROUND_RED = 0x0004
+    _FOREGROUND_INTENSITY = 0x0008
+
+    _BACKGROUND_BLUE = 0x0010
+    _BACKGROUND_GREEN = 0x0020
+    _BACKGROUND_RED = 0x0040
+    _BACKGROUND_INTENSITY = 0x0080
+
+    _COMMON_LVB_REVERSE_VIDEO = 0x4000
+    _COMMON_LVB_UNDERSCORE = 0x8000
+
+    # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
+    w32effects = {
+        'none': -1,
+        'black': 0,
+        'red': _FOREGROUND_RED,
+        'green': _FOREGROUND_GREEN,
+        'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
+        'blue': _FOREGROUND_BLUE,
+        'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
+        'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
+        'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
+        'bold': _FOREGROUND_INTENSITY,
+        'black_background': 0x100,                  # unused value > 0x0f
+        'red_background': _BACKGROUND_RED,
+        'green_background': _BACKGROUND_GREEN,
+        'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
+        'blue_background': _BACKGROUND_BLUE,
+        'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
+        'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
+        'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
+                             _BACKGROUND_BLUE),
+        'bold_background': _BACKGROUND_INTENSITY,
+        'underline': _COMMON_LVB_UNDERSCORE,  # double-byte charsets only
+        'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
+    }
+
+    passthrough = set([_FOREGROUND_INTENSITY,
+                       _BACKGROUND_INTENSITY,
+                       _COMMON_LVB_UNDERSCORE,
+                       _COMMON_LVB_REVERSE_VIDEO])
+
+    stdout = _kernel32.GetStdHandle(
+                  _STD_OUTPUT_HANDLE)  # don't close the handle returned
+    if stdout is None or stdout == _INVALID_HANDLE_VALUE:
+        w32effects = None
+    else:
+        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
+        if not _kernel32.GetConsoleScreenBufferInfo(
+                    stdout, ctypes.byref(csbi)):
+            # stdout may not support GetConsoleScreenBufferInfo()
+            # when called from subprocess or redirected
+            w32effects = None
+        else:
+            origattr = csbi.wAttributes
+            ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
+                                re.MULTILINE | re.DOTALL)
+
+    def win32print(ui, writefunc, *msgs, **opts):
+        for text in msgs:
+            _win32print(ui, text, writefunc, **opts)
+
+    def _win32print(ui, text, writefunc, **opts):
+        label = opts.get('label', '')
+        attr = origattr
+
+        def mapcolor(val, attr):
+            if val == -1:
+                return origattr
+            elif val in passthrough:
+                return attr | val
+            elif val > 0x0f:
+                return (val & 0x70) | (attr & 0x8f)
+            else:
+                return (val & 0x07) | (attr & 0xf8)
+
+        # determine console attributes based on labels
+        for l in label.split():
+            style = ui._styles.get(l, '')
+            for effect in style.split():
+                try:
+                    attr = mapcolor(w32effects[effect], attr)
+                except KeyError:
+                    # w32effects may lack certain attributes, so skip any
+                    # that are not found
+                    pass
+        # hack to ensure regexp finds data
+        if not text.startswith('\033['):
+            text = '\033[m' + text
+
+        # Look for ANSI-like codes embedded in text
+        m = re.match(ansire, text)
+
+        try:
+            while m:
+                for sattr in m.group(1).split(';'):
+                    if sattr:
+                        attr = mapcolor(int(sattr), attr)
+                ui.flush()
+                _kernel32.SetConsoleTextAttribute(stdout, attr)
+                writefunc(m.group(2), **opts)
+                m = re.match(ansire, m.group(3))
+        finally:
+            # Explicitly reset original attributes
+            ui.flush()
+            _kernel32.SetConsoleTextAttribute(stdout, origattr)
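
In ANSI mode the machinery above reduces to wrapping each line in SGR escape
sequences built from the _effects table. A standalone sketch of what
_render_effects produces for the style 'red bold' (codes 0, 31 and 1 are
'none', 'red' and 'bold' in the table):

    start = '\033[' + ';'.join(['0', '31', '1']) + 'm'  # "\x1b[0;31;1m"
    stop = '\033[0m'                                    # reset to defaults
    print(start + 'conflict' + stop)                    # bold red "conflict"
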
--- a/mercurial/commands.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/commands.py	Tue Apr 18 12:24:34 2017 -0400
@@ -11,17 +11,10 @@
 import errno
 import os
 import re
-import socket
-import string
-import sys
-import tempfile
-import time
 
 from .i18n import _
 from .node import (
-    bin,
     hex,
-    nullhex,
     nullid,
     nullrev,
     short,
@@ -40,30 +33,24 @@
     error,
     exchange,
     extensions,
-    formatter,
     graphmod,
     hbisect,
     help,
     hg,
     lock as lockmod,
     merge as mergemod,
-    minirst,
     obsolete,
     patch,
     phases,
-    policy,
-    pvec,
     pycompat,
-    repair,
-    revlog,
-    revset,
+    rcutil,
+    revsetlang,
     scmutil,
     server,
     sshserver,
-    sslutil,
     streamclone,
+    tags as tagsmod,
     templatekw,
-    templater,
     ui as uimod,
     util,
 )
@@ -92,6 +79,11 @@
      _('do not prompt, automatically pick the first choice for all prompts')),
     ('q', 'quiet', None, _('suppress output')),
     ('v', 'verbose', None, _('enable additional output')),
+    ('', 'color', '',
+     # i18n: 'always', 'auto', 'never', and 'debug' are keywords
+     # and should not be translated
+     _("when to colorize (boolean, always, auto, never, or debug)"),
+     _('TYPE')),
     ('', 'config', [],
      _('set/override config option (use \'section.name=value\')'),
      _('CONFIG')),
@@ -107,6 +99,8 @@
     ('', 'version', None, _('output version information and exit')),
     ('h', 'help', None, _('display help and exit')),
     ('', 'hidden', False, _('consider hidden changesets')),
+    ('', 'pager', 'auto',
+     _("when to paginate (boolean, always, auto, or never)"), _('TYPE')),
 ]
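
Both new global options can be exercised on any command, for example:

    $ hg --color=always log -l 3 | less -R   # keep ANSI codes through a pipe
    $ hg --color=debug status                # print [label|text] markup
    $ hg --pager=never diff                  # disable the pager for one run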
 
 dryrunopts = [('n', 'dry-run', None,
@@ -168,6 +162,7 @@
 diffopts = [
     ('a', 'text', None, _('treat all files as text')),
     ('g', 'git', None, _('use git extended diff format')),
+    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
     ('', 'nodates', None, _('omit dates from diff headers'))
 ]
 
@@ -433,6 +428,8 @@
     if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
         raise error.Abort(_('at least one of -n/-c is required for -l'))
 
+    ui.pager('annotate')
+
     if fm.isplain():
         def makefunc(get, fmt):
             return lambda x: fmt(get(x))
@@ -892,7 +889,8 @@
                 # update state
                 state['current'] = [node]
                 hbisect.save_state(repo, state)
-                status = ui.system(command, environ={'HG_NODE': hex(node)})
+                status = ui.system(command, environ={'HG_NODE': hex(node)},
+                                   blockedtag='bisect_check')
                 if status == 125:
                     transition = "skip"
                 elif status == 0:
@@ -1228,6 +1226,7 @@
     Returns 0.
     """
 
+    ui.pager('branches')
     fm = ui.formatter('branches', opts)
     hexfunc = fm.hexfunc
 
@@ -1264,6 +1263,7 @@
         fmt = ' ' * padsize + ' %d:%s'
         fm.condwrite(not ui.quiet, 'rev node', fmt, rev, hexfunc(ctx.node()),
                      label='log.changeset changeset.%s' % ctx.phasestr())
+        fm.context(ctx=ctx)
         fm.data(active=isactive, closed=not isopen, current=current)
         if not ui.quiet:
             fm.plain(notice)
@@ -1282,12 +1282,11 @@
     ('a', 'all', None, _('bundle all changesets in the repository')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
     ] + remoteopts,
-    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
+    _('[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
 def bundle(ui, repo, fname, dest=None, **opts):
-    """create a changegroup file
-
-    Generate a changegroup file collecting changesets to be added
-    to a repository.
+    """create a bundle file
+
+    Generate a bundle file containing data to be added to a repository.
 
     To create a bundle containing all changesets, use -a/--all
     (or --base null). Otherwise, hg assumes the destination will have
@@ -1295,11 +1294,10 @@
     will assume the repository has all the nodes in destination, or
     default-push/default if no destination is specified.
 
-    You can change bundle format with the -t/--type option. You can
-    specify a compression, a bundle version or both using a dash
-    (comp-version). The available compression methods are: none, bzip2,
-    and gzip (by default, bundles are compressed using bzip2). The
-    available formats are: v1, v2 (default to most suitable).
+    You can change bundle format with the -t/--type option. See
+    :hg:`help bundlespec` for documentation on this format. By default,
+    the most appropriate format is used and compression defaults to
+    bzip2.
 
     The bundle file can then be transferred using conventional means
     and applied to another repository with the unbundle or pull
@@ -1324,7 +1322,7 @@
                 repo, bundletype, strict=False)
     except error.UnsupportedBundleSpecification as e:
         raise error.Abort(str(e),
-                          hint=_("see 'hg help bundle' for supported "
+                          hint=_("see 'hg help bundlespec' for supported "
                                  "values for --type"))
 
     # Packed bundles are a pseudo bundle format for now.
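
The bundlespec values accepted by -t/--type pair a compression method with a
bundle version, joined by a dash, for example:

    $ hg bundle -t gzip-v2 ../changes.hg      # gzip-compressed HG20 bundle
    $ hg bundle -t none-v1 --all ../plain.hg  # uncompressed HG10 bundle
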
@@ -1379,9 +1377,11 @@
             bcompression = 'UN'
         bversion = 'HG10' + bcompression
         bcompression = None
+    elif cgversion in ('02', '03'):
+        bversion = 'HG20'
     else:
-        assert cgversion == '02'
-        bversion = 'HG20'
+        raise error.ProgrammingError(
+            'bundle: unexpected changegroup version %s' % cgversion)
 
     # TODO compression options should be derived from bundlespec parsing.
     # This is a temporary hack to allow adjusting bundle compression
@@ -1427,6 +1427,7 @@
     ctx = scmutil.revsingle(repo, opts.get('rev'))
     m = scmutil.match(ctx, (file1,) + pats, opts)
 
+    ui.pager('cat')
     return cmdutil.cat(ui, repo, ctx, m, '', **opts)
 
 @command('^clone',
@@ -1638,10 +1639,12 @@
         release(lock, wlock)
 
 def _docommit(ui, repo, *pats, **opts):
+    opts = pycompat.byteskwargs(opts)
     if opts.get('interactive'):
         opts.pop('interactive')
         ret = cmdutil.dorecord(ui, repo, commit, None, False,
-                               cmdutil.recordfilter, *pats, **opts)
+                               cmdutil.recordfilter, *pats,
+                               **pycompat.strkwargs(opts))
         # ret can be 0 (no changes to record) or the value returned by
         # commit(), 1 if nothing changed or None on success.
         return 1 if ret == 0 else ret
@@ -1704,25 +1707,23 @@
             return 1
     else:
         def commitfunc(ui, repo, message, match, opts):
-            backup = ui.backupconfig('phases', 'new-commit')
+            overrides = {}
+            if opts.get('secret'):
+                overrides[('phases', 'new-commit')] = 'secret'
+
             baseui = repo.baseui
-            basebackup = baseui.backupconfig('phases', 'new-commit')
-            try:
-                if opts.get('secret'):
-                    ui.setconfig('phases', 'new-commit', 'secret', 'commit')
-                    # Propagate to subrepos
-                    baseui.setconfig('phases', 'new-commit', 'secret', 'commit')
-
-                editform = cmdutil.mergeeditform(repo[None], 'commit.normal')
-                editor = cmdutil.getcommiteditor(editform=editform, **opts)
-                return repo.commit(message, opts.get('user'), opts.get('date'),
-                                   match,
-                                   editor=editor,
-                                   extra=extra)
-            finally:
-                ui.restoreconfig(backup)
-                repo.baseui.restoreconfig(basebackup)
-
+            with baseui.configoverride(overrides, 'commit'):
+                with ui.configoverride(overrides, 'commit'):
+                    editform = cmdutil.mergeeditform(repo[None],
+                                                     'commit.normal')
+                    editor = cmdutil.getcommiteditor(
+                        editform=editform, **pycompat.strkwargs(opts))
+                    return repo.commit(message,
+                                       opts.get('user'),
+                                       opts.get('date'),
+                                       match,
+                                       editor=editor,
+                                       extra=extra)
 
         node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
 
@@ -1775,11 +1776,11 @@
         if opts.get('local'):
             if not repo:
                 raise error.Abort(_("can't use --local outside a repository"))
-            paths = [repo.join('hgrc')]
+            paths = [repo.vfs.join('hgrc')]
         elif opts.get('global'):
-            paths = scmutil.systemrcpath()
+            paths = rcutil.systemrcpath()
         else:
-            paths = scmutil.userrcpath()
+            paths = rcutil.userrcpath()
 
         for f in paths:
             if os.path.exists(f):
@@ -1799,12 +1800,19 @@
 
         editor = ui.geteditor()
         ui.system("%s \"%s\"" % (editor, f),
-                  onerr=error.Abort, errprefix=_("edit failed"))
+                  onerr=error.Abort, errprefix=_("edit failed"),
+                  blockedtag='config_edit')
         return
-
+    ui.pager('config')
     fm = ui.formatter('config', opts)
-    for f in scmutil.rcpath():
-        ui.debug('read config from: %s\n' % f)
+    for t, f in rcutil.rccomponents():
+        if t == 'path':
+            ui.debug('read config from: %s\n' % f)
+        elif t == 'items':
+            for section, name, value, source in f:
+                ui.debug('set config by: %s\n' % source)
+        else:
+            raise error.ProgrammingError('unknown rctype: %s' % t)
     untrusted = bool(opts.get('untrusted'))
     if values:
         sections = [v for v in values if '.' not in v]
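
rcutil.rccomponents() generalizes the old flat rcpath() list into typed
components; a hypothetical return value (paths invented for illustration):

    [('path', '/etc/mercurial/hgrc'),                 # a file to be parsed
     ('path', '/home/alice/.hgrc'),
     ('items', [(section, name, value, source), ...])]  # applied directly
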
@@ -1814,7 +1822,7 @@
     matched = False
     for section, name, value in ui.walkconfig(untrusted=untrusted):
         source = ui.configsource(section, name, untrusted)
-        value = str(value)
+        value = pycompat.bytestr(value)
         if fm.isplain():
             source = source or 'none'
             value = value.replace('\n', '\\n')
@@ -1866,1176 +1874,6 @@
     with repo.wlock(False):
         return cmdutil.copy(ui, repo, pats, opts)
 
-@command('debuginstall', [] + formatteropts, '', norepo=True)
-def debuginstall(ui, **opts):
-    '''test Mercurial installation
-
-    Returns 0 on success.
-    '''
-
-    def writetemp(contents):
-        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
-        f = os.fdopen(fd, "wb")
-        f.write(contents)
-        f.close()
-        return name
-
-    problems = 0
-
-    fm = ui.formatter('debuginstall', opts)
-    fm.startitem()
-
-    # encoding
-    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
-    err = None
-    try:
-        encoding.fromlocal("test")
-    except error.Abort as inst:
-        err = inst
-        problems += 1
-    fm.condwrite(err, 'encodingerror', _(" %s\n"
-                 " (check that your locale is properly set)\n"), err)
-
-    # Python
-    fm.write('pythonexe', _("checking Python executable (%s)\n"),
-             pycompat.sysexecutable)
-    fm.write('pythonver', _("checking Python version (%s)\n"),
-             ("%d.%d.%d" % sys.version_info[:3]))
-    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
-             os.path.dirname(os.__file__))
-
-    security = set(sslutil.supportedprotocols)
-    if sslutil.hassni:
-        security.add('sni')
-
-    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
-             fm.formatlist(sorted(security), name='protocol',
-                           fmt='%s', sep=','))
-
-    # These are warnings, not errors. So don't increment problem count. This
-    # may change in the future.
-    if 'tls1.2' not in security:
-        fm.plain(_('  TLS 1.2 not supported by Python install; '
-                   'network connections lack modern security\n'))
-    if 'sni' not in security:
-        fm.plain(_('  SNI not supported by Python install; may have '
-                   'connectivity issues with some servers\n'))
-
-    # TODO print CA cert info
-
-    # hg version
-    hgver = util.version()
-    fm.write('hgver', _("checking Mercurial version (%s)\n"),
-             hgver.split('+')[0])
-    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
-             '+'.join(hgver.split('+')[1:]))
-
-    # compiled modules
-    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
-             policy.policy)
-    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
-             os.path.dirname(__file__))
-
-    err = None
-    try:
-        from . import (
-            base85,
-            bdiff,
-            mpatch,
-            osutil,
-        )
-        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
-    except Exception as inst:
-        err = inst
-        problems += 1
-    fm.condwrite(err, 'extensionserror', " %s\n", err)
-
-    compengines = util.compengines._engines.values()
-    fm.write('compengines', _('checking registered compression engines (%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines),
-                           name='compengine', fmt='%s', sep=', '))
-    fm.write('compenginesavail', _('checking available compression engines '
-                                   '(%s)\n'),
-             fm.formatlist(sorted(e.name() for e in compengines
-                                  if e.available()),
-                           name='compengine', fmt='%s', sep=', '))
-    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
-    fm.write('compenginesserver', _('checking available compression engines '
-                                    'for wire protocol (%s)\n'),
-             fm.formatlist([e.name() for e in wirecompengines
-                            if e.wireprotosupport()],
-                           name='compengine', fmt='%s', sep=', '))
-
-    # templates
-    p = templater.templatepaths()
-    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
-    fm.condwrite(not p, '', _(" no template directories found\n"))
-    if p:
-        m = templater.templatepath("map-cmdline.default")
-        if m:
-            # template found, check if it is working
-            err = None
-            try:
-                templater.templater.frommapfile(m)
-            except Exception as inst:
-                err = inst
-                p = None
-            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
-        else:
-            p = None
-        fm.condwrite(p, 'defaulttemplate',
-                     _("checking default template (%s)\n"), m)
-        fm.condwrite(not m, 'defaulttemplatenotfound',
-                     _(" template '%s' not found\n"), "default")
-    if not p:
-        problems += 1
-    fm.condwrite(not p, '',
-                 _(" (templates seem to have been installed incorrectly)\n"))
-
-    # editor
-    editor = ui.geteditor()
-    editor = util.expandpath(editor)
-    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
-    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
-    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
-                 _(" No commit editor set and can't find %s in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editor == 'vi' and editor)
-    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
-                 _(" Can't find editor '%s' in PATH\n"
-                   " (specify a commit editor in your configuration"
-                   " file)\n"), not cmdpath and editor)
-    if not cmdpath and editor != 'vi':
-        problems += 1
-
-    # check username
-    username = None
-    err = None
-    try:
-        username = ui.username()
-    except error.Abort as e:
-        err = e
-        problems += 1
-
-    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
-    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
-        " (specify a username in your configuration file)\n"), err)
-
-    fm.condwrite(not problems, '',
-                 _("no problems detected\n"))
-    if not problems:
-        fm.data(problems=problems)
-    fm.condwrite(problems, 'problems',
-                 _("%d problems detected,"
-                   " please check your install!\n"), problems)
-    fm.end()
-
-    return problems
-
-@command('debugknown', [], _('REPO ID...'), norepo=True)
-def debugknown(ui, repopath, *ids, **opts):
-    """test whether node ids are known to a repo
-
-    Every ID must be a full-length hex node id string. Returns a list of 0s
-    and 1s indicating unknown/known.
-    """
-    repo = hg.peer(ui, opts, repopath)
-    if not repo.capable('known'):
-        raise error.Abort("known() not supported by target repository")
-    flags = repo.known([bin(s) for s in ids])
-    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
-
-@command('debuglabelcomplete', [], _('LABEL...'))
-def debuglabelcomplete(ui, repo, *args):
-    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
-    debugnamecomplete(ui, repo, *args)
-
-@command('debugmergestate', [], '')
-def debugmergestate(ui, repo, *args):
-    """print merge state
-
-    Use --verbose to print out information about whether v1 or v2 merge state
-    was chosen."""
-    def _hashornull(h):
-        if h == nullhex:
-            return 'null'
-        else:
-            return h
-
-    def printrecords(version):
-        ui.write(('* version %s records\n') % version)
-        if version == 1:
-            records = v1records
-        else:
-            records = v2records
-
-        for rtype, record in records:
-            # pretty print some record types
-            if rtype == 'L':
-                ui.write(('local: %s\n') % record)
-            elif rtype == 'O':
-                ui.write(('other: %s\n') % record)
-            elif rtype == 'm':
-                driver, mdstate = record.split('\0', 1)
-                ui.write(('merge driver: %s (state "%s")\n')
-                         % (driver, mdstate))
-            elif rtype in 'FDC':
-                r = record.split('\0')
-                f, state, hash, lfile, afile, anode, ofile = r[0:7]
-                if version == 1:
-                    onode = 'not stored in v1 format'
-                    flags = r[7]
-                else:
-                    onode, flags = r[7:9]
-                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
-                         % (f, rtype, state, _hashornull(hash)))
-                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
-                ui.write(('  ancestor path: %s (node %s)\n')
-                         % (afile, _hashornull(anode)))
-                ui.write(('  other path: %s (node %s)\n')
-                         % (ofile, _hashornull(onode)))
-            elif rtype == 'f':
-                filename, rawextras = record.split('\0', 1)
-                extras = rawextras.split('\0')
-                i = 0
-                extrastrings = []
-                while i < len(extras):
-                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
-                    i += 2
-
-                ui.write(('file extras: %s (%s)\n')
-                         % (filename, ', '.join(extrastrings)))
-            elif rtype == 'l':
-                labels = record.split('\0', 2)
-                labels = [l for l in labels if len(l) > 0]
-                ui.write(('labels:\n'))
-                ui.write(('  local: %s\n' % labels[0]))
-                ui.write(('  other: %s\n' % labels[1]))
-                if len(labels) > 2:
-                    ui.write(('  base:  %s\n' % labels[2]))
-            else:
-                ui.write(('unrecognized entry: %s\t%s\n')
-                         % (rtype, record.replace('\0', '\t')))
-
-    # Avoid mergestate.read() since it may raise an exception for unsupported
-    # merge state records. We shouldn't be doing this, but this is OK since this
-    # command is pretty low-level.
-    ms = mergemod.mergestate(repo)
-
-    # sort so that reasonable information is on top
-    v1records = ms._readrecordsv1()
-    v2records = ms._readrecordsv2()
-    order = 'LOml'
-    def key(r):
-        idx = order.find(r[0])
-        if idx == -1:
-            return (1, r[1])
-        else:
-            return (0, idx)
-    v1records.sort(key=key)
-    v2records.sort(key=key)
-
-    if not v1records and not v2records:
-        ui.write(('no merge state found\n'))
-    elif not v2records:
-        ui.note(('no version 2 merge state\n'))
-        printrecords(1)
-    elif ms._v1v2match(v1records, v2records):
-        ui.note(('v1 and v2 states match: using v2\n'))
-        printrecords(2)
-    else:
-        ui.note(('v1 and v2 states mismatch: using v1\n'))
-        printrecords(1)
-        if ui.verbose:
-            printrecords(2)
-
-@command('debugnamecomplete', [], _('NAME...'))
-def debugnamecomplete(ui, repo, *args):
-    '''complete "names" - tags, open branch names, bookmark names'''
-
-    names = set()
-    # since we previously only listed open branches, we will handle that
-    # specially (after this for loop)
-    for name, ns in repo.names.iteritems():
-        if name != 'branches':
-            names.update(ns.listnames(repo))
-    names.update(tag for (tag, heads, tip, closed)
-                 in repo.branchmap().iterbranches() if not closed)
-    completions = set()
-    if not args:
-        args = ['']
-    for a in args:
-        completions.update(n for n in names if n.startswith(a))
-    ui.write('\n'.join(sorted(completions)))
-    ui.write('\n')
-
-@command('debuglocks',
-         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
-          ('W', 'force-wlock', None,
-           _('free the working state lock (DANGEROUS)'))],
-         _('[OPTION]...'))
-def debuglocks(ui, repo, **opts):
-    """show or modify state of locks
-
-    By default, this command will show which locks are held. This
-    includes the user and process holding the lock, the amount of time
-    the lock has been held, and the machine name where the process is
-    running if it's not local.
-
-    Locks protect the integrity of Mercurial's data, so should be
-    treated with care. System crashes or other interruptions may cause
-    locks to not be properly released, though Mercurial will usually
-    detect and remove such stale locks automatically.
-
-    However, detecting stale locks may not always be possible (for
-    instance, on a shared filesystem). Removing locks may also be
-    blocked by filesystem permissions.
-
-    Returns 0 if no locks are held.
-
-    """
-
-    if opts.get('force_lock'):
-        repo.svfs.unlink('lock')
-    if opts.get('force_wlock'):
-        repo.vfs.unlink('wlock')
-    if opts.get('force_lock') or opts.get('force_lock'):
-        return 0
-
-    now = time.time()
-    held = 0
-
-    def report(vfs, name, method):
-        # this causes stale locks to get reaped for more accurate reporting
-        try:
-            l = method(False)
-        except error.LockHeld:
-            l = None
-
-        if l:
-            l.release()
-        else:
-            try:
-                stat = vfs.lstat(name)
-                age = now - stat.st_mtime
-                user = util.username(stat.st_uid)
-                locker = vfs.readlock(name)
-                if ":" in locker:
-                    host, pid = locker.split(':')
-                    if host == socket.gethostname():
-                        locker = 'user %s, process %s' % (user, pid)
-                    else:
-                        locker = 'user %s, process %s, host %s' \
-                                 % (user, pid, host)
-                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
-                return 1
-            except OSError as e:
-                if e.errno != errno.ENOENT:
-                    raise
-
-        ui.write(("%-6s free\n") % (name + ":"))
-        return 0
-
-    held += report(repo.svfs, "lock", repo.lock)
-    held += report(repo.vfs, "wlock", repo.wlock)
-
-    return held
-
-@command('debugobsolete',
-        [('', 'flags', 0, _('markers flag')),
-         ('', 'record-parents', False,
-          _('record parent information for the precursor')),
-         ('r', 'rev', [], _('display markers relevant to REV')),
-         ('', 'index', False, _('display index of the marker')),
-         ('', 'delete', [], _('delete markers specified by indices')),
-        ] + commitopts2 + formatteropts,
-         _('[OBSOLETED [REPLACEMENT ...]]'))
-def debugobsolete(ui, repo, precursor=None, *successors, **opts):
-    """create arbitrary obsolete marker
-
-    With no arguments, displays the list of obsolescence markers."""
-
-    def parsenodeid(s):
-        try:
-            # We do not use revsingle/revrange functions here to accept
-            # arbitrary node identifiers, possibly not present in the
-            # local repository.
-            n = bin(s)
-            if len(n) != len(nullid):
-                raise TypeError()
-            return n
-        except TypeError:
-            raise error.Abort('changeset references must be full hexadecimal '
-                             'node identifiers')
-
-    if opts.get('delete'):
-        indices = []
-        for v in opts.get('delete'):
-            try:
-                indices.append(int(v))
-            except ValueError:
-                raise error.Abort(_('invalid index value: %r') % v,
-                                  hint=_('use integers for indices'))
-
-        if repo.currenttransaction():
-            raise error.Abort(_('cannot delete obsmarkers in the middle '
-                                'of transaction.'))
-
-        with repo.lock():
-            n = repair.deleteobsmarkers(repo.obsstore, indices)
-            ui.write(_('deleted %i obsolescence markers\n') % n)
-
-        return
-
-    if precursor is not None:
-        if opts['rev']:
-            raise error.Abort('cannot select revision when creating marker')
-        metadata = {}
-        metadata['user'] = opts['user'] or ui.username()
-        succs = tuple(parsenodeid(succ) for succ in successors)
-        l = repo.lock()
-        try:
-            tr = repo.transaction('debugobsolete')
-            try:
-                date = opts.get('date')
-                if date:
-                    date = util.parsedate(date)
-                else:
-                    date = None
-                prec = parsenodeid(precursor)
-                parents = None
-                if opts['record_parents']:
-                    if prec not in repo.unfiltered():
-                        raise error.Abort('cannot used --record-parents on '
-                                         'unknown changesets')
-                    parents = repo.unfiltered()[prec].parents()
-                    parents = tuple(p.node() for p in parents)
-                repo.obsstore.create(tr, prec, succs, opts['flags'],
-                                     parents=parents, date=date,
-                                     metadata=metadata)
-                tr.close()
-            except ValueError as exc:
-                raise error.Abort(_('bad obsmarker input: %s') % exc)
-            finally:
-                tr.release()
-        finally:
-            l.release()
-    else:
-        if opts['rev']:
-            revs = scmutil.revrange(repo, opts['rev'])
-            nodes = [repo[r].node() for r in revs]
-            markers = list(obsolete.getmarkers(repo, nodes=nodes))
-            markers.sort(key=lambda x: x._data)
-        else:
-            markers = obsolete.getmarkers(repo)
-
-        markerstoiter = markers
-        isrelevant = lambda m: True
-        if opts.get('rev') and opts.get('index'):
-            markerstoiter = obsolete.getmarkers(repo)
-            markerset = set(markers)
-            isrelevant = lambda m: m in markerset
-
-        fm = ui.formatter('debugobsolete', opts)
-        for i, m in enumerate(markerstoiter):
-            if not isrelevant(m):
-                # marker can be irrelevant when we're iterating over a set
-                # of markers (markerstoiter) which is bigger than the set
-                # of markers we want to display (markers)
-                # this can happen if both --index and --rev options are
-                # provided and thus we need to iterate over all of the markers
-                # to get the correct indices, but only display the ones that
-                # are relevant to --rev value
-                continue
-            fm.startitem()
-            ind = i if opts.get('index') else None
-            cmdutil.showmarker(fm, m, index=ind)
-        fm.end()
-
-@command('debugpathcomplete',
-         [('f', 'full', None, _('complete an entire path')),
-          ('n', 'normal', None, _('show only normal files')),
-          ('a', 'added', None, _('show only added files')),
-          ('r', 'removed', None, _('show only removed files'))],
-         _('FILESPEC...'))
-def debugpathcomplete(ui, repo, *specs, **opts):
-    '''complete part or all of a tracked path
-
-    This command supports shells that offer path name completion. It
-    currently completes only files already known to the dirstate.
-
-    Completion extends only to the next path segment unless
-    --full is specified, in which case entire paths are used.'''
-
-    def complete(path, acceptable):
-        dirstate = repo.dirstate
-        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
-        rootdir = repo.root + pycompat.ossep
-        if spec != repo.root and not spec.startswith(rootdir):
-            return [], []
-        if os.path.isdir(spec):
-            spec += '/'
-        spec = spec[len(rootdir):]
-        fixpaths = pycompat.ossep != '/'
-        if fixpaths:
-            spec = spec.replace(pycompat.ossep, '/')
-        speclen = len(spec)
-        fullpaths = opts['full']
-        files, dirs = set(), set()
-        adddir, addfile = dirs.add, files.add
-        for f, st in dirstate.iteritems():
-            if f.startswith(spec) and st[0] in acceptable:
-                if fixpaths:
-                    f = f.replace('/', pycompat.ossep)
-                if fullpaths:
-                    addfile(f)
-                    continue
-                s = f.find(pycompat.ossep, speclen)
-                if s >= 0:
-                    adddir(f[:s])
-                else:
-                    addfile(f)
-        return files, dirs
-
-    acceptable = ''
-    if opts['normal']:
-        acceptable += 'nm'
-    if opts['added']:
-        acceptable += 'a'
-    if opts['removed']:
-        acceptable += 'r'
-    cwd = repo.getcwd()
-    if not specs:
-        specs = ['.']
-
-    files, dirs = set(), set()
-    for spec in specs:
-        f, d = complete(spec, acceptable or 'nmar')
-        files.update(f)
-        dirs.update(d)
-    files.update(dirs)
-    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
-    ui.write('\n')
-
-@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
-def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
-    '''access the pushkey key/value protocol
-
-    With two args, list the keys in the given namespace.
-
-    With five args, set a key to new if it currently is set to old.
-    Reports success or failure.
-    '''
-
-    target = hg.peer(ui, {}, repopath)
-    if keyinfo:
-        key, old, new = keyinfo
-        r = target.pushkey(namespace, key, old, new)
-        ui.status(str(r) + '\n')
-        return not r
-    else:
-        for k, v in sorted(target.listkeys(namespace).iteritems()):
-            ui.write("%s\t%s\n" % (k.encode('string-escape'),
-                                   v.encode('string-escape')))
-
-@command('debugpvec', [], _('A B'))
-def debugpvec(ui, repo, a, b=None):
-    ca = scmutil.revsingle(repo, a)
-    cb = scmutil.revsingle(repo, b)
-    pa = pvec.ctxpvec(ca)
-    pb = pvec.ctxpvec(cb)
-    if pa == pb:
-        rel = "="
-    elif pa > pb:
-        rel = ">"
-    elif pa < pb:
-        rel = "<"
-    elif pa | pb:
-        rel = "|"
-    ui.write(_("a: %s\n") % pa)
-    ui.write(_("b: %s\n") % pb)
-    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
-    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
-             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
-              pa.distance(pb), rel))
-
-@command('debugrebuilddirstate|debugrebuildstate',
-    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
-     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
-                             'the working copy parent')),
-    ],
-    _('[-r REV]'))
-def debugrebuilddirstate(ui, repo, rev, **opts):
-    """rebuild the dirstate as it would look like for the given revision
-
-    If no revision is specified, the first parent of the working directory
-    will be used.
-
-    The dirstate will be set to the files of the given revision.
-    The actual working directory content or existing dirstate
-    information such as adds or removes is not considered.
-
-    ``minimal`` will only rebuild the dirstate status for files that claim to be
-    tracked but are not in the parent manifest, or that exist in the parent
-    manifest but are not in the dirstate. It will not change adds, removes, or
-    modified files that are in the working copy parent.
-
-    One use of this command is to make the next :hg:`status` invocation
-    check the actual file content.
-    """
-    ctx = scmutil.revsingle(repo, rev)
-    with repo.wlock():
-        dirstate = repo.dirstate
-        changedfiles = None
-        # See command doc for what minimal does.
-        if opts.get('minimal'):
-            manifestfiles = set(ctx.manifest().keys())
-            dirstatefiles = set(dirstate)
-            manifestonly = manifestfiles - dirstatefiles
-            dsonly = dirstatefiles - manifestfiles
-            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
-            changedfiles = manifestonly | dsnotadded
-
-        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
-
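Worked through on a small example, the --minimal computation above picks
exactly the out-of-sync entries (file names invented)::

    manifestfiles = {'a.txt', 'b.txt'}
    dirstatefiles = {'b.txt', 'c.txt', 'd.txt'}
    added = {'d.txt'}                             # dirstate state 'a'

    manifestonly = manifestfiles - dirstatefiles  # {'a.txt'}
    dsonly = dirstatefiles - manifestfiles        # {'c.txt', 'd.txt'}
    dsnotadded = dsonly - added                   # {'c.txt'}
    changedfiles = manifestonly | dsnotadded      # {'a.txt', 'c.txt'}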
-@command('debugrebuildfncache', [], '')
-def debugrebuildfncache(ui, repo):
-    """rebuild the fncache file"""
-    repair.rebuildfncache(ui, repo)
-
-@command('debugrename',
-    [('r', 'rev', '', _('revision to debug'), _('REV'))],
-    _('[-r REV] FILE'))
-def debugrename(ui, repo, file1, *pats, **opts):
-    """dump rename information"""
-
-    ctx = scmutil.revsingle(repo, opts.get('rev'))
-    m = scmutil.match(ctx, (file1,) + pats, opts)
-    for abs in ctx.walk(m):
-        fctx = ctx[abs]
-        o = fctx.filelog().renamed(fctx.filenode())
-        rel = m.rel(abs)
-        if o:
-            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
-        else:
-            ui.write(_("%s not renamed\n") % rel)
-
-@command('debugrevlog', debugrevlogopts +
-    [('d', 'dump', False, _('dump index data'))],
-    _('-c|-m|FILE'),
-    optionalrepo=True)
-def debugrevlog(ui, repo, file_=None, **opts):
-    """show data and statistics about a revlog"""
-    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
-
-    if opts.get("dump"):
-        numrevs = len(r)
-        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
-                 " rawsize totalsize compression heads chainlen\n"))
-        ts = 0
-        heads = set()
-
-        for rev in xrange(numrevs):
-            dbase = r.deltaparent(rev)
-            if dbase == -1:
-                dbase = rev
-            cbase = r.chainbase(rev)
-            clen = r.chainlen(rev)
-            p1, p2 = r.parentrevs(rev)
-            rs = r.rawsize(rev)
-            ts = ts + rs
-            heads -= set(r.parentrevs(rev))
-            heads.add(rev)
-            try:
-                compression = ts / r.end(rev)
-            except ZeroDivisionError:
-                compression = 0
-            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
-                     "%11d %5d %8d\n" %
-                     (rev, p1, p2, r.start(rev), r.end(rev),
-                      r.start(dbase), r.start(cbase),
-                      r.start(p1), r.start(p2),
-                      rs, ts, compression, len(heads), clen))
-        return 0
-
-    v = r.version
-    format = v & 0xFFFF
-    flags = []
-    gdelta = False
-    if v & revlog.REVLOGNGINLINEDATA:
-        flags.append('inline')
-    if v & revlog.REVLOGGENERALDELTA:
-        gdelta = True
-        flags.append('generaldelta')
-    if not flags:
-        flags = ['(none)']
-
-    nummerges = 0
-    numfull = 0
-    numprev = 0
-    nump1 = 0
-    nump2 = 0
-    numother = 0
-    nump1prev = 0
-    nump2prev = 0
-    chainlengths = []
-
-    datasize = [None, 0, 0]
-    fullsize = [None, 0, 0]
-    deltasize = [None, 0, 0]
-    chunktypecounts = {}
-    chunktypesizes = {}
-
-    def addsize(size, l):
-        if l[0] is None or size < l[0]:
-            l[0] = size
-        if size > l[1]:
-            l[1] = size
-        l[2] += size
-
-    numrevs = len(r)
-    for rev in xrange(numrevs):
-        p1, p2 = r.parentrevs(rev)
-        delta = r.deltaparent(rev)
-        if format > 0:
-            addsize(r.rawsize(rev), datasize)
-        if p2 != nullrev:
-            nummerges += 1
-        size = r.length(rev)
-        if delta == nullrev:
-            chainlengths.append(0)
-            numfull += 1
-            addsize(size, fullsize)
-        else:
-            chainlengths.append(chainlengths[delta] + 1)
-            addsize(size, deltasize)
-            if delta == rev - 1:
-                numprev += 1
-                if delta == p1:
-                    nump1prev += 1
-                elif delta == p2:
-                    nump2prev += 1
-            elif delta == p1:
-                nump1 += 1
-            elif delta == p2:
-                nump2 += 1
-            elif delta != nullrev:
-                numother += 1
-
-        # Obtain data on the raw chunks in the revlog.
-        chunk = r._chunkraw(rev, rev)[1]
-        if chunk:
-            chunktype = chunk[0]
-        else:
-            chunktype = 'empty'
-
-        if chunktype not in chunktypecounts:
-            chunktypecounts[chunktype] = 0
-            chunktypesizes[chunktype] = 0
-
-        chunktypecounts[chunktype] += 1
-        chunktypesizes[chunktype] += size
-
-    # Adjust size min value for empty cases
-    for size in (datasize, fullsize, deltasize):
-        if size[0] is None:
-            size[0] = 0
-
-    numdeltas = numrevs - numfull
-    numoprev = numprev - nump1prev - nump2prev
-    totalrawsize = datasize[2]
-    datasize[2] /= numrevs
-    fulltotal = fullsize[2]
-    fullsize[2] /= numfull
-    deltatotal = deltasize[2]
-    if numrevs - numfull > 0:
-        deltasize[2] /= numrevs - numfull
-    totalsize = fulltotal + deltatotal
-    avgchainlen = sum(chainlengths) / numrevs
-    maxchainlen = max(chainlengths)
-    compratio = 1
-    if totalsize:
-        compratio = totalrawsize / totalsize
-
-    basedfmtstr = '%%%dd\n'
-    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
-
-    def dfmtstr(max):
-        return basedfmtstr % len(str(max))
-    def pcfmtstr(max, padding=0):
-        return basepcfmtstr % (len(str(max)), ' ' * padding)
-
-    def pcfmt(value, total):
-        if total:
-            return (value, 100 * float(value) / total)
-        else:
-            return value, 100.0
-
-    ui.write(('format : %d\n') % format)
-    ui.write(('flags  : %s\n') % ', '.join(flags))
-
-    ui.write('\n')
-    fmt = pcfmtstr(totalsize)
-    fmt2 = dfmtstr(totalsize)
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
-    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
-    ui.write(('revisions     : ') + fmt2 % numrevs)
-    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
-    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
-    ui.write(('revision size : ') + fmt2 % totalsize)
-    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
-    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
-
-    def fmtchunktype(chunktype):
-        if chunktype == 'empty':
-            return '    %s     : ' % chunktype
-        elif chunktype in string.ascii_letters:
-            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
-        else:
-            return '    0x%s      : ' % hex(chunktype)
-
-    ui.write('\n')
-    ui.write(('chunks        : ') + fmt2 % numrevs)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
-    ui.write(('chunks size   : ') + fmt2 % totalsize)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
-
-    ui.write('\n')
-    fmt = dfmtstr(max(avgchainlen, compratio))
-    ui.write(('avg chain length  : ') + fmt % avgchainlen)
-    ui.write(('max chain length  : ') + fmt % maxchainlen)
-    ui.write(('compression ratio : ') + fmt % compratio)
-
-    if format > 0:
-        ui.write('\n')
-        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
-                 % tuple(datasize))
-    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
-             % tuple(fullsize))
-    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
-             % tuple(deltasize))
-
-    if numdeltas > 0:
-        ui.write('\n')
-        fmt = pcfmtstr(numdeltas)
-        fmt2 = pcfmtstr(numdeltas, 4)
-        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
-        if numprev > 0:
-            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
-                                                              numprev))
-            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
-                                                              numprev))
-            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
-                                                              numprev))
-        if gdelta:
-            ui.write(('deltas against p1    : ')
-                     + fmt % pcfmt(nump1, numdeltas))
-            ui.write(('deltas against p2    : ')
-                     + fmt % pcfmt(nump2, numdeltas))
-            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
-                                                             numdeltas))
-
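The ``addsize`` helper above folds each revision size into a running
``[min, max, total]`` triple; a standalone run (sizes invented) shows the
shape of the result::

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    fullsize = [None, 0, 0]
    for size in (120, 40, 300):
        addsize(size, fullsize)
    # fullsize == [40, 300, 460]; the report then divides the total
    # slot by the revision count to obtain an average.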
-@command('debugrevspec',
-    [('', 'optimize', None,
-      _('print parsed tree after optimizing (DEPRECATED)')),
-     ('p', 'show-stage', [],
-      _('print parsed tree at the given stage'), _('NAME')),
-     ('', 'no-optimized', False, _('evaluate tree without optimization')),
-     ('', 'verify-optimized', False, _('verify optimized result')),
-     ],
-    ('REVSPEC'))
-def debugrevspec(ui, repo, expr, **opts):
-    """parse and apply a revision specification
-
-    Use the -p/--show-stage option to print the parsed tree at the given
-    stages. Use -p all to print the tree at every stage.
-
-    Use --verify-optimized to compare the optimized result with the unoptimized
-    one. Returns 1 if the optimized result differs.
-    """
-    stages = [
-        ('parsed', lambda tree: tree),
-        ('expanded', lambda tree: revset.expandaliases(ui, tree)),
-        ('concatenated', revset.foldconcat),
-        ('analyzed', revset.analyze),
-        ('optimized', revset.optimize),
-    ]
-    if opts['no_optimized']:
-        stages = stages[:-1]
-    if opts['verify_optimized'] and opts['no_optimized']:
-        raise error.Abort(_('cannot use --verify-optimized with '
-                            '--no-optimized'))
-    stagenames = set(n for n, f in stages)
-
-    showalways = set()
-    showchanged = set()
-    if ui.verbose and not opts['show_stage']:
-        # show parsed tree by --verbose (deprecated)
-        showalways.add('parsed')
-        showchanged.update(['expanded', 'concatenated'])
-        if opts['optimize']:
-            showalways.add('optimized')
-    if opts['show_stage'] and opts['optimize']:
-        raise error.Abort(_('cannot use --optimize with --show-stage'))
-    if opts['show_stage'] == ['all']:
-        showalways.update(stagenames)
-    else:
-        for n in opts['show_stage']:
-            if n not in stagenames:
-                raise error.Abort(_('invalid stage name: %s') % n)
-        showalways.update(opts['show_stage'])
-
-    treebystage = {}
-    printedtree = None
-    tree = revset.parse(expr, lookup=repo.__contains__)
-    for n, f in stages:
-        treebystage[n] = tree = f(tree)
-        if n in showalways or (n in showchanged and tree != printedtree):
-            if opts['show_stage'] or n != 'parsed':
-                ui.write(("* %s:\n") % n)
-            ui.write(revset.prettyformat(tree), "\n")
-            printedtree = tree
-
-    if opts['verify_optimized']:
-        arevs = revset.makematcher(treebystage['analyzed'])(repo)
-        brevs = revset.makematcher(treebystage['optimized'])(repo)
-        if ui.verbose:
-            ui.note(("* analyzed set:\n"), revset.prettyformatset(arevs), "\n")
-            ui.note(("* optimized set:\n"), revset.prettyformatset(brevs), "\n")
-        arevs = list(arevs)
-        brevs = list(brevs)
-        if arevs == brevs:
-            return 0
-        ui.write(('--- analyzed\n'), label='diff.file_a')
-        ui.write(('+++ optimized\n'), label='diff.file_b')
-        sm = difflib.SequenceMatcher(None, arevs, brevs)
-        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
-            if tag in ('delete', 'replace'):
-                for c in arevs[alo:ahi]:
-                    ui.write('-%s\n' % c, label='diff.deleted')
-            if tag in ('insert', 'replace'):
-                for c in brevs[blo:bhi]:
-                    ui.write('+%s\n' % c, label='diff.inserted')
-            if tag == 'equal':
-                for c in arevs[alo:ahi]:
-                    ui.write(' %s\n' % c)
-        return 1
-
-    func = revset.makematcher(tree)
-    revs = func(repo)
-    if ui.verbose:
-        ui.note(("* set:\n"), revset.prettyformatset(revs), "\n")
-    for c in revs:
-        ui.write("%s\n" % c)
-
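The stage handling above is a small pipeline: every stage rewrites the
tree, and a stage is printed when selected (or, with --verbose, when it
changed the tree). A minimal sketch of that control flow, with
placeholder stage functions::

    stages = [
        ('parsed', lambda tree: tree),
        ('analyzed', lambda tree: ('analyzed',) + tree),
    ]

    def runstages(tree, stages, showalways, showchanged):
        printedtree = None
        for name, f in stages:
            tree = f(tree)
            if name in showalways or (name in showchanged
                                      and tree != printedtree):
                print('* %s:' % name)
                print(tree)
                printedtree = tree
        return tree

    runstages(('symbol', 'tip'), stages, {'analyzed'}, set())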
-@command('debugsetparents', [], _('REV1 [REV2]'))
-def debugsetparents(ui, repo, rev1, rev2=None):
-    """manually set the parents of the current working directory
-
-    This is useful for writing repository conversion tools, but should
-    be used with care. For example, neither the working directory nor the
-    dirstate is updated, so file status may be incorrect after running this
-    command.
-
-    Returns 0 on success.
-    """
-
-    r1 = scmutil.revsingle(repo, rev1).node()
-    r2 = scmutil.revsingle(repo, rev2, 'null').node()
-
-    with repo.wlock():
-        repo.setparents(r1, r2)
-
-@command('debugdirstate|debugstate',
-    [('', 'nodates', None, _('do not display the saved mtime')),
-    ('', 'datesort', None, _('sort by saved mtime'))],
-    _('[OPTION]...'))
-def debugstate(ui, repo, **opts):
-    """show the contents of the current dirstate"""
-
-    nodates = opts.get('nodates')
-    datesort = opts.get('datesort')
-
-    timestr = ""
-    if datesort:
-        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
-    else:
-        keyfunc = None # sort by filename
-    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
-        if ent[3] == -1:
-            timestr = 'unset               '
-        elif nodates:
-            timestr = 'set                 '
-        else:
-            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
-                                    time.localtime(ent[3]))
-        if ent[1] & 0o20000:
-            mode = 'lnk'
-        else:
-            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
-        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
-    for f in repo.dirstate.copies():
-        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
-
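The mode column above keys off the symlink bit and masks the permission
bits with the process umask; for instance (umask value assumed)::

    umask = 0o022
    for st_mode in (0o120777, 0o100755, 0o100666):
        if st_mode & 0o20000:                     # symlink bit set
            print('lnk')
        else:
            print('%3o' % (st_mode & 0o777 & ~umask))
    # -> lnk, 755, 644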
-@command('debugsub',
-    [('r', 'rev', '',
-     _('revision to check'), _('REV'))],
-    _('[-r REV] [REV]'))
-def debugsub(ui, repo, rev=None):
-    ctx = scmutil.revsingle(repo, rev, None)
-    for k, v in sorted(ctx.substate.items()):
-        ui.write(('path %s\n') % k)
-        ui.write((' source   %s\n') % v[0])
-        ui.write((' revision %s\n') % v[1])
-
-@command('debugsuccessorssets',
-    [],
-    _('[REV]'))
-def debugsuccessorssets(ui, repo, *revs):
-    """show set of successors for revision
-
-    A successors set of changeset A is a consistent group of revisions that
-    succeed A. It contains non-obsolete changesets only.
-
-    In most cases a changeset A has a single successors set containing a single
-    successor (changeset A replaced by A').
-
-    A changeset that is made obsolete with no successors is called "pruned".
-    Such changesets have no successors sets at all.
-
-    A changeset that has been "split" will have a successors set containing
-    more than one successor.
-
-    A changeset that has been rewritten in multiple different ways is called
-    "divergent". Such changesets have multiple successor sets (each of which
-    may also be split, i.e. have multiple successors).
-
-    Results are displayed as follows::
-
-        <rev1>
-            <successors-1A>
-        <rev2>
-            <successors-2A>
-            <successors-2B1> <successors-2B2> <successors-2B3>
-
-    Here rev2 has two possible (i.e. divergent) successors sets. The first
-    holds one element, whereas the second holds three (i.e. the changeset has
-    been split).
-    """
-    # cache passed to successorssets so computation is reused across calls
-    cache = {}
-    ctx2str = str
-    node2str = short
-    if ui.debug():
-        def ctx2str(ctx):
-            return ctx.hex()
-        node2str = hex
-    for rev in scmutil.revrange(repo, revs):
-        ctx = repo[rev]
-        ui.write('%s\n'% ctx2str(ctx))
-        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
-            if succsset:
-                ui.write('    ')
-                ui.write(node2str(succsset[0]))
-                for node in succsset[1:]:
-                    ui.write(' ')
-                    ui.write(node2str(node))
-            ui.write('\n')
-
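In terms of the value iterated above, the docstring's cases map onto
list shapes roughly as follows (node names hypothetical)::

    # successorssets(repo, A, cache) returns, following the cases above:
    #   A replaced by A2:             [[A2]]
    #   A pruned (no successors):     []
    #   A split in two:               [[B1, B2]]
    #   A divergent (two rewrites):   [[B], [C1, C2]]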
-@command('debugtemplate',
-    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
-     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
-    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
-    optionalrepo=True)
-def debugtemplate(ui, repo, tmpl, **opts):
-    """parse and apply a template
-
-    If -r/--rev is given, the template is processed as a log template and
-    applied to the given changesets. Otherwise, it is processed as a generic
-    template.
-
-    Use --verbose to print the parsed tree.
-    """
-    revs = None
-    if opts['rev']:
-        if repo is None:
-            raise error.RepoError(_('there is no Mercurial repository here '
-                                    '(.hg not found)'))
-        revs = scmutil.revrange(repo, opts['rev'])
-
-    props = {}
-    for d in opts['define']:
-        try:
-            k, v = (e.strip() for e in d.split('=', 1))
-            if not k:
-                raise ValueError
-            props[k] = v
-        except ValueError:
-            raise error.Abort(_('malformed keyword definition: %s') % d)
-
-    if ui.verbose:
-        aliases = ui.configitems('templatealias')
-        tree = templater.parse(tmpl)
-        ui.note(templater.prettyformat(tree), '\n')
-        newtree = templater.expandaliases(tree, aliases)
-        if newtree != tree:
-            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
-
-    mapfile = None
-    if revs is None:
-        k = 'debugtemplate'
-        t = formatter.maketemplater(ui, k, tmpl)
-        ui.write(templater.stringify(t(k, **props)))
-    else:
-        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
-                                                mapfile, buffered=False)
-        for r in revs:
-            displayer.show(repo[r], **props)
-        displayer.close()
-
-@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'), inferrepo=True)
-def debugwalk(ui, repo, *pats, **opts):
-    """show how files match on given patterns"""
-    m = scmutil.match(repo[None], pats, opts)
-    items = list(repo.walk(m))
-    if not items:
-        return
-    f = lambda fn: fn
-    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
-        f = lambda fn: util.normpath(fn)
-    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
-        max([len(abs) for abs in items]),
-        max([len(m.rel(abs)) for abs in items]))
-    for abs in items:
-        line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
-        ui.write("%s\n" % line.rstrip())
-
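The format string above is sized from the widest entries so the columns
line up; a reduced version (paths invented)::

    pairs = [('mercurial/config.py', 'config.py'),
             ('setup.py', '../setup.py')]
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (max(len(a) for a, _ in pairs),
                                      max(len(r) for _, r in pairs))
    for a, r in pairs:
        print((fmt % (a, r, '')).rstrip())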
-@command('debugwireargs',
-    [('', 'three', '', 'three'),
-    ('', 'four', '', 'four'),
-    ('', 'five', '', 'five'),
-    ] + remoteopts,
-    _('REPO [OPTIONS]... [ONE [TWO]]'),
-    norepo=True)
-def debugwireargs(ui, repopath, *vals, **opts):
-    repo = hg.peer(ui, opts, repopath)
-    for opt in remoteopts:
-        del opts[opt[1]]
-    args = {}
-    for k, v in opts.iteritems():
-        if v:
-            args[k] = v
-    # run twice to check that we don't mess up the stream for the next command
-    res1 = repo.debugwireargs(*vals, **args)
-    res2 = repo.debugwireargs(*vals, **args)
-    ui.write("%s\n" % res1)
-    if res1 != res2:
-        ui.warn("%s\n" % res2)
-
 @command('^diff',
     [('r', 'rev', [], _('revision'), _('REV')),
     ('c', 'change', '', _('change made by revision'), _('REV'))
@@ -3119,6 +1957,7 @@
 
     diffopts = patch.diffallopts(ui, opts)
     m = scmutil.match(repo[node2], pats, opts)
+    ui.pager('diff')
     cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
                            listsubrepos=opts.get('subrepos'),
                            root=opts.get('root'))
@@ -3190,6 +2029,7 @@
 
     Returns 0 on success.
     """
+    opts = pycompat.byteskwargs(opts)
     changesets += tuple(opts.get('rev', []))
     if not changesets:
         changesets = ['.']
@@ -3200,6 +2040,7 @@
         ui.note(_('exporting patches:\n'))
     else:
         ui.note(_('exporting patch:\n'))
+    ui.pager('export')
     cmdutil.export(repo, revs, template=opts.get('output'),
                  switch_parent=opts.get('switch_parent'),
                  opts=patch.diffallopts(ui, opts))
@@ -3253,7 +2094,7 @@
     Returns 0 if a match is found, 1 otherwise.
 
     """
-    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+    ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
 
     end = '\n'
     if opts.get('print0'):
@@ -3261,6 +2102,7 @@
     fmt = '%s' + end
 
     m = scmutil.match(ctx, pats, opts)
+    ui.pager('files')
     with ui.formatter('files', opts) as fm:
         return cmdutil.files(ui, ctx, m, fm, fmt, opts.get('subrepos'))
 
@@ -3552,7 +2394,7 @@
 
     # remove state when we complete successfully
     if not opts.get('dry_run'):
-        util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
+        repo.vfs.unlinkpath('graftstate', ignoremissing=True)
 
     return 0
 
@@ -3782,6 +2624,7 @@
                 except error.LookupError:
                     pass
 
+    ui.pager('grep')
     fm = ui.formatter('grep', opts)
     for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
         rev = ctx.rev()
@@ -3872,6 +2715,7 @@
     if not heads:
         return 1
 
+    ui.pager('heads')
     heads = sorted(heads, key=lambda x: -x.rev())
     displayer = cmdutil.show_changeset(ui, repo, opts)
     for ctx in heads:
@@ -3897,11 +2741,6 @@
     Returns 0 if successful.
     """
 
-    textwidth = ui.configint('ui', 'textwidth', 78)
-    termwidth = ui.termwidth() - 2
-    if textwidth <= 0 or termwidth < textwidth:
-        textwidth = termwidth
-
     keep = opts.get('system') or []
     if len(keep) == 0:
         if pycompat.sysplatform.startswith('win'):
@@ -3916,36 +2755,8 @@
     if ui.verbose:
         keep.append('verbose')
 
-    section = None
-    subtopic = None
-    if name and '.' in name:
-        name, remaining = name.split('.', 1)
-        remaining = encoding.lower(remaining)
-        if '.' in remaining:
-            subtopic, section = remaining.split('.', 1)
-        else:
-            if name in help.subtopics:
-                subtopic = remaining
-            else:
-                section = remaining
-
-    text = help.help_(ui, name, subtopic=subtopic, **opts)
-
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
-
-    # We could have been given a weird ".foo" section without a name
-    # to look for, or we could have simply failed to find "foo.bar"
-    # because bar isn't a section of foo
-    if section and not (formatted and name):
-        raise error.Abort(_("help section not found"))
-
-    if 'verbose' in pruned:
-        keep.append('omitted')
-    else:
-        keep.append('notomitted')
-    formatted, pruned = minirst.format(text, textwidth, keep=keep,
-                                       section=section)
+    formatted = help.formattedhelp(ui, name, keep=keep, **opts)
+    ui.pager('help')
     ui.write(formatted)
 
 
@@ -4127,8 +2938,9 @@
     Import a list of patches and commit them individually (unless
     --no-commit is specified).
 
-    To read a patch from standard input, use "-" as the patch name. If
-    a URL is specified, the patch will be downloaded from there.
+    To read a patch from standard input (stdin), use "-" as the patch
+    name. If a URL is specified, the patch will be downloaded from
+    there.
 
     Import first applies changes to the working directory (unless
     --bypass is specified); import will abort if there are outstanding
@@ -4198,6 +3010,10 @@
 
           hg import incoming-patches.mbox
 
+      - import patches from stdin::
+
+          hg import -
+
       - attempt to exactly restore an exported changeset (not always
         possible)::
 
@@ -4372,6 +3188,7 @@
 
     Returns 0 if there are incoming changes, 1 otherwise.
     """
+    opts = pycompat.byteskwargs(opts)
     if opts.get('graph'):
         cmdutil.checkunsupportedgraphflags([], opts)
         def display(other, chlist, displayer):
@@ -4392,6 +3209,7 @@
         if 'bookmarks' not in other.listkeys('namespaces'):
             ui.warn(_("remote doesn't support bookmarks\n"))
             return 0
+        ui.pager('incoming')
         ui.status(_('comparing with %s\n') % util.hidepassword(source))
         return bookmarks.incoming(ui, repo, other)
 
@@ -4458,6 +3276,7 @@
     m = scmutil.match(ctx, pats, opts, default='relglob',
                       badfn=lambda x, y: False)
 
+    ui.pager('locate')
     for abs in ctx.matches(m):
         if opts.get('fullpath'):
             ui.write(repo.wjoin(abs), end)
@@ -4588,12 +3407,13 @@
     Returns 0 on success.
 
     """
+    opts = pycompat.byteskwargs(opts)
     if opts.get('follow') and opts.get('rev'):
-        opts['rev'] = [revset.formatspec('reverse(::%lr)', opts.get('rev'))]
+        opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))]
         del opts['follow']
 
     if opts.get('graph'):
-        return cmdutil.graphlog(ui, repo, *pats, **opts)
+        return cmdutil.graphlog(ui, repo, pats, opts)
 
     revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
     limit = cmdutil.loglimit(opts)
@@ -4606,6 +3426,7 @@
             endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
 
+    ui.pager('log')
     displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
     for rev in revs:
         if count == limit:
@@ -4648,7 +3469,6 @@
 
     Returns 0 on success.
     """
-
     fm = ui.formatter('manifest', opts)
 
     if opts.get('all'):
@@ -4664,6 +3484,7 @@
             for fn, b, size in repo.store.datafiles():
                 if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
                     res.append(fn[plen:-slen])
+        ui.pager('manifest')
         for f in res:
             fm.startitem()
             fm.write("path", '%s\n', f)
@@ -4680,6 +3501,7 @@
     mode = {'l': '644', 'x': '755', '': '644'}
     ctx = scmutil.revsingle(repo, node)
     mf = ctx.manifest()
+    ui.pager('manifest')
     for f in ctx:
         fm.startitem()
         fl = ctx[f].flags()
@@ -4812,6 +3634,7 @@
             return
 
         revdag = cmdutil.graphrevs(repo, o, opts)
+        ui.pager('outgoing')
         displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
         cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges)
         cmdutil.outgoinghooks(ui, repo, other, opts, o)
@@ -4825,6 +3648,7 @@
             ui.warn(_("remote doesn't support bookmarks\n"))
             return 0
         ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+        ui.pager('outgoing')
         return bookmarks.outgoing(ui, repo, other)
 
     repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -4921,6 +3745,7 @@
 
     Returns 0 on success.
     """
+    ui.pager('paths')
     if search:
         pathitems = [(name, path) for name, path in ui.paths.iteritems()
                      if name == search]
@@ -5113,6 +3938,12 @@
 
     Returns 0 on success, 1 if an update had unresolved files.
     """
+
+    if ui.configbool('commands', 'update.requiredest') and opts.get('update'):
+        msg = _('update destination required by configuration')
+        hint = _('use hg pull followed by hg update DEST')
+        raise error.Abort(msg, hint=hint)
+
     source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
     ui.status(_('pulling from %s\n') % util.hidepassword(source))
     other = hg.peer(repo, opts, source)
@@ -5268,7 +4099,7 @@
     elif path.pushrev:
         # It doesn't make any sense to specify ancestor revisions. So limit
         # to DAG heads to make discovery simpler.
-        expr = revset.formatspec('heads(%r)', path.pushrev)
+        expr = revsetlang.formatspec('heads(%r)', path.pushrev)
         revs = scmutil.revrange(repo, [expr])
         revs = [repo[rev].node() for rev in revs]
         if not revs:
@@ -5434,6 +4265,8 @@
 
     - :hg:`resolve -l`: list files which had or still have conflicts.
       In the printed list, ``U`` = unresolved and ``R`` = resolved.
+      You can use ``set:unresolved()`` or ``set:resolved()`` to filter
+      the list. See :hg:`help filesets` for details.
 
     .. note::
 
@@ -5457,6 +4290,7 @@
                          hint=('use --all to re-merge all unresolved files'))
 
     if show:
+        ui.pager('resolve')
         fm = ui.formatter('resolve', opts)
         ms = mergemod.mergestate.read(repo)
         m = scmutil.match(repo[None], pats, opts)
@@ -5780,12 +4614,13 @@
     ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
      _('FILE')),
     ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
-    ('', 'stdio', None, _('for remote clients')),
-    ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
+    ('', 'stdio', None, _('for remote clients (ADVANCED)')),
+    ('', 'cmdserver', '', _('for remote clients (ADVANCED)'), _('MODE')),
     ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
     ('', 'style', '', _('template style to use'), _('STYLE')),
     ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
-    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
+    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))]
+     + subrepoopts,
     _('[OPTION]...'),
     optionalrepo=True)
 def serve(ui, repo, **opts):
@@ -5904,6 +4739,7 @@
     Returns 0 on success.
     """
 
+    opts = pycompat.byteskwargs(opts)
     revs = opts.get('rev')
     change = opts.get('change')
 
@@ -5916,7 +4752,7 @@
     else:
         node1, node2 = scmutil.revpair(repo, revs)
 
-    if pats:
+    if pats or ui.configbool('commands', 'status.relative'):
         cwd = repo.getcwd()
     else:
         cwd = ''
@@ -5940,12 +4776,13 @@
     stat = repo.status(node1, node2, m,
                        'ignored' in show, 'clean' in show, 'unknown' in show,
                        opts.get('subrepos'))
-    changestates = zip(states, 'MAR!?IC', stat)
+    changestates = zip(states, pycompat.iterbytestr('MAR!?IC'), stat)
 
     if (opts.get('all') or opts.get('copies')
         or ui.configbool('ui', 'statuscopies')) and not opts.get('no_status'):
         copy = copies.pathcopies(repo[node1], repo[node2], m)
 
+    ui.pager('status')
     fm = ui.formatter('status', opts)
     fmt = '%s' + end
     showchar = not opts.get('no_status')
@@ -5976,6 +4813,7 @@
     Returns 0 on success.
     """
 
+    ui.pager('summary')
     ctx = repo[None]
     parents = ctx.parents()
     pnode = parents[0].node()
@@ -5996,7 +4834,7 @@
         # label with log.changeset (instead of log.parent) since this
         # shows a working directory parent *changeset*:
         # i18n: column positioning for "hg summary"
-        ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
+        ui.write(_('parent: %d:%s ') % (p.rev(), p),
                  label=cmdutil._changesetlabels(p))
         ui.write(' '.join(p.tags()), label='log.tag')
         if p.bookmarks():
@@ -6006,6 +4844,8 @@
                 ui.write(_(' (empty repository)'))
             else:
                 ui.write(_(' (no revision checked out)'))
+        if p.obsolete():
+            ui.write(_(' (obsolete)'))
         if p.troubled():
             ui.write(' ('
                      + ', '.join(ui.label(trouble, 'trouble.%s' % trouble)
@@ -6352,8 +5192,8 @@
             scmutil.revsingle(repo, rev_).rev() == nullrev):
             raise error.Abort(_("cannot tag null revision"))
 
-        repo.tag(names, r, message, opts.get('local'), opts.get('user'), date,
-                 editor=editor)
+        tagsmod.tag(repo, names, r, message, opts.get('local'),
+                    opts.get('user'), date, editor=editor)
     finally:
         release(lock, wlock)
 
@@ -6368,6 +5208,7 @@
     Returns 0 on success.
     """
 
+    ui.pager('tags')
     fm = ui.formatter('tags', opts)
     hexfunc = fm.hexfunc
     tagtype = ""
@@ -6420,10 +5261,9 @@
      _('update to new branch head if changesets were unbundled'))],
     _('[-u] FILE...'))
 def unbundle(ui, repo, fname1, *fnames, **opts):
-    """apply one or more changegroup files
-
-    Apply one or more compressed changegroup files generated by the
-    bundle command.
+    """apply one or more bundle files
+
+    Apply one or more bundle files generated by :hg:`bundle`.
 
     Returns 0 on success, 1 if an update has unresolved files.
     """
@@ -6464,12 +5304,13 @@
 @command('^update|up|checkout|co',
     [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
     ('c', 'check', None, _('require clean working directory')),
+    ('m', 'merge', None, _('merge uncommitted changes')),
     ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
     ('r', 'rev', '', _('revision'), _('REV'))
      ] + mergetoolopts,
-    _('[-c] [-C] [-d DATE] [[-r] REV]'))
+    _('[-C|-c|-m] [-d DATE] [[-r] REV]'))
 def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False,
-           tool=None):
+           merge=None, tool=None):
     """update working directory (or switch revisions)
 
     Update the repository's working directory to the specified
@@ -6488,10 +5329,11 @@
 
     .. container:: verbose
 
-      The following rules apply when the working directory contains
-      uncommitted changes:
-
-      1. If neither -c/--check nor -C/--clean is specified, and if
+      The -C/--clean, -c/--check, and -m/--merge options control what
+      happens if the working directory contains uncommitted changes.
+      At most one of them can be specified.
+
+      1. If no option is specified, and if
          the requested changeset is an ancestor or descendant of
          the working directory's parent, the uncommitted changes
          are merged into the requested changeset and the merged
@@ -6500,10 +5342,14 @@
          branch), the update is aborted and the uncommitted changes
          are preserved.
 
-      2. With the -c/--check option, the update is aborted and the
+      2. With the -m/--merge option, the update is allowed even if the
+         requested changeset is not an ancestor or descendant of
+         the working directory's parent.
+
+      3. With the -c/--check option, the update is aborted and the
          uncommitted changes are preserved.
 
-      3. With the -C/--clean option, uncommitted changes are discarded and
+      4. With the -C/--clean option, uncommitted changes are discarded and
          the working directory is updated to the requested changeset.
 
     To cancel an uncommitted merge (and lose your changes), use
@@ -6522,14 +5368,26 @@
     if rev and node:
         raise error.Abort(_("please specify just one revision"))
 
+    if ui.configbool('commands', 'update.requiredest'):
+        if not node and not rev and not date:
+            raise error.Abort(_('you must specify a destination'),
+                              hint=_('for example: hg update ".::"'))
+
     if rev is None or rev == '':
         rev = node
 
     if date and rev is not None:
         raise error.Abort(_("you can't specify a revision and a date"))
 
-    if check and clean:
-        raise error.Abort(_("cannot specify both -c/--check and -C/--clean"))
+    if len([x for x in (clean, check, merge) if x]) > 1:
+        raise error.Abort(_("can only specify one of -C/--clean, -c/--check, "
+                            "or -m/merge"))
+
+    updatecheck = None
+    if check:
+        updatecheck = 'abort'
+    elif merge:
+        updatecheck = 'none'
 
     with repo.wlock():
         cmdutil.clearunfinished(repo)
@@ -6541,12 +5399,10 @@
         brev = rev
         rev = scmutil.revsingle(repo, rev, rev).rev()
 
-        if check:
-            cmdutil.bailifchanged(repo, merge=False)
-
         repo.ui.setconfig('ui', 'forcemerge', tool, 'update')
 
-        return hg.updatetotally(ui, repo, rev, brev, clean=clean, check=check)
+        return hg.updatetotally(ui, repo, rev, brev, clean=clean,
+                                updatecheck=updatecheck)
 
 @command('verify', [])
 def verify(ui, repo):
@@ -6570,6 +5426,8 @@
 @command('version', [] + formatteropts, norepo=True)
 def version_(ui, **opts):
     """output version and copyright information"""
+    if ui.verbose:
+        ui.pager('version')
     fm = ui.formatter("version", opts)
     fm.startitem()
     fm.write("ver", _("Mercurial Distributed SCM (version %s)\n"),
--- a/mercurial/commandserver.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/commandserver.py	Tue Apr 18 12:24:34 2017 -0400
@@ -304,8 +304,8 @@
     ui.flush()
     newfiles = []
     nullfd = os.open(os.devnull, os.O_RDWR)
-    for f, sysf, mode in [(ui.fin, util.stdin, 'rb'),
-                          (ui.fout, util.stdout, 'wb')]:
+    for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
+                          (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
         if f is sysf:
             newfd = os.dup(f.fileno())
             os.dup2(nullfd, f.fileno())
@@ -447,6 +447,7 @@
         self._sock = None
         self._oldsigchldhandler = None
         self._workerpids = set()  # updated by signal handler; do not iterate
+        self._socketunlinked = None
 
     def init(self):
         self._sock = socket.socket(socket.AF_UNIX)
@@ -455,11 +456,17 @@
         o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
         self._oldsigchldhandler = o
         self._servicehandler.printbanner(self.address)
+        self._socketunlinked = False
+
+    def _unlinksocket(self):
+        if not self._socketunlinked:
+            self._servicehandler.unlinksocket(self.address)
+            self._socketunlinked = True
 
     def _cleanup(self):
         signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
         self._sock.close()
-        self._servicehandler.unlinksocket(self.address)
+        self._unlinksocket()
         # don't kill child processes as they have active clients, just wait
         self._reapworkers(0)
 
@@ -470,11 +477,23 @@
             self._cleanup()
 
     def _mainloop(self):
+        exiting = False
         h = self._servicehandler
-        while not h.shouldexit():
+        while True:
+            if not exiting and h.shouldexit():
+                # clients can no longer connect() to the domain socket, so
+                # we stop queuing new requests.
+                # for requests that are queued (connect()-ed, but haven't been
+                # accept()-ed), handle them before exit. otherwise, clients
+                # waiting for recv() will receive ECONNRESET.
+                self._unlinksocket()
+                exiting = True
             try:
                 ready = select.select([self._sock], [], [], h.pollinterval)[0]
                 if not ready:
+                    # only exit if we completed all queued requests
+                    if exiting:
+                        break
                     continue
                 conn, _addr = self._sock.accept()
             except (select.error, socket.error) as inst:
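The exit path added here unlinks the socket first, so no new client can
connect(), then keeps accept()-ing until the listen backlog drains. The
same pattern in miniature, with hypothetical shouldexit/handle hooks::

    import os
    import select

    def serve(sock, sockpath, shouldexit, handle, pollinterval=1.0):
        exiting = False
        while True:
            if not exiting and shouldexit():
                os.unlink(sockpath)  # refuse new connections; keep queued ones
                exiting = True
            ready = select.select([sock], [], [], pollinterval)[0]
            if not ready:
                if exiting:
                    break            # backlog fully served
                continue
            conn, _addr = sock.accept()
            handle(conn)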
--- a/mercurial/config.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/config.py	Tue Apr 18 12:24:34 2017 -0400
@@ -13,15 +13,16 @@
 from .i18n import _
 from . import (
     error,
+    pycompat,
     util,
 )
 
 class config(object):
-    def __init__(self, data=None, includepaths=[]):
+    def __init__(self, data=None, includepaths=None):
         self._data = {}
         self._source = {}
         self._unset = []
-        self._includepaths = includepaths
+        self._includepaths = includepaths or []
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
@@ -69,6 +70,9 @@
     def items(self, section):
         return self._data.get(section, {}).items()
     def set(self, section, item, value, source=""):
+        if pycompat.ispy3:
+            assert not isinstance(value, str), (
+                'config values may not be unicode strings on Python 3')
         if section not in self:
             self._data[section] = util.sortdict()
         self._data[section][item] = value
@@ -169,5 +173,92 @@
 
     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
-            fp = util.posixfile(path)
-        self.parse(path, fp.read(), sections, remap, self.read)
+            fp = util.posixfile(path, 'rb')
+        assert getattr(fp, 'mode', r'rb') == r'rb', (
+            'config files must be opened in binary mode, got fp=%r mode=%r' % (
+                fp, fp.mode))
+        self.parse(path, fp.read(),
+                   sections=sections, remap=remap, include=self.read)
+
+def parselist(value):
+    """parse a configuration value as a list of comma/space separated strings
+
+    >>> parselist('this,is "a small" ,test')
+    ['this', 'is', 'a small', 'test']
+    """
+
+    def _parse_plain(parts, s, offset):
+        whitespace = False
+        while offset < len(s) and (s[offset:offset + 1].isspace()
+                                   or s[offset:offset + 1] == ','):
+            whitespace = True
+            offset += 1
+        if offset >= len(s):
+            return None, parts, offset
+        if whitespace:
+            parts.append('')
+        if s[offset:offset + 1] == '"' and not parts[-1]:
+            return _parse_quote, parts, offset + 1
+        elif s[offset:offset + 1] == '"' and parts[-1][-1] == '\\':
+            parts[-1] = parts[-1][:-1] + s[offset:offset + 1]
+            return _parse_plain, parts, offset + 1
+        parts[-1] += s[offset:offset + 1]
+        return _parse_plain, parts, offset + 1
+
+    def _parse_quote(parts, s, offset):
+        if offset < len(s) and s[offset:offset + 1] == '"': # ""
+            parts.append('')
+            offset += 1
+            while offset < len(s) and (s[offset:offset + 1].isspace() or
+                    s[offset:offset + 1] == ','):
+                offset += 1
+            return _parse_plain, parts, offset
+
+        while offset < len(s) and s[offset:offset + 1] != '"':
+            if (s[offset:offset + 1] == '\\' and offset + 1 < len(s)
+                    and s[offset + 1:offset + 2] == '"'):
+                offset += 1
+                parts[-1] += '"'
+            else:
+                parts[-1] += s[offset:offset + 1]
+            offset += 1
+
+        if offset >= len(s):
+            real_parts = _configlist(parts[-1])
+            if not real_parts:
+                parts[-1] = '"'
+            else:
+                real_parts[0] = '"' + real_parts[0]
+                parts = parts[:-1]
+                parts.extend(real_parts)
+            return None, parts, offset
+
+        offset += 1
+        while offset < len(s) and s[offset:offset + 1] in [' ', ',']:
+            offset += 1
+
+        if offset < len(s):
+            if offset + 1 == len(s) and s[offset:offset + 1] == '"':
+                parts[-1] += '"'
+                offset += 1
+            else:
+                parts.append('')
+        else:
+            return None, parts, offset
+
+        return _parse_plain, parts, offset
+
+    def _configlist(s):
+        s = s.rstrip(' ,')
+        if not s:
+            return []
+        parser, parts, offset = _parse_plain, [''], 0
+        while parser:
+            parser, parts, offset = parser(parts, s, offset)
+        return parts
+
+    if value is not None and isinstance(value, bytes):
+        result = _configlist(value.lstrip(' ,\n'))
+    else:
+        result = value
+    return result or []
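A few more inputs, assuming the module is importable as
``mercurial.config`` (results follow from the parser above, Python 2
string semantics as in the doctest)::

    from mercurial import config

    assert config.parselist('a, b,,c') == ['a', 'b', 'c']
    assert config.parselist('"x, y" z') == ['x, y', 'z']
    assert config.parselist('') == []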
--- a/mercurial/context.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/context.py	Tue Apr 18 12:24:34 2017 -0400
@@ -18,11 +18,11 @@
     bin,
     hex,
     modifiednodeid,
-    newnodeid,
     nullid,
     nullrev,
     short,
     wdirid,
+    wdirnodes,
 )
 from . import (
     encoding,
@@ -33,6 +33,7 @@
     obsolete as obsmod,
     patch,
     phases,
+    pycompat,
     repoview,
     revlog,
     scmutil,
@@ -64,6 +65,12 @@
         return o
 
     def __str__(self):
+        r = short(self.node())
+        if pycompat.ispy3:
+            return r.decode('ascii')
+        return r
+
+    def __bytes__(self):
         return short(self.node())
 
     def __int__(self):
@@ -90,14 +97,11 @@
     def __iter__(self):
         return iter(self._manifest)
 
-    def _manifestmatches(self, match, s):
-        """generate a new manifest filtered by the match argument
-
-        This method is for internal use only and mainly exists to provide an
-        object oriented way for other contexts to customize the manifest
-        generation.
-        """
-        return self.manifest().matches(match)
+    def _buildstatusmanifest(self, status):
+        """Builds a manifest that includes the given status results, if this is
+        a working copy context. For non-working copy contexts, it just returns
+        the normal manifest."""
+        return self.manifest()
 
     def _matchstatus(self, other, match):
         """return match.always if match is none
@@ -116,17 +120,19 @@
         # 1000 and cache it so that when you read 1001, we just need to apply a
         # delta to what's in the cache. So that's one full reconstruction + one
         # delta application.
+        mf2 = None
         if self.rev() is not None and self.rev() < other.rev():
-            self.manifest()
-        mf1 = other._manifestmatches(match, s)
-        mf2 = self._manifestmatches(match, s)
+            mf2 = self._buildstatusmanifest(s)
+        mf1 = other._buildstatusmanifest(s)
+        if mf2 is None:
+            mf2 = self._buildstatusmanifest(s)
 
         modified, added = [], []
         removed = []
         clean = []
         deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
         deletedset = set(deleted)
-        d = mf1.diff(mf2, clean=listclean)
+        d = mf1.diff(mf2, match=match, clean=listclean)
         for fn, value in d.iteritems():
             if fn in deletedset:
                 continue
@@ -140,7 +146,7 @@
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
-            elif node2 != newnodeid:
+            elif node2 not in wdirnodes:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
@@ -153,8 +159,10 @@
 
         if removed:
             # need to filter files if they are already reported as removed
-            unknown = [fn for fn in unknown if fn not in mf1]
-            ignored = [fn for fn in ignored if fn not in mf1]
+            unknown = [fn for fn in unknown if fn not in mf1 and
+                                               (not match or match(fn))]
+            ignored = [fn for fn in ignored if fn not in mf1 and
+                                               (not match or match(fn))]
             # if they're deleted, don't report them as removed
             removed = [fn for fn in removed if fn not in deletedset]
 
@@ -290,8 +298,10 @@
         '''
         return subrepo.subrepo(self, path, allowwdir=True)
 
-    def match(self, pats=[], include=None, exclude=None, default='glob',
+    def match(self, pats=None, include=None, exclude=None, default='glob',
               listsubrepos=False, badfn=None):
+        if pats is None:
+            pats = []
         r = self._repo
         return matchmod.match(r.root, r.getcwd(), pats,
                               include, exclude, default,
@@ -397,6 +407,19 @@
                  date, extra, editor)
     return ctx
 
+def _filterederror(repo, changeid):
+    """build an exception to be raised about a filtered changeid
+
+    This is extracted in a function to help extensions (eg: evolve) to
+    experiment with various message variants."""
+    if repo.filtername.startswith('visible'):
+        msg = _("hidden revision '%s'") % changeid
+        hint = _('use --hidden to access hidden revisions')
+        return error.FilteredRepoLookupError(msg, hint=hint)
+    msg = _("filtered revision '%s' (not in '%s' subset)")
+    msg %= (changeid, repo.filtername)
+    return error.FilteredRepoLookupError(msg)
+
 class changectx(basectx):
     """A changecontext object makes access to data related to a particular
     changeset convenient. It represents a read-only context already present in
@@ -418,7 +441,7 @@
                 self._node = repo.changelog.node(changeid)
                 self._rev = changeid
                 return
-            if isinstance(changeid, long):
+            if not pycompat.ispy3 and isinstance(changeid, long):
                 changeid = str(changeid)
             if changeid == 'null':
                 self._node = nullid
@@ -446,7 +469,7 @@
 
             try:
                 r = int(changeid)
-                if str(r) != changeid:
+                if '%d' % r != changeid:
                     raise ValueError
                 l = len(repo.changelog)
                 if r < 0:
@@ -503,13 +526,7 @@
                 pass
         except (error.FilteredIndexError, error.FilteredLookupError,
                 error.FilteredRepoLookupError):
-            if repo.filtername.startswith('visible'):
-                msg = _("hidden revision '%s'") % changeid
-                hint = _('use --hidden to access hidden revisions')
-                raise error.FilteredRepoLookupError(msg, hint=hint)
-            msg = _("filtered revision '%s' (not in '%s' subset)")
-            msg %= (changeid, repo.filtername)
-            raise error.FilteredRepoLookupError(msg)
+            raise _filterederror(repo, changeid)
         except IndexError:
             pass
         raise error.RepoLookupError(
@@ -524,6 +541,8 @@
     def __nonzero__(self):
         return self._rev != nullrev
 
+    __bool__ = __nonzero__
+
     @propertycache
     def _changeset(self):
         return self._repo.changelog.changelogrevision(self.rev())
@@ -712,6 +731,8 @@
             # file is missing
             return False
 
+    __bool__ = __nonzero__
+
     def __str__(self):
         try:
             return "%s@%s" % (self.path(), self._changectx)
@@ -1166,7 +1187,7 @@
     diffinrange = any(stype == '!' for _, stype in filteredblocks)
     return diffinrange, linerange1
 
-def blockancestors(fctx, fromline, toline):
+def blockancestors(fctx, fromline, toline, followfirst=False):
     """Yield ancestors of `fctx` with respect to the block of lines within
     `fromline`-`toline` range.
     """
@@ -1175,9 +1196,11 @@
     while visit:
         c, linerange2 = visit.pop(max(visit))
         pl = c.parents()
+        if followfirst:
+            pl = pl[:1]
         if not pl:
             # The block originates from the initial revision.
-            yield c
+            yield c, linerange2
             continue
         inrange = False
         for p in pl:
@@ -1190,7 +1213,45 @@
                 continue
             visit[p.linkrev(), p.filenode()] = p, linerange1
         if inrange:
-            yield c
+            yield c, linerange2
+
+def blockdescendants(fctx, fromline, toline):
+    """Yield descendants of `fctx` with respect to the block of lines within
+    `fromline`-`toline` range.
+    """
+    # First possibly yield 'fctx' if it has changes in range with respect to
+    # its parents.
+    try:
+        c, linerange1 = next(blockancestors(fctx, fromline, toline))
+    except StopIteration:
+        pass
+    else:
+        if c == fctx:
+            yield c, linerange1
+
+    diffopts = patch.diffopts(fctx._repo.ui)
+    fl = fctx.filelog()
+    seen = {fctx.filerev(): (fctx, (fromline, toline))}
+    for i in fl.descendants([fctx.filerev()]):
+        c = fctx.filectx(i)
+        inrange = False
+        for x in fl.parentrevs(i):
+            try:
+                p, linerange2 = seen[x]
+            except KeyError:
+                # nullrev or other branch
+                continue
+            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
+            inrange = inrange or inrangep
+            # If revision 'i' has been seen (it's a merge), we assume that its
+            # line range is the same regardless of which parent was used
+            # to compute it.
+            assert i not in seen or seen[i][1] == linerange1, (
+                'computed line range for %s is not consistent between '
+                'ancestor branches' % c)
+            seen[i] = c, linerange1
+        if inrange:
+            yield c, linerange1
 
 class committablectx(basectx):
     """A committablectx object provides common functionality for a context that
@@ -1226,6 +1287,8 @@
     def __nonzero__(self):
         return True
 
+    __bool__ = __nonzero__
+
     def _buildflagfunc(self):
         # Create a fallback function for getting file flags when the
         # filesystem doesn't support them
@@ -1263,35 +1326,6 @@
         return self._repo.dirstate.flagfunc(self._buildflagfunc)
 
     @propertycache
-    def _manifest(self):
-        """generate a manifest corresponding to the values in self._status
-
-        This reuses the file nodeid from the parent, but we append an extra
-        letter
-        when modified. Modified files get an extra 'm' while added files get
-        an extra 'a'. This is used by manifests merge to see that files
-        are different and by update logic to avoid deleting newly added files.
-        """
-        parents = self.parents()
-
-        man = parents[0].manifest().copy()
-
-        ff = self._flagfunc
-        for i, l in ((addednodeid, self._status.added),
-                     (modifiednodeid, self._status.modified)):
-            for f in l:
-                man[f] = i
-                try:
-                    man.setflag(f, ff(f))
-                except OSError:
-                    pass
-
-        for f in self._status.deleted + self._status.removed:
-            if f in man:
-                del man[f]
-
-        return man
-
-    @propertycache
     def _status(self):
         return self._repo.status()
 
@@ -1534,21 +1568,21 @@
                     self._repo.dirstate.normallookup(dest)
                 self._repo.dirstate.copy(source, dest)
 
-    def match(self, pats=[], include=None, exclude=None, default='glob',
+    def match(self, pats=None, include=None, exclude=None, default='glob',
               listsubrepos=False, badfn=None):
+        if pats is None:
+            pats = []
         r = self._repo
 
         # Only a case insensitive filesystem needs magic to translate user input
         # to actual case in the filesystem.
+        matcherfunc = matchmod.match
         if not util.fscasesensitive(r.root):
-            return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
-                                           exclude, default, r.auditor, self,
-                                           listsubrepos=listsubrepos,
-                                           badfn=badfn)
-        return matchmod.match(r.root, r.getcwd(), pats,
-                              include, exclude, default,
-                              auditor=r.auditor, ctx=self,
-                              listsubrepos=listsubrepos, badfn=badfn)
+            matcherfunc = matchmod.icasefsmatcher
+        return matcherfunc(r.root, r.getcwd(), pats,
+                           include, exclude, default,
+                           auditor=r.auditor, ctx=self,
+                           listsubrepos=listsubrepos, badfn=badfn)
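The signature change from pats=[] to pats=None above avoids Python's
shared-mutable-default pitfall: a default list is created once, at function
definition time, and reused across calls. A self-contained illustration of
the bug and the fix:

    def bad(pats=[]):           # one list shared by every call
        pats.append('x')
        return pats

    def good(pats=None):        # fresh list per call, as in match() above
        if pats is None:
            pats = []
        pats.append('x')
        return pats

    assert bad() == ['x'] and bad() == ['x', 'x']   # state leaks between calls
    assert good() == ['x'] and good() == ['x']      # no leak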
 
     def _filtersuspectsymlink(self, files):
         if not files or self._repo.dirstate._checklink:
@@ -1605,22 +1639,6 @@
                 pass
         return modified, fixup
 
-    def _manifestmatches(self, match, s):
-        """Slow path for workingctx
-
-        The fast path is when we compare the working directory to its parent
-        which means this function is comparing with a non-parent; therefore we
-        need to build a manifest and return what matches.
-        """
-        mf = self._repo['.']._manifestmatches(match, s)
-        for f in s.modified + s.added:
-            mf[f] = newnodeid
-            mf.setflag(f, self.flags(f))
-        for f in s.removed:
-            if f in mf:
-                del mf[f]
-        return mf
-
     def _dirstatestatus(self, match=None, ignored=False, clean=False,
                         unknown=False):
         '''Gets the status from the dirstate -- internal use only.'''
@@ -1652,6 +1670,39 @@
 
         return s
 
+    @propertycache
+    def _manifest(self):
+        """generate a manifest corresponding to the values in self._status
+
+        This reuses the file nodeids from the parent, but uses special
+        node identifiers for added and modified files. Manifest merge
+        uses this to see that files differ, and update logic uses it to
+        avoid deleting newly added files.
+        """
+        return self._buildstatusmanifest(self._status)
+
+    def _buildstatusmanifest(self, status):
+        """Builds a manifest that includes the given status results."""
+        parents = self.parents()
+
+        man = parents[0].manifest().copy()
+
+        ff = self._flagfunc
+        for i, l in ((addednodeid, status.added),
+                     (modifiednodeid, status.modified)):
+            for f in l:
+                man[f] = i
+                try:
+                    man.setflag(f, ff(f))
+                except OSError:
+                    pass
+
+        for f in status.deleted + status.removed:
+            if f in man:
+                del man[f]
+
+        return man
+
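In outline, _buildstatusmanifest() starts from the first parent's manifest,
stamps added and modified files with sentinel node ids, and drops removed or
deleted ones. A dict-based sketch with made-up sentinel values and a plain
dict standing in for the status object:

    ADDEDNODE, MODIFIEDNODE = 'added!', 'modified!'   # illustrative sentinels

    def buildstatusmanifest(parentman, status):
        man = dict(parentman)                 # copy of parent file -> node
        for nodeid, files in ((ADDEDNODE, status['added']),
                              (MODIFIEDNODE, status['modified'])):
            for f in files:
                man[f] = nodeid
        for f in status['deleted'] + status['removed']:
            man.pop(f, None)
        return man

    status = {'added': ['new.txt'], 'modified': ['a.txt'],
              'deleted': [], 'removed': ['old.txt']}
    print(buildstatusmanifest({'a.txt': 'n1', 'old.txt': 'n2'}, status))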
     def _buildstatus(self, other, s, match, listignored, listclean,
                      listunknown):
         """build a status with respect to another context
@@ -1711,6 +1762,8 @@
     def __nonzero__(self):
         return True
 
+    __bool__ = __nonzero__
+
     def linkrev(self):
         # linked to self._changectx no matter if file is modified or not
         return self.rev()
@@ -1779,7 +1832,7 @@
 
     def remove(self, ignoremissing=False):
         """wraps unlink for a repo's working directory"""
-        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
+        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
 
     def write(self, data, flags):
         """wraps repo.wwrite"""
@@ -1856,8 +1909,8 @@
     commit function for every file in 'files', but calls order is
     undefined. If the file is available in the revision being
     committed (updated or added), filectxfn returns a memfilectx
-    object. If the file was removed, filectxfn raises an
-    IOError. Moved files are represented by marking the source file
+    object. If the file was removed, filectxfn returns None in recent
+    Mercurial versions. Moved files are represented by marking the source file
     removed and the new file added with copy information (see
     memfilectx).
 
@@ -2046,10 +2099,10 @@
         # sanity check to ensure that the reused manifest parents are
         # manifests of our commit parents
         mp1, mp2 = self.manifestctx().parents
-        if p1 != nullid and p1.manifestctx().node() != mp1:
+        if p1 != nullid and p1.manifestnode() != mp1:
             raise RuntimeError('can\'t reuse the manifest: '
                                'its p1 doesn\'t match the new ctx p1')
-        if p2 != nullid and p2.manifestctx().node() != mp2:
+        if p2 != nullid and p2.manifestnode() != mp2:
             raise RuntimeError('can\'t reuse the manifest: '
                                'its p2 doesn\'t match the new ctx p2')
 
--- a/mercurial/copies.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/copies.py	Tue Apr 18 12:24:34 2017 -0400
@@ -149,10 +149,7 @@
     """
     ma = a.manifest()
     mb = b.manifest()
-    if match:
-        ma = ma.matches(match)
-        mb = mb.matches(match)
-    return mb.filesnotin(ma)
+    return mb.filesnotin(ma, match=match)
 
 def _forwardcopies(a, b, match=None):
     '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
--- a/mercurial/crecord.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/crecord.py	Tue Apr 18 12:24:34 2017 -0400
@@ -473,12 +473,17 @@
     """
     ui.write(_('starting interactive selection\n'))
     chunkselector = curseschunkselector(headerlist, ui, operation)
-    f = signal.getsignal(signal.SIGTSTP)
-    curses.wrapper(chunkselector.main)
-    if chunkselector.initerr is not None:
-        raise error.Abort(chunkselector.initerr)
-    # ncurses does not restore signal handler for SIGTSTP
-    signal.signal(signal.SIGTSTP, f)
+    origsigtstp = sentinel = object()
+    if util.safehasattr(signal, 'SIGTSTP'):
+        origsigtstp = signal.getsignal(signal.SIGTSTP)
+    try:
+        curses.wrapper(chunkselector.main)
+        if chunkselector.initerr is not None:
+            raise error.Abort(chunkselector.initerr)
+        # ncurses does not restore signal handler for SIGTSTP
+    finally:
+        if origsigtstp is not sentinel:
+            signal.signal(signal.SIGTSTP, origsigtstp)
     return chunkselector.opts
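The sentinel dance above exists because signal.SIGTSTP is missing on Windows:
the original handler is saved only when the signal exists, and restored in a
finally block only when it was actually saved. The same pattern in isolation:

    import signal

    orig = sentinel = object()
    if hasattr(signal, 'SIGTSTP'):            # absent on Windows
        orig = signal.getsignal(signal.SIGTSTP)
    try:
        pass  # run code that may clobber the handler, e.g. a curses app
    finally:
        if orig is not sentinel:              # only restore what was saved
            signal.signal(signal.SIGTSTP, orig)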
 
 def testdecorator(testfn, f):
@@ -564,7 +569,7 @@
 
         # affects some ui text
         if operation not in _headermessages:
-            raise RuntimeError('unexpected operation: %s' % operation)
+            raise error.ProgrammingError('unexpected operation: %s' % operation)
         self.operation = operation
 
     def uparrowevent(self):
@@ -1375,7 +1380,8 @@
             pass
         helpwin.refresh()
         try:
-            helpwin.getkey()
+            with self.ui.timeblockedsection('crecord'):
+                helpwin.getkey()
         except curses.error:
             pass
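The ui.timeblockedsection() calls threaded through this file wrap every
blocking getkey()/getch() so time spent waiting on the user can be discounted
from performance measurements. A hypothetical stand-in for that context
manager, assuming nothing about hg's ui internals:

    import contextlib
    import time

    blocked = {}  # hypothetical accounting table: key -> seconds blocked

    @contextlib.contextmanager
    def timeblockedsection(key):
        start = time.time()
        try:
            yield
        finally:
            blocked[key] = blocked.get(key, 0.0) + time.time() - start

    with timeblockedsection('crecord'):
        time.sleep(0.01)          # stands in for helpwin.getkey()
    print(blocked)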
 
@@ -1392,7 +1398,8 @@
         self.stdscr.refresh()
         confirmwin.refresh()
         try:
-            response = chr(self.stdscr.getch())
+            with self.ui.timeblockedsection('crecord'):
+                response = chr(self.stdscr.getch())
         except ValueError:
             response = None
 
@@ -1412,7 +1419,8 @@
 
 are you sure you want to review/edit and confirm the selected changes [yn]?
 """)
-        response = self.confirmationwindow(confirmtext)
+        with self.ui.timeblockedsection('crecord'):
+            response = self.confirmationwindow(confirmtext)
         if response is None:
             response = "n"
         if response.lower().startswith("y"):
@@ -1611,8 +1619,17 @@
         method to be wrapped by curses.wrapper() for selecting chunks.
         """
 
-        origsigwinchhandler = signal.signal(signal.SIGWINCH,
-                                            self.sigwinchhandler)
+        origsigwinch = sentinel = object()
+        if util.safehasattr(signal, 'SIGWINCH'):
+            origsigwinch = signal.signal(signal.SIGWINCH,
+                                         self.sigwinchhandler)
+        try:
+            return self._main(stdscr)
+        finally:
+            if origsigwinch is not sentinel:
+                signal.signal(signal.SIGWINCH, origsigwinch)
+
+    def _main(self, stdscr):
         self.stdscr = stdscr
         # error during initialization, cannot be printed in the curses
         # interface, it should be printed by the calling code
@@ -1655,7 +1672,8 @@
         while True:
             self.updatescreen()
             try:
-                keypressed = self.statuswin.getkey()
+                with self.ui.timeblockedsection('crecord'):
+                    keypressed = self.statuswin.getkey()
                 if self.errorstr is not None:
                     self.errorstr = None
                     continue
@@ -1663,4 +1681,3 @@
                 keypressed = "foobar"
             if self.handlekeypressed(keypressed):
                 break
-        signal.signal(signal.SIGWINCH, origsigwinchhandler)
--- a/mercurial/debugcommands.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/debugcommands.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,41 +7,64 @@
 
 from __future__ import absolute_import
 
+import difflib
+import errno
 import operator
 import os
 import random
+import socket
+import string
+import sys
+import tempfile
+import time
 
 from .i18n import _
 from .node import (
     bin,
     hex,
+    nullhex,
     nullid,
+    nullrev,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     cmdutil,
+    color,
     commands,
     context,
     dagparser,
     dagutil,
+    encoding,
     error,
     exchange,
     extensions,
     fileset,
+    formatter,
     hg,
     localrepo,
     lock as lockmod,
+    merge as mergemod,
+    obsolete,
+    policy,
+    pvec,
     pycompat,
     repair,
     revlog,
+    revset,
+    revsetlang,
     scmutil,
     setdiscovery,
     simplemerge,
+    smartset,
+    sslutil,
     streamclone,
+    templater,
     treediscovery,
+    upgrade,
     util,
+    vfs as vfsmod,
 )
 
 release = lockmod.release
@@ -55,7 +78,7 @@
     """find the ancestor revision of two revisions in a given index"""
     if len(args) == 3:
         index, rev1, rev2 = args
-        r = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False), index)
+        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
         lookup = r.lookup
     elif len(args) == 2:
         if not repo:
@@ -324,6 +347,47 @@
         error = _(".hg/dirstate inconsistent with current parent's manifest")
         raise error.Abort(error)
 
+@command('debugcolor',
+        [('', 'style', None, _('show all configured styles'))],
+        'hg debugcolor')
+def debugcolor(ui, repo, **opts):
+    """show available color, effects or style"""
+    ui.write(('color mode: %s\n') % ui._colormode)
+    if opts.get('style'):
+        return _debugdisplaystyle(ui)
+    else:
+        return _debugdisplaycolor(ui)
+
+def _debugdisplaycolor(ui):
+    ui = ui.copy()
+    ui._styles.clear()
+    for effect in color._activeeffects(ui).keys():
+        ui._styles[effect] = effect
+    if ui._terminfoparams:
+        for k, v in ui.configitems('color'):
+            if k.startswith('color.'):
+                ui._styles[k] = k[6:]
+            elif k.startswith('terminfo.'):
+                ui._styles[k] = k[9:]
+    ui.write(_('available colors:\n'))
+    # sort labels with a '_' after the others to group the '_background' entries.
+    items = sorted(ui._styles.items(),
+                   key=lambda i: ('_' in i[0], i[0], i[1]))
+    for colorname, label in items:
+        ui.write(('%s\n') % colorname, label=label)
+
+def _debugdisplaystyle(ui):
+    ui.write(_('available styles:\n'))
+    width = max(len(s) for s in ui._styles)
+    for label, effects in sorted(ui._styles.items()):
+        ui.write('%s' % label, label=label)
+        if effects:
+            # pad so the effect lists line up in a column
+            ui.write(': ')
+            ui.write(' ' * (max(0, width - len(label))))
+            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
+        ui.write('\n')
+
 @command('debugcommands', [], _('[COMMAND]'), norepo=True)
 def debugcommands(ui, cmd='', *args):
     """list all available commands and options"""
@@ -390,7 +454,7 @@
     spaces = opts.get('spaces')
     dots = opts.get('dots')
     if file_:
-        rlog = revlog.revlog(scmutil.opener(pycompat.getcwd(), audit=False),
+        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                              file_)
         revs = set((int(r) for r in revs))
         def events():
@@ -567,6 +631,37 @@
 
     fm.end()
 
+@command('debugdirstate|debugstate',
+    [('', 'nodates', None, _('do not display the saved mtime')),
+    ('', 'datesort', None, _('sort by saved mtime'))],
+    _('[OPTION]...'))
+def debugstate(ui, repo, **opts):
+    """show the contents of the current dirstate"""
+
+    nodates = opts.get('nodates')
+    datesort = opts.get('datesort')
+
+    timestr = ""
+    if datesort:
+        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
+    else:
+        keyfunc = None # sort by filename
+    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
+        if ent[3] == -1:
+            timestr = 'unset               '
+        elif nodates:
+            timestr = 'set                 '
+        else:
+            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
+                                    time.localtime(ent[3]))
+        if ent[1] & 0o20000:
+            mode = 'lnk'
+        else:
+            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
+        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+    for f in repo.dirstate.copies():
+        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
+
 @command('debugdiscovery',
     [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
@@ -641,7 +736,7 @@
     fm = ui.formatter('debugextensions', opts)
     for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
         isinternal = extensions.ismoduleinternal(extmod)
-        extsource = extmod.__file__
+        extsource = pycompat.fsencode(extmod.__file__)
         if isinternal:
             exttestedwith = []  # never expose magic string to users
         else:
@@ -694,13 +789,17 @@
 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
 def debugfsinfo(ui, path="."):
     """show information detected about current filesystem"""
-    util.writefile('.debugfsinfo', '')
     ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
+    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
     ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
     ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
-    ui.write(('case-sensitive: %s\n') % (util.fscasesensitive('.debugfsinfo')
-                                and 'yes' or 'no'))
-    os.unlink('.debugfsinfo')
+    casesensitive = '(unknown)'
+    try:
+        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
+            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
+    except OSError:
+        pass
+    ui.write(('case-sensitive: %s\n') % casesensitive)
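The probe above creates a throwaway file so util.fscasesensitive() has a real
path to test. One way such a check can work, sketched without hg (this
mirrors the idea, not util's exact implementation):

    import os
    import tempfile

    with tempfile.NamedTemporaryFile(prefix='.probe') as f:
        head, tail = os.path.split(f.name)
        swapped = os.path.join(head, tail.swapcase())
        print('case-sensitive:', 'no' if os.path.exists(swapped) else 'yes')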
 
 @command('debuggetbundle',
     [('H', 'head', [], _('id of head node'), _('ID')),
@@ -851,6 +950,1106 @@
             ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write("}\n")
 
+@command('debuginstall', [] + commands.formatteropts, '', norepo=True)
+def debuginstall(ui, **opts):
+    '''test Mercurial installation
+
+    Returns 0 on success.
+    '''
+
+    def writetemp(contents):
+        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
+        f = os.fdopen(fd, pycompat.sysstr("wb"))
+        f.write(contents)
+        f.close()
+        return name
+
+    problems = 0
+
+    fm = ui.formatter('debuginstall', opts)
+    fm.startitem()
+
+    # encoding
+    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
+    err = None
+    try:
+        encoding.fromlocal("test")
+    except error.Abort as inst:
+        err = inst
+        problems += 1
+    fm.condwrite(err, 'encodingerror', _(" %s\n"
+                 " (check that your locale is properly set)\n"), err)
+
+    # Python
+    fm.write('pythonexe', _("checking Python executable (%s)\n"),
+             pycompat.sysexecutable)
+    fm.write('pythonver', _("checking Python version (%s)\n"),
+             ("%d.%d.%d" % sys.version_info[:3]))
+    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
+             os.path.dirname(pycompat.fsencode(os.__file__)))
+
+    security = set(sslutil.supportedprotocols)
+    if sslutil.hassni:
+        security.add('sni')
+
+    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
+             fm.formatlist(sorted(security), name='protocol',
+                           fmt='%s', sep=','))
+
+    # These are warnings, not errors. So don't increment problem count. This
+    # may change in the future.
+    if 'tls1.2' not in security:
+        fm.plain(_('  TLS 1.2 not supported by Python install; '
+                   'network connections lack modern security\n'))
+    if 'sni' not in security:
+        fm.plain(_('  SNI not supported by Python install; may have '
+                   'connectivity issues with some servers\n'))
+
+    # TODO print CA cert info
+
+    # hg version
+    hgver = util.version()
+    fm.write('hgver', _("checking Mercurial version (%s)\n"),
+             hgver.split('+')[0])
+    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
+             '+'.join(hgver.split('+')[1:]))
+
+    # compiled modules
+    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
+             policy.policy)
+    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
+             os.path.dirname(pycompat.fsencode(__file__)))
+
+    err = None
+    try:
+        from . import (
+            base85,
+            bdiff,
+            mpatch,
+            osutil,
+        )
+        dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
+    except Exception as inst:
+        err = inst
+        problems += 1
+    fm.condwrite(err, 'extensionserror', " %s\n", err)
+
+    compengines = util.compengines._engines.values()
+    fm.write('compengines', _('checking registered compression engines (%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines),
+                           name='compengine', fmt='%s', sep=', '))
+    fm.write('compenginesavail', _('checking available compression engines '
+                                   '(%s)\n'),
+             fm.formatlist(sorted(e.name() for e in compengines
+                                  if e.available()),
+                           name='compengine', fmt='%s', sep=', '))
+    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
+    fm.write('compenginesserver', _('checking available compression engines '
+                                    'for wire protocol (%s)\n'),
+             fm.formatlist([e.name() for e in wirecompengines
+                            if e.wireprotosupport()],
+                           name='compengine', fmt='%s', sep=', '))
+
+    # templates
+    p = templater.templatepaths()
+    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
+    fm.condwrite(not p, '', _(" no template directories found\n"))
+    if p:
+        m = templater.templatepath("map-cmdline.default")
+        if m:
+            # template found, check if it is working
+            err = None
+            try:
+                templater.templater.frommapfile(m)
+            except Exception as inst:
+                err = inst
+                p = None
+            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
+        else:
+            p = None
+        fm.condwrite(p, 'defaulttemplate',
+                     _("checking default template (%s)\n"), m)
+        fm.condwrite(not m, 'defaulttemplatenotfound',
+                     _(" template '%s' not found\n"), "default")
+    if not p:
+        problems += 1
+    fm.condwrite(not p, '',
+                 _(" (templates seem to have been installed incorrectly)\n"))
+
+    # editor
+    editor = ui.geteditor()
+    editor = util.expandpath(editor)
+    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
+    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
+    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
+                 _(" No commit editor set and can't find %s in PATH\n"
+                   " (specify a commit editor in your configuration"
+                   " file)\n"), not cmdpath and editor == 'vi' and editor)
+    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
+                 _(" Can't find editor '%s' in PATH\n"
+                   " (specify a commit editor in your configuration"
+                   " file)\n"), not cmdpath and editor)
+    if not cmdpath and editor != 'vi':
+        problems += 1
+
+    # check username
+    username = None
+    err = None
+    try:
+        username = ui.username()
+    except error.Abort as e:
+        err = e
+        problems += 1
+
+    fm.condwrite(username, 'username',  _("checking username (%s)\n"), username)
+    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
+        " (specify a username in your configuration file)\n"), err)
+
+    fm.condwrite(not problems, '',
+                 _("no problems detected\n"))
+    if not problems:
+        fm.data(problems=problems)
+    fm.condwrite(problems, 'problems',
+                 _("%d problems detected,"
+                   " please check your install!\n"), problems)
+    fm.end()
+
+    return problems
+
+@command('debugknown', [], _('REPO ID...'), norepo=True)
+def debugknown(ui, repopath, *ids, **opts):
+    """test whether node ids are known to a repo
+
+    Every ID must be a full-length hex node id string. Returns a list of 0s
+    and 1s indicating unknown/known.
+    """
+    repo = hg.peer(ui, opts, repopath)
+    if not repo.capable('known'):
+        raise error.Abort("known() not supported by target repository")
+    flags = repo.known([bin(s) for s in ids])
+    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
+
+@command('debuglabelcomplete', [], _('LABEL...'))
+def debuglabelcomplete(ui, repo, *args):
+    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
+    debugnamecomplete(ui, repo, *args)
+
+@command('debuglocks',
+         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
+          ('W', 'force-wlock', None,
+           _('free the working state lock (DANGEROUS)'))],
+         _('[OPTION]...'))
+def debuglocks(ui, repo, **opts):
+    """show or modify state of locks
+
+    By default, this command will show which locks are held. This
+    includes the user and process holding the lock, the amount of time
+    the lock has been held, and the machine name where the process is
+    running if it's not local.
+
+    Locks protect the integrity of Mercurial's data, so they should be
+    treated with care. System crashes or other interruptions may cause
+    locks to not be properly released, though Mercurial will usually
+    detect and remove such stale locks automatically.
+
+    However, detecting stale locks may not always be possible (for
+    instance, on a shared filesystem). Removing locks may also be
+    blocked by filesystem permissions.
+
+    Returns 0 if no locks are held.
+
+    """
+
+    if opts.get('force_lock'):
+        repo.svfs.unlink('lock')
+    if opts.get('force_wlock'):
+        repo.vfs.unlink('wlock')
+    if opts.get('force_lock') or opts.get('force_wlock'):
+        return 0
+
+    now = time.time()
+    held = 0
+
+    def report(vfs, name, method):
+        # this causes stale locks to get reaped for more accurate reporting
+        try:
+            l = method(False)
+        except error.LockHeld:
+            l = None
+
+        if l:
+            l.release()
+        else:
+            try:
+                stat = vfs.lstat(name)
+                age = now - stat.st_mtime
+                user = util.username(stat.st_uid)
+                locker = vfs.readlock(name)
+                if ":" in locker:
+                    host, pid = locker.split(':')
+                    if host == socket.gethostname():
+                        locker = 'user %s, process %s' % (user, pid)
+                    else:
+                        locker = 'user %s, process %s, host %s' \
+                                 % (user, pid, host)
+                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
+                return 1
+            except OSError as e:
+                if e.errno != errno.ENOENT:
+                    raise
+
+        ui.write(("%-6s free\n") % (name + ":"))
+        return 0
+
+    held += report(repo.svfs, "lock", repo.lock)
+    held += report(repo.vfs, "wlock", repo.wlock)
+
+    return held
+
+@command('debugmergestate', [], '')
+def debugmergestate(ui, repo, *args):
+    """print merge state
+
+    Use --verbose to print out information about whether v1 or v2 merge state
+    was chosen."""
+    def _hashornull(h):
+        if h == nullhex:
+            return 'null'
+        else:
+            return h
+
+    def printrecords(version):
+        ui.write(('* version %s records\n') % version)
+        if version == 1:
+            records = v1records
+        else:
+            records = v2records
+
+        for rtype, record in records:
+            # pretty print some record types
+            if rtype == 'L':
+                ui.write(('local: %s\n') % record)
+            elif rtype == 'O':
+                ui.write(('other: %s\n') % record)
+            elif rtype == 'm':
+                driver, mdstate = record.split('\0', 1)
+                ui.write(('merge driver: %s (state "%s")\n')
+                         % (driver, mdstate))
+            elif rtype in 'FDC':
+                r = record.split('\0')
+                f, state, hash, lfile, afile, anode, ofile = r[0:7]
+                if version == 1:
+                    onode = 'not stored in v1 format'
+                    flags = r[7]
+                else:
+                    onode, flags = r[7:9]
+                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
+                         % (f, rtype, state, _hashornull(hash)))
+                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
+                ui.write(('  ancestor path: %s (node %s)\n')
+                         % (afile, _hashornull(anode)))
+                ui.write(('  other path: %s (node %s)\n')
+                         % (ofile, _hashornull(onode)))
+            elif rtype == 'f':
+                filename, rawextras = record.split('\0', 1)
+                extras = rawextras.split('\0')
+                i = 0
+                extrastrings = []
+                while i < len(extras):
+                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
+                    i += 2
+
+                ui.write(('file extras: %s (%s)\n')
+                         % (filename, ', '.join(extrastrings)))
+            elif rtype == 'l':
+                labels = record.split('\0', 2)
+                labels = [l for l in labels if len(l) > 0]
+                ui.write(('labels:\n'))
+                ui.write(('  local: %s\n' % labels[0]))
+                ui.write(('  other: %s\n' % labels[1]))
+                if len(labels) > 2:
+                    ui.write(('  base:  %s\n' % labels[2]))
+            else:
+                ui.write(('unrecognized entry: %s\t%s\n')
+                         % (rtype, record.replace('\0', '\t')))
+
+    # Avoid mergestate.read() since it may raise an exception for unsupported
+    # merge state records. We shouldn't be doing this, but this is OK since this
+    # command is pretty low-level.
+    ms = mergemod.mergestate(repo)
+
+    # sort so that reasonable information is on top
+    v1records = ms._readrecordsv1()
+    v2records = ms._readrecordsv2()
+    order = 'LOml'
+    def key(r):
+        idx = order.find(r[0])
+        if idx == -1:
+            return (1, r[1])
+        else:
+            return (0, idx)
+    v1records.sort(key=key)
+    v2records.sort(key=key)
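The key function above pins the well-known record types to a fixed order and
pushes everything else to the end, sorted by payload. Standalone:

    order = 'LOml'                       # local, other, driver, labels first

    def key(r):
        idx = order.find(r[0])
        return (1, r[1]) if idx == -1 else (0, idx)

    records = [('F', 'b.txt'), ('l', 'x'), ('O', 'node2'), ('L', 'node1')]
    assert [r[0] for r in sorted(records, key=key)] == ['L', 'O', 'l', 'F']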
+
+    if not v1records and not v2records:
+        ui.write(('no merge state found\n'))
+    elif not v2records:
+        ui.note(('no version 2 merge state\n'))
+        printrecords(1)
+    elif ms._v1v2match(v1records, v2records):
+        ui.note(('v1 and v2 states match: using v2\n'))
+        printrecords(2)
+    else:
+        ui.note(('v1 and v2 states mismatch: using v1\n'))
+        printrecords(1)
+        if ui.verbose:
+            printrecords(2)
+
+@command('debugnamecomplete', [], _('NAME...'))
+def debugnamecomplete(ui, repo, *args):
+    '''complete "names" - tags, open branch names, bookmark names'''
+
+    names = set()
+    # since we previously only listed open branches, we will handle that
+    # specially (after this for loop)
+    for name, ns in repo.names.iteritems():
+        if name != 'branches':
+            names.update(ns.listnames(repo))
+    names.update(tag for (tag, heads, tip, closed)
+                 in repo.branchmap().iterbranches() if not closed)
+    completions = set()
+    if not args:
+        args = ['']
+    for a in args:
+        completions.update(n for n in names if n.startswith(a))
+    ui.write('\n'.join(sorted(completions)))
+    ui.write('\n')
+
+@command('debugobsolete',
+        [('', 'flags', 0, _('markers flag')),
+         ('', 'record-parents', False,
+          _('record parent information for the precursor')),
+         ('r', 'rev', [], _('display markers relevant to REV')),
+         ('', 'index', False, _('display index of the marker')),
+         ('', 'delete', [], _('delete markers specified by indices')),
+        ] + commands.commitopts2 + commands.formatteropts,
+         _('[OBSOLETED [REPLACEMENT ...]]'))
+def debugobsolete(ui, repo, precursor=None, *successors, **opts):
+    """create arbitrary obsolete marker
+
+    With no arguments, displays the list of obsolescence markers."""
+
+    def parsenodeid(s):
+        try:
+            # We do not use revsingle/revrange functions here to accept
+            # arbitrary node identifiers, possibly not present in the
+            # local repository.
+            n = bin(s)
+            if len(n) != len(nullid):
+                raise TypeError()
+            return n
+        except TypeError:
+            raise error.Abort('changeset references must be full hexadecimal '
+                             'node identifiers')
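The validation above, without hg: a full node reference must decode to
exactly 20 bytes (40 hex digits). binascii raises on odd-length or non-hex
input (a ValueError subclass on Python 3, a TypeError on Python 2):

    import binascii

    def parsenodeid(s):
        n = binascii.unhexlify(s)        # raises on malformed hex
        if len(n) != 20:
            raise ValueError('changeset references must be full '
                             'hexadecimal node identifiers')
        return n

    assert len(parsenodeid('11' * 20)) == 20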
+
+    if opts.get('delete'):
+        indices = []
+        for v in opts.get('delete'):
+            try:
+                indices.append(int(v))
+            except ValueError:
+                raise error.Abort(_('invalid index value: %r') % v,
+                                  hint=_('use integers for indices'))
+
+        if repo.currenttransaction():
+            raise error.Abort(_('cannot delete obsmarkers in the middle '
+                                'of a transaction.'))
+
+        with repo.lock():
+            n = repair.deleteobsmarkers(repo.obsstore, indices)
+            ui.write(_('deleted %i obsolescence markers\n') % n)
+
+        return
+
+    if precursor is not None:
+        if opts['rev']:
+            raise error.Abort('cannot select revision when creating marker')
+        metadata = {}
+        metadata['user'] = opts['user'] or ui.username()
+        succs = tuple(parsenodeid(succ) for succ in successors)
+        l = repo.lock()
+        try:
+            tr = repo.transaction('debugobsolete')
+            try:
+                date = opts.get('date')
+                if date:
+                    date = util.parsedate(date)
+                else:
+                    date = None
+                prec = parsenodeid(precursor)
+                parents = None
+                if opts['record_parents']:
+                    if prec not in repo.unfiltered():
+                        raise error.Abort('cannot use --record-parents on '
+                                         'unknown changesets')
+                    parents = repo.unfiltered()[prec].parents()
+                    parents = tuple(p.node() for p in parents)
+                repo.obsstore.create(tr, prec, succs, opts['flags'],
+                                     parents=parents, date=date,
+                                     metadata=metadata)
+                tr.close()
+            except ValueError as exc:
+                raise error.Abort(_('bad obsmarker input: %s') % exc)
+            finally:
+                tr.release()
+        finally:
+            l.release()
+    else:
+        if opts['rev']:
+            revs = scmutil.revrange(repo, opts['rev'])
+            nodes = [repo[r].node() for r in revs]
+            markers = list(obsolete.getmarkers(repo, nodes=nodes))
+            markers.sort(key=lambda x: x._data)
+        else:
+            markers = obsolete.getmarkers(repo)
+
+        markerstoiter = markers
+        isrelevant = lambda m: True
+        if opts.get('rev') and opts.get('index'):
+            markerstoiter = obsolete.getmarkers(repo)
+            markerset = set(markers)
+            isrelevant = lambda m: m in markerset
+
+        fm = ui.formatter('debugobsolete', opts)
+        for i, m in enumerate(markerstoiter):
+            if not isrelevant(m):
+                # marker can be irrelevant when we're iterating over a set
+                # of markers (markerstoiter) which is bigger than the set
+                # of markers we want to display (markers)
+                # this can happen if both --index and --rev options are
+                # provided and thus we need to iterate over all of the markers
+                # to get the correct indices, but only display the ones that
+                # are relevant to --rev value
+                continue
+            fm.startitem()
+            ind = i if opts.get('index') else None
+            cmdutil.showmarker(fm, m, index=ind)
+        fm.end()
+
+@command('debugpathcomplete',
+         [('f', 'full', None, _('complete an entire path')),
+          ('n', 'normal', None, _('show only normal files')),
+          ('a', 'added', None, _('show only added files')),
+          ('r', 'removed', None, _('show only removed files'))],
+         _('FILESPEC...'))
+def debugpathcomplete(ui, repo, *specs, **opts):
+    '''complete part or all of a tracked path
+
+    This command supports shells that offer path name completion. It
+    currently completes only files already known to the dirstate.
+
+    Completion extends only to the next path segment unless
+    --full is specified, in which case entire paths are used.'''
+
+    def complete(path, acceptable):
+        dirstate = repo.dirstate
+        spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
+        rootdir = repo.root + pycompat.ossep
+        if spec != repo.root and not spec.startswith(rootdir):
+            return [], []
+        if os.path.isdir(spec):
+            spec += '/'
+        spec = spec[len(rootdir):]
+        fixpaths = pycompat.ossep != '/'
+        if fixpaths:
+            spec = spec.replace(pycompat.ossep, '/')
+        speclen = len(spec)
+        fullpaths = opts['full']
+        files, dirs = set(), set()
+        adddir, addfile = dirs.add, files.add
+        for f, st in dirstate.iteritems():
+            if f.startswith(spec) and st[0] in acceptable:
+                if fixpaths:
+                    f = f.replace('/', pycompat.ossep)
+                if fullpaths:
+                    addfile(f)
+                    continue
+                s = f.find(pycompat.ossep, speclen)
+                if s >= 0:
+                    adddir(f[:s])
+                else:
+                    addfile(f)
+        return files, dirs
+
+    acceptable = ''
+    if opts['normal']:
+        acceptable += 'nm'
+    if opts['added']:
+        acceptable += 'a'
+    if opts['removed']:
+        acceptable += 'r'
+    cwd = repo.getcwd()
+    if not specs:
+        specs = ['.']
+
+    files, dirs = set(), set()
+    for spec in specs:
+        f, d = complete(spec, acceptable or 'nmar')
+        files.update(f)
+        dirs.update(d)
+    files.update(dirs)
+    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
+    ui.write('\n')
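The heart of complete() above is segment completion: given the tracked paths
and a typed prefix, offer either whole paths or just the next path component.
A toy version over plain strings (made-up paths, '/' separators only):

    paths = ['src/main.py', 'src/util/helpers.py', 'setup.py']

    def complete(spec, full=False):
        out = set()
        for f in (p for p in paths if p.startswith(spec)):
            if full:
                out.add(f)
                continue
            s = f.find('/', len(spec))           # next separator after spec
            out.add(f if s < 0 else f[:s])       # whole file or one segment
        return sorted(out)

    assert complete('s') == ['setup.py', 'src']
    assert complete('src/', full=True) == ['src/main.py',
                                           'src/util/helpers.py']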
+
+@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
+def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
+    '''access the pushkey key/value protocol
+
+    With two args, list the keys in the given namespace.
+
+    With five args, set a key to new if it currently is set to old.
+    Reports success or failure.
+    '''
+
+    target = hg.peer(ui, {}, repopath)
+    if keyinfo:
+        key, old, new = keyinfo
+        r = target.pushkey(namespace, key, old, new)
+        ui.status(str(r) + '\n')
+        return not r
+    else:
+        for k, v in sorted(target.listkeys(namespace).iteritems()):
+            ui.write("%s\t%s\n" % (util.escapestr(k),
+                                   util.escapestr(v)))
+
+@command('debugpvec', [], _('A B'))
+def debugpvec(ui, repo, a, b=None):
+    ca = scmutil.revsingle(repo, a)
+    cb = scmutil.revsingle(repo, b)
+    pa = pvec.ctxpvec(ca)
+    pb = pvec.ctxpvec(cb)
+    if pa == pb:
+        rel = "="
+    elif pa > pb:
+        rel = ">"
+    elif pa < pb:
+        rel = "<"
+    elif pa | pb:
+        rel = "|"
+    ui.write(_("a: %s\n") % pa)
+    ui.write(_("b: %s\n") % pb)
+    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
+    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
+             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
+              pa.distance(pb), rel))
+
+@command('debugrebuilddirstate|debugrebuildstate',
+    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
+     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
+                             'the working copy parent')),
+    ],
+    _('[-r REV]'))
+def debugrebuilddirstate(ui, repo, rev, **opts):
+    """rebuild the dirstate as it would look like for the given revision
+
+    If no revision is specified, the first current parent will be used.
+
+    The dirstate will be set to the files of the given revision.
+    The actual working directory content or existing dirstate
+    information such as adds or removes is not considered.
+
+    ``minimal`` will only rebuild the dirstate status for files that claim to be
+    tracked but are not in the parent manifest, or that exist in the parent
+    manifest but are not in the dirstate. It will not change adds, removes, or
+    modified files that are in the working copy parent.
+
+    One use of this command is to make the next :hg:`status` invocation
+    check the actual file content.
+    """
+    ctx = scmutil.revsingle(repo, rev)
+    with repo.wlock():
+        dirstate = repo.dirstate
+        changedfiles = None
+        # See command doc for what minimal does.
+        if opts.get('minimal'):
+            manifestfiles = set(ctx.manifest().keys())
+            dirstatefiles = set(dirstate)
+            manifestonly = manifestfiles - dirstatefiles
+            dsonly = dirstatefiles - manifestfiles
+            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
+            changedfiles = manifestonly | dsnotadded
+
+        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
+
+@command('debugrebuildfncache', [], '')
+def debugrebuildfncache(ui, repo):
+    """rebuild the fncache file"""
+    repair.rebuildfncache(ui, repo)
+
+@command('debugrename',
+    [('r', 'rev', '', _('revision to debug'), _('REV'))],
+    _('[-r REV] FILE'))
+def debugrename(ui, repo, file1, *pats, **opts):
+    """dump rename information"""
+
+    ctx = scmutil.revsingle(repo, opts.get('rev'))
+    m = scmutil.match(ctx, (file1,) + pats, opts)
+    for abs in ctx.walk(m):
+        fctx = ctx[abs]
+        o = fctx.filelog().renamed(fctx.filenode())
+        rel = m.rel(abs)
+        if o:
+            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
+        else:
+            ui.write(_("%s not renamed\n") % rel)
+
+@command('debugrevlog', commands.debugrevlogopts +
+    [('d', 'dump', False, _('dump index data'))],
+    _('-c|-m|FILE'),
+    optionalrepo=True)
+def debugrevlog(ui, repo, file_=None, **opts):
+    """show data and statistics about a revlog"""
+    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
+
+    if opts.get("dump"):
+        numrevs = len(r)
+        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
+                 " rawsize totalsize compression heads chainlen\n"))
+        ts = 0
+        heads = set()
+
+        for rev in xrange(numrevs):
+            dbase = r.deltaparent(rev)
+            if dbase == -1:
+                dbase = rev
+            cbase = r.chainbase(rev)
+            clen = r.chainlen(rev)
+            p1, p2 = r.parentrevs(rev)
+            rs = r.rawsize(rev)
+            ts = ts + rs
+            heads -= set(r.parentrevs(rev))
+            heads.add(rev)
+            try:
+                compression = ts / r.end(rev)
+            except ZeroDivisionError:
+                compression = 0
+            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
+                     "%11d %5d %8d\n" %
+                     (rev, p1, p2, r.start(rev), r.end(rev),
+                      r.start(dbase), r.start(cbase),
+                      r.start(p1), r.start(p2),
+                      rs, ts, compression, len(heads), clen))
+        return 0
+
+    v = r.version
+    format = v & 0xFFFF
+    flags = []
+    gdelta = False
+    if v & revlog.REVLOGNGINLINEDATA:
+        flags.append('inline')
+    if v & revlog.REVLOGGENERALDELTA:
+        gdelta = True
+        flags.append('generaldelta')
+    if not flags:
+        flags = ['(none)']
+
+    nummerges = 0
+    numfull = 0
+    numprev = 0
+    nump1 = 0
+    nump2 = 0
+    numother = 0
+    nump1prev = 0
+    nump2prev = 0
+    chainlengths = []
+
+    datasize = [None, 0, 0]
+    fullsize = [None, 0, 0]
+    deltasize = [None, 0, 0]
+    chunktypecounts = {}
+    chunktypesizes = {}
+
+    def addsize(size, l):
+        if l[0] is None or size < l[0]:
+            l[0] = size
+        if size > l[1]:
+            l[1] = size
+        l[2] += size
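addsize() above folds one size into a running [min, max, total] triple kept
as a plain mutable list, with None marking "no minimum seen yet". Standalone:

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    stats = [None, 0, 0]
    for s in (40, 7, 23):
        addsize(s, stats)
    assert stats == [7, 40, 70]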
+
+    numrevs = len(r)
+    for rev in xrange(numrevs):
+        p1, p2 = r.parentrevs(rev)
+        delta = r.deltaparent(rev)
+        if format > 0:
+            addsize(r.rawsize(rev), datasize)
+        if p2 != nullrev:
+            nummerges += 1
+        size = r.length(rev)
+        if delta == nullrev:
+            chainlengths.append(0)
+            numfull += 1
+            addsize(size, fullsize)
+        else:
+            chainlengths.append(chainlengths[delta] + 1)
+            addsize(size, deltasize)
+            if delta == rev - 1:
+                numprev += 1
+                if delta == p1:
+                    nump1prev += 1
+                elif delta == p2:
+                    nump2prev += 1
+            elif delta == p1:
+                nump1 += 1
+            elif delta == p2:
+                nump2 += 1
+            elif delta != nullrev:
+                numother += 1
+
+        # Obtain data on the raw chunks in the revlog.
+        chunk = r._chunkraw(rev, rev)[1]
+        if chunk:
+            chunktype = chunk[0]
+        else:
+            chunktype = 'empty'
+
+        if chunktype not in chunktypecounts:
+            chunktypecounts[chunktype] = 0
+            chunktypesizes[chunktype] = 0
+
+        chunktypecounts[chunktype] += 1
+        chunktypesizes[chunktype] += size
+
+    # Adjust size min value for empty cases
+    for size in (datasize, fullsize, deltasize):
+        if size[0] is None:
+            size[0] = 0
+
+    numdeltas = numrevs - numfull
+    numoprev = numprev - nump1prev - nump2prev
+    totalrawsize = datasize[2]
+    datasize[2] /= numrevs
+    fulltotal = fullsize[2]
+    fullsize[2] /= numfull
+    deltatotal = deltasize[2]
+    if numrevs - numfull > 0:
+        deltasize[2] /= numrevs - numfull
+    totalsize = fulltotal + deltatotal
+    avgchainlen = sum(chainlengths) / numrevs
+    maxchainlen = max(chainlengths)
+    compratio = 1
+    if totalsize:
+        compratio = totalrawsize / totalsize
+
+    basedfmtstr = '%%%dd\n'
+    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
+
+    def dfmtstr(max):
+        return basedfmtstr % len(str(max))
+    def pcfmtstr(max, padding=0):
+        return basepcfmtstr % (len(str(max)), ' ' * padding)
+
+    def pcfmt(value, total):
+        if total:
+            return (value, 100 * float(value) / total)
+        else:
+            return value, 100.0
+
+    ui.write(('format : %d\n') % format)
+    ui.write(('flags  : %s\n') % ', '.join(flags))
+
+    ui.write('\n')
+    fmt = pcfmtstr(totalsize)
+    fmt2 = dfmtstr(totalsize)
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
+    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
+    ui.write(('revisions     : ') + fmt2 % numrevs)
+    ui.write(('    full      : ') + fmt % pcfmt(numfull, numrevs))
+    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
+    ui.write(('revision size : ') + fmt2 % totalsize)
+    ui.write(('    full      : ') + fmt % pcfmt(fulltotal, totalsize))
+    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))
+
+    def fmtchunktype(chunktype):
+        if chunktype == 'empty':
+            return '    %s     : ' % chunktype
+        elif chunktype in string.ascii_letters:
+            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
+        else:
+            return '    0x%s      : ' % hex(chunktype)
+
+    ui.write('\n')
+    ui.write(('chunks        : ') + fmt2 % numrevs)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
+    ui.write(('chunks size   : ') + fmt2 % totalsize)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
+
+    ui.write('\n')
+    fmt = dfmtstr(max(avgchainlen, compratio))
+    ui.write(('avg chain length  : ') + fmt % avgchainlen)
+    ui.write(('max chain length  : ') + fmt % maxchainlen)
+    ui.write(('compression ratio : ') + fmt % compratio)
+
+    if format > 0:
+        ui.write('\n')
+        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
+                 % tuple(datasize))
+    ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
+             % tuple(fullsize))
+    ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
+             % tuple(deltasize))
+
+    if numdeltas > 0:
+        ui.write('\n')
+        fmt = pcfmtstr(numdeltas)
+        fmt2 = pcfmtstr(numdeltas, 4)
+        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
+        if numprev > 0:
+            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
+                                                              numprev))
+            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
+                                                              numprev))
+            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
+                                                              numprev))
+        if gdelta:
+            ui.write(('deltas against p1    : ')
+                     + fmt % pcfmt(nump1, numdeltas))
+            ui.write(('deltas against p2    : ')
+                     + fmt % pcfmt(nump2, numdeltas))
+            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
+                                                             numdeltas))
+
+@command('debugrevspec',
+    [('', 'optimize', None,
+      _('print parsed tree after optimizing (DEPRECATED)')),
+     ('p', 'show-stage', [],
+      _('print parsed tree at the given stage'), _('NAME')),
+     ('', 'no-optimized', False, _('evaluate tree without optimization')),
+     ('', 'verify-optimized', False, _('verify optimized result')),
+     ],
+    ('REVSPEC'))
+def debugrevspec(ui, repo, expr, **opts):
+    """parse and apply a revision specification
+
+    Use -p/--show-stage option to print the parsed tree at the given stages.
+    Use -p all to print tree at every stage.
+
+    Use --verify-optimized to compare the optimized result with the unoptimized
+    one. Returns 1 if the optimized result differs.
+    """
+    stages = [
+        ('parsed', lambda tree: tree),
+        ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
+        ('concatenated', revsetlang.foldconcat),
+        ('analyzed', revsetlang.analyze),
+        ('optimized', revsetlang.optimize),
+    ]
+    if opts['no_optimized']:
+        stages = stages[:-1]
+    if opts['verify_optimized'] and opts['no_optimized']:
+        raise error.Abort(_('cannot use --verify-optimized with '
+                            '--no-optimized'))
+    stagenames = set(n for n, f in stages)
+
+    showalways = set()
+    showchanged = set()
+    if ui.verbose and not opts['show_stage']:
+        # show parsed tree by --verbose (deprecated)
+        showalways.add('parsed')
+        showchanged.update(['expanded', 'concatenated'])
+        if opts['optimize']:
+            showalways.add('optimized')
+    if opts['show_stage'] and opts['optimize']:
+        raise error.Abort(_('cannot use --optimize with --show-stage'))
+    if opts['show_stage'] == ['all']:
+        showalways.update(stagenames)
+    else:
+        for n in opts['show_stage']:
+            if n not in stagenames:
+                raise error.Abort(_('invalid stage name: %s') % n)
+        showalways.update(opts['show_stage'])
+
+    treebystage = {}
+    printedtree = None
+    tree = revsetlang.parse(expr, lookup=repo.__contains__)
+    for n, f in stages:
+        treebystage[n] = tree = f(tree)
+        if n in showalways or (n in showchanged and tree != printedtree):
+            if opts['show_stage'] or n != 'parsed':
+                ui.write(("* %s:\n") % n)
+            ui.write(revsetlang.prettyformat(tree), "\n")
+            printedtree = tree
+
+    if opts['verify_optimized']:
+        arevs = revset.makematcher(treebystage['analyzed'])(repo)
+        brevs = revset.makematcher(treebystage['optimized'])(repo)
+        if ui.verbose:
+            ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
+            ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
+        arevs = list(arevs)
+        brevs = list(brevs)
+        if arevs == brevs:
+            return 0
+        ui.write(('--- analyzed\n'), label='diff.file_a')
+        ui.write(('+++ optimized\n'), label='diff.file_b')
+        sm = difflib.SequenceMatcher(None, arevs, brevs)
+        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
+            if tag in ('delete', 'replace'):
+                for c in arevs[alo:ahi]:
+                    ui.write('-%s\n' % c, label='diff.deleted')
+            if tag in ('insert', 'replace'):
+                for c in brevs[blo:bhi]:
+                    ui.write('+%s\n' % c, label='diff.inserted')
+            if tag == 'equal':
+                for c in arevs[alo:ahi]:
+                    ui.write(' %s\n' % c)
+        return 1
+
+    func = revset.makematcher(tree)
+    revs = func(repo)
+    if ui.verbose:
+        ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
+    for c in revs:
+        ui.write("%s\n" % c)
+
+@command('debugsetparents', [], _('REV1 [REV2]'))
+def debugsetparents(ui, repo, rev1, rev2=None):
+    """manually set the parents of the current working directory
+
+    This is useful for writing repository conversion tools, but should
+    be used with care. For example, neither the working directory nor the
+    dirstate is updated, so file status may be incorrect after running this
+    command.
+
+    Returns 0 on success.
+    """
+
+    r1 = scmutil.revsingle(repo, rev1).node()
+    r2 = scmutil.revsingle(repo, rev2, 'null').node()
+
+    with repo.wlock():
+        repo.setparents(r1, r2)
+
+@command('debugsub',
+    [('r', 'rev', '',
+     _('revision to check'), _('REV'))],
+    _('[-r REV] [REV]'))
+def debugsub(ui, repo, rev=None):
+    ctx = scmutil.revsingle(repo, rev, None)
+    for k, v in sorted(ctx.substate.items()):
+        ui.write(('path %s\n') % k)
+        ui.write((' source   %s\n') % v[0])
+        ui.write((' revision %s\n') % v[1])
+
+@command('debugsuccessorssets',
+    [],
+    _('[REV]'))
+def debugsuccessorssets(ui, repo, *revs):
+    """show set of successors for revision
+
+    A successors set of changeset A is a consistent group of revisions that
+    succeed A. It contains non-obsolete changesets only.
+
+    In most cases a changeset A has a single successors set containing a single
+    successor (changeset A replaced by A').
+
+    A changeset that is made obsolete with no successors is called "pruned".
+    Such changesets have no successors sets at all.
+
+    A changeset that has been "split" will have a successors set containing
+    more than one successor.
+
+    A changeset that has been rewritten in multiple different ways is called
+    "divergent". Such changesets have multiple successor sets (each of which
+    may also be split, i.e. have multiple successors).
+
+    Results are displayed as follows::
+
+        <rev1>
+            <successors-1A>
+        <rev2>
+            <successors-2A>
+            <successors-2B1> <successors-2B2> <successors-2B3>
+
+    Here rev2 has two possible (i.e. divergent) successors sets. The first
+    holds one element, whereas the second holds three (i.e. the changeset has
+    been split).
+    """
+    # passed to successorssets caching computation from one call to another
+    cache = {}
+    ctx2str = str
+    node2str = short
+    if ui.debug():
+        def ctx2str(ctx):
+            return ctx.hex()
+        node2str = hex
+    for rev in scmutil.revrange(repo, revs):
+        ctx = repo[rev]
+        ui.write('%s\n'% ctx2str(ctx))
+        for succsset in obsolete.successorssets(repo, ctx.node(), cache):
+            if succsset:
+                ui.write('    ')
+                ui.write(node2str(succsset[0]))
+                for node in succsset[1:]:
+                    ui.write(' ')
+                    ui.write(node2str(node))
+            ui.write('\n')
+
+@command('debugtemplate',
+    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
+     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
+    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
+    optionalrepo=True)
+def debugtemplate(ui, repo, tmpl, **opts):
+    """parse and apply a template
+
+    If -r/--rev is given, the template is processed as a log template and
+    applied to the given changesets. Otherwise, it is processed as a generic
+    template.
+
+    Use --verbose to print the parsed tree.
+    """
+    revs = None
+    if opts['rev']:
+        if repo is None:
+            raise error.RepoError(_('there is no Mercurial repository here '
+                                    '(.hg not found)'))
+        revs = scmutil.revrange(repo, opts['rev'])
+
+    props = {}
+    for d in opts['define']:
+        try:
+            k, v = (e.strip() for e in d.split('=', 1))
+            if not k or k == 'ui':
+                raise ValueError
+            props[k] = v
+        except ValueError:
+            raise error.Abort(_('malformed keyword definition: %s') % d)
+
+    if ui.verbose:
+        aliases = ui.configitems('templatealias')
+        tree = templater.parse(tmpl)
+        ui.note(templater.prettyformat(tree), '\n')
+        newtree = templater.expandaliases(tree, aliases)
+        if newtree != tree:
+            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
+
+    mapfile = None
+    if revs is None:
+        k = 'debugtemplate'
+        t = formatter.maketemplater(ui, k, tmpl)
+        ui.write(templater.stringify(t(k, ui=ui, **props)))
+    else:
+        displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
+                                                mapfile, buffered=False)
+        for r in revs:
+            displayer.show(repo[r], **props)
+        displayer.close()
+
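The --define handling above is compact; here is a standalone, hedged rendering of the same KEY=VALUE parsing (the helper name is ours, not the patch's)::

    def parsedefine(d):
        """split 'KEY=VALUE' once on '=', strip both halves, and reject
        empty keys and the reserved 'ui' name"""
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                raise ValueError
            return k, v
        except ValueError:
            raise ValueError('malformed keyword definition: %s' % d)

    print(parsedefine('greeting = hello'))  # ('greeting', 'hello')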
 @command('debugupgraderepo', [
     ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
     ('', 'run', False, _('performs an upgrade')),
@@ -874,4 +2073,44 @@
     should complete almost instantaneously and the chances of a consumer being
     unable to access the repository should be low.
     """
-    return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
+    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
+
+@command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
+         inferrepo=True)
+def debugwalk(ui, repo, *pats, **opts):
+    """show how files match on given patterns"""
+    m = scmutil.match(repo[None], pats, opts)
+    items = list(repo.walk(m))
+    if not items:
+        return
+    f = lambda fn: fn
+    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
+        f = lambda fn: util.normpath(fn)
+    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
+        max([len(abs) for abs in items]),
+        max([len(m.rel(abs)) for abs in items]))
+    for abs in items:
+        line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
+        ui.write("%s\n" % line.rstrip())
+
+@command('debugwireargs',
+    [('', 'three', '', 'three'),
+    ('', 'four', '', 'four'),
+    ('', 'five', '', 'five'),
+    ] + commands.remoteopts,
+    _('REPO [OPTIONS]... [ONE [TWO]]'),
+    norepo=True)
+def debugwireargs(ui, repopath, *vals, **opts):
+    repo = hg.peer(ui, opts, repopath)
+    for opt in commands.remoteopts:
+        del opts[opt[1]]
+    args = {}
+    for k, v in opts.iteritems():
+        if v:
+            args[k] = v
+    # run twice to check that we don't mess up the stream for the next command
+    res1 = repo.debugwireargs(*vals, **args)
+    res2 = repo.debugwireargs(*vals, **args)
+    ui.write("%s\n" % res1)
+    if res1 != res2:
+        ui.warn("%s\n" % res2)
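A hedged illustration of debugwalk's two-pass formatting above: column widths are measured first, then baked into a second format string (the sample paths are invented)::

    items = ['a.txt', 'dir/long-name.txt']   # hypothetical matches
    rels = ['a.txt', 'long-name.txt']        # hypothetical m.rel() results
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (max(len(a) for a in items),
                                      max(len(r) for r in rels))
    # fmt is now 'f  %-17s  %-13s  %s'
    for a, r in zip(items, rels):
        print((fmt % (a, r, 'exact')).rstrip())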
--- a/mercurial/demandimport.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/demandimport.py	Tue Apr 18 12:24:34 2017 -0400
@@ -76,9 +76,9 @@
         else:
             head = name
             after = []
-        object.__setattr__(self, "_data",
+        object.__setattr__(self, r"_data",
                            (head, globals, locals, after, level, set()))
-        object.__setattr__(self, "_module", None)
+        object.__setattr__(self, r"_module", None)
     def _extend(self, name):
         """add to the list of submodules to load"""
         self._data[3].append(name)
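The r"..." prefixes above matter because attribute names must be native str on Python 3; our reading (not stated in the patch) is that Mercurial's Python 3 source loader rewrites bare string literals to bytes, and the r prefix exempts a literal from that rewrite. A toy demonstration::

    class demandmod(object):
        pass

    d = demandmod()
    object.__setattr__(d, "_module", None)     # native str name: fine
    # object.__setattr__(d, b"_module", None)  # TypeError on Python 3:
    #                                          # names must be str, not bytes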
@@ -138,7 +138,7 @@
                 if modref and getattr(modref, head, None) == self:
                     setattr(modref, head, mod)
 
-            object.__setattr__(self, "_module", mod)
+            object.__setattr__(self, r"_module", mod)
 
     def __repr__(self):
         if self._module:
@@ -274,6 +274,7 @@
     'fcntl',
     'nt', # pathlib2 tests the existence of built-in 'nt' module
     'win32com.gen_py',
+    'win32com.shell', # 'appdirs' tries to import win32com.shell
     '_winreg', # 2.7 mimetypes needs immediate ImportError
     'pythoncom',
     # imported by tarfile, not available under Windows
--- a/mercurial/destutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/destutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,37 +12,10 @@
     bookmarks,
     error,
     obsolete,
+    scmutil,
 )
 
-def _destupdatevalidate(repo, rev, clean, check):
-    """validate that the destination comply to various rules
-
-    This exists as its own function to help wrapping from extensions."""
-    wc = repo[None]
-    p1 = wc.p1()
-    if not clean:
-        # Check that the update is linear.
-        #
-        # Mercurial do not allow update-merge for non linear pattern
-        # (that would be technically possible but was considered too confusing
-        # for user a long time ago)
-        #
-        # See mercurial.merge.update for details
-        if p1.rev() not in repo.changelog.ancestors([rev], inclusive=True):
-            dirty = wc.dirty(missing=True)
-            foreground = obsolete.foreground(repo, [p1.node()])
-            if not repo[rev].node() in foreground:
-                if dirty:
-                    msg = _("uncommitted changes")
-                    hint = _("commit and merge, or update --clean to"
-                             " discard changes")
-                    raise error.UpdateAbort(msg, hint=hint)
-                elif not check:  # destination is not a descendant.
-                    msg = _("not a linear update")
-                    hint = _("merge or update --check to force update")
-                    raise error.UpdateAbort(msg, hint=hint)
-
-def _destupdateobs(repo, clean, check):
+def _destupdateobs(repo, clean):
     """decide of an update destination from obsolescence markers"""
     node = None
     wc = repo[None]
@@ -78,7 +51,7 @@
                 movemark = repo['.'].node()
     return node, movemark, None
 
-def _destupdatebook(repo, clean, check):
+def _destupdatebook(repo, clean):
     """decide on an update destination from active bookmark"""
     # we also move the active bookmark, if any
     activemark = None
@@ -87,7 +60,7 @@
         activemark = node
     return node, movemark, activemark
 
-def _destupdatebranch(repo, clean, check):
+def _destupdatebranch(repo, clean):
     """decide on an update destination from current branch
 
     This ignores closed branch heads.
@@ -113,7 +86,7 @@
         node = repo['.'].node()
     return node, movemark, None
 
-def _destupdatebranchfallback(repo, clean, check):
+def _destupdatebranchfallback(repo, clean):
     """decide on an update destination from closed heads in current branch"""
     wc = repo[None]
     currentbranch = wc.branch()
@@ -143,7 +116,7 @@
                      'branchfallback': _destupdatebranchfallback,
                      }
 
-def destupdate(repo, clean=False, check=False):
+def destupdate(repo, clean=False):
     """destination for bare update operation
 
     return (rev, movemark, activemark)
@@ -156,13 +129,11 @@
     node = movemark = activemark = None
 
     for step in destupdatesteps:
-        node, movemark, activemark = destupdatestepmap[step](repo, clean, check)
+        node, movemark, activemark = destupdatestepmap[step](repo, clean)
         if node is not None:
             break
     rev = repo[node].rev()
 
-    _destupdatevalidate(repo, rev, clean, check)
-
     return rev, movemark, activemark
 
 msgdestmerge = {
@@ -372,9 +343,6 @@
 
 def desthistedit(ui, repo):
     """Default base revision to edit for `hg histedit`."""
-    # Avoid cycle: scmutil -> revset -> destutil
-    from . import scmutil
-
     default = ui.config('histedit', 'defaultrev', histeditdefaultrevset)
     if default:
         revs = scmutil.revrange(repo, [default])
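With the check parameter gone, every step shares the (repo, clean) -> (node, movemark, activemark) signature. A hedged sketch of how a hypothetical extension could still plug into these tables (the step name and function are ours)::

    from mercurial import destutil

    def _destupdatetip(repo, clean):
        """hypothetical step: always propose the tipmost changeset"""
        return repo['tip'].node(), None, None

    destutil.destupdatesteps.insert(0, 'tip')
    destutil.destupdatestepmap['tip'] = _destupdatetip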
--- a/mercurial/dirstate.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/dirstate.py	Tue Apr 18 12:24:34 2017 -0400
@@ -23,6 +23,7 @@
     pathutil,
     pycompat,
     scmutil,
+    txnutil,
     util,
 )
 
@@ -54,26 +55,16 @@
 def nonnormalentries(dmap):
     '''Compute the nonnormal dirstate entries from the dmap'''
     try:
-        return parsers.nonnormalentries(dmap)
+        return parsers.nonnormalotherparententries(dmap)
     except AttributeError:
-        return set(fname for fname, e in dmap.iteritems()
-                   if e[0] != 'n' or e[3] == -1)
-
-def _trypending(root, vfs, filename):
-    '''Open  file to be read according to HG_PENDING environment variable
-
-    This opens '.pending' of specified 'filename' only when HG_PENDING
-    is equal to 'root'.
-
-    This returns '(fp, is_pending_opened)' tuple.
-    '''
-    if root == encoding.environ.get('HG_PENDING'):
-        try:
-            return (vfs('%s.pending' % filename), True)
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-    return (vfs(filename), False)
+        nonnorm = set()
+        otherparent = set()
+        for fname, e in dmap.iteritems():
+            if e[0] != 'n' or e[3] == -1:
+                nonnorm.add(fname)
+            if e[0] == 'n' and e[2] == -2:
+                otherparent.add(fname)
+        return nonnorm, otherparent
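A hedged walk-through of the pure-Python fallback above, using a toy dmap of (state, mode, size, mtime) entries::

    dmap = {
        'clean.txt':  ('n', 0o644, 12, 1000),
        'added.txt':  ('a', 0o644, -1, -1),
        'merged.txt': ('n', 0o644, -2, -1),
    }
    nonnorm = set(f for f, e in dmap.items() if e[0] != 'n' or e[3] == -1)
    otherparent = set(f for f, e in dmap.items()
                      if e[0] == 'n' and e[2] == -2)
    print(sorted(nonnorm))       # ['added.txt', 'merged.txt']
    print(sorted(otherparent))   # ['merged.txt']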
 
 class dirstate(object):
 
@@ -104,6 +95,7 @@
         self._pendingfilename = '%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
+        self._updatedfiles = set()
 
         # for consistent view between _pl() and _read() invocations
         self._pendingmode = None
@@ -145,7 +137,15 @@
 
     @propertycache
     def _nonnormalset(self):
-        return nonnormalentries(self._map)
+        nonnorm, otherparents = nonnormalentries(self._map)
+        self._otherparentset = otherparents
+        return nonnorm
+
+    @propertycache
+    def _otherparentset(self):
+        nonnorm, otherparents = nonnormalentries(self._map)
+        self._nonnormalset = nonnorm
+        return otherparents
 
     @propertycache
     def _filefoldmap(self):
@@ -355,7 +355,12 @@
         self._pl = p1, p2
         copies = {}
         if oldp2 != nullid and p2 == nullid:
-            for f, s in self._map.iteritems():
+            candidatefiles = self._nonnormalset.union(self._otherparentset)
+            for f in candidatefiles:
+                s = self._map.get(f)
+                if s is None:
+                    continue
+
                 # Discard 'm' markers when moving away from a merge state
                 if s[0] == 'm':
                     if f in self._copymap:
@@ -385,7 +390,7 @@
             raise
 
     def _opendirstatefile(self):
-        fp, mode = _trypending(self._root, self._opener, self._filename)
+        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
         if self._pendingmode is not None and self._pendingmode != mode:
             fp.close()
             raise error.Abort(_('working directory state may be '
@@ -441,11 +446,13 @@
 
     def invalidate(self):
         for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
-                  "_pl", "_dirs", "_ignore", "_nonnormalset"):
+                  "_pl", "_dirs", "_ignore", "_nonnormalset",
+                  "_otherparentset"):
             if a in self.__dict__:
                 delattr(self, a)
         self._lastnormaltime = 0
         self._dirty = False
+        self._updatedfiles.clear()
         self._parentwriters = 0
         self._origpl = None
 
@@ -456,8 +463,11 @@
         self._dirty = True
         if source is not None:
             self._copymap[dest] = source
+            self._updatedfiles.add(source)
+            self._updatedfiles.add(dest)
         elif dest in self._copymap:
             del self._copymap[dest]
+            self._updatedfiles.add(dest)
 
     def copied(self, file):
         return self._copymap.get(file, None)
@@ -474,6 +484,8 @@
             if normed in self._filefoldmap:
                 del self._filefoldmap[normed]
 
+        self._updatedfiles.add(f)
+
     def _addpath(self, f, state, mode, size, mtime):
         oldstate = self[f]
         if state == 'a' or oldstate == 'r':
@@ -490,9 +502,12 @@
         if oldstate in "?r" and "_dirs" in self.__dict__:
             self._dirs.addpath(f)
         self._dirty = True
+        self._updatedfiles.add(f)
         self._map[f] = dirstatetuple(state, mode, size, mtime)
         if state != 'n' or mtime == -1:
             self._nonnormalset.add(f)
+        if size == -2:
+            self._otherparentset.add(f)
 
     def normal(self, f):
         '''Mark a file normal and clean.'''
@@ -567,6 +582,7 @@
                 size = -1
             elif entry[0] == 'n' and entry[2] == -2: # other parent
                 size = -2
+                self._otherparentset.add(f)
         self._map[f] = dirstatetuple('r', 0, size, 0)
         self._nonnormalset.add(f)
         if size == 0 and f in self._copymap:
@@ -666,11 +682,13 @@
     def clear(self):
         self._map = {}
         self._nonnormalset = set()
+        self._otherparentset = set()
         if "_dirs" in self.__dict__:
             delattr(self, "_dirs")
         self._copymap = {}
         self._pl = [nullid, nullid]
         self._lastnormaltime = 0
+        self._updatedfiles.clear()
         self._dirty = True
 
     def rebuild(self, parent, allfiles, changedfiles=None):
@@ -707,13 +725,15 @@
             # emulate dropping timestamp in 'parsers.pack_dirstate'
             now = _getfsnow(self._opener)
             dmap = self._map
-            for f, e in dmap.iteritems():
-                if e[0] == 'n' and e[3] == now:
+            for f in self._updatedfiles:
+                e = dmap.get(f)
+                if e is not None and e[0] == 'n' and e[3] == now:
                     dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
                     self._nonnormalset.add(f)
 
             # emulate that all 'dirstate.normal' results are written out
             self._lastnormaltime = 0
+            self._updatedfiles.clear()
 
             # delay writing in-memory changes out
             tr.addfilegenerator('dirstate', (self._filename,),
@@ -762,7 +782,7 @@
                     break
 
         st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
-        self._nonnormalset = nonnormalentries(self._map)
+        self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
         st.close()
         self._lastnormaltime = 0
         self._dirty = self._dirtypl = False
@@ -1059,7 +1079,7 @@
             # a) not matching matchfn b) ignored, c) missing, or d) under a
             # symlink directory.
             if not results and matchalways:
-                visit = dmap.keys()
+                visit = [f for f in dmap]
             else:
                 visit = [f for f in dmap if f not in results and matchfn(f)]
             visit.sort()
@@ -1095,9 +1115,9 @@
             else:
                 # We may not have walked the full directory tree above,
                 # so stat and check everything we missed.
-                nf = iter(visit).next
+                iv = iter(visit)
                 for st in util.statfiles([join(i) for i in visit]):
-                    results[nf()] = st
+                    results[next(iv)] = st
         return results
 
     def status(self, match, subrepos, ignored, clean, unknown):
@@ -1224,8 +1244,9 @@
         # use '_writedirstate' instead of 'write' to write changes certainly,
         # because the latter omits writing out if transaction is running.
         # output file will be used to create backup of dirstate at this point.
-        self._writedirstate(self._opener(filename, "w", atomictemp=True,
-                                         checkambig=True))
+        if self._dirty or not self._opener.exists(filename):
+            self._writedirstate(self._opener(filename, "w", atomictemp=True,
+                                             checkambig=True))
 
         if tr:
             # ensure that subsequent tr.writepending returns True for
@@ -1239,8 +1260,13 @@
             # end of this transaction
             tr.registertmp(filename, location='plain')
 
-        self._opener.write(prefix + self._filename + suffix,
-                           self._opener.tryread(filename))
+        backupname = prefix + self._filename + suffix
+        assert backupname != filename
+        self._opener.tryunlink(backupname)
+        # hardlink backup is okay because _writedirstate is always called
+        # with an "atomictemp=True" file.
+        util.copyfile(self._opener.join(filename),
+                      self._opener.join(backupname), hardlink=True)
 
     def restorebackup(self, tr, suffix='', prefix=''):
         '''Restore dirstate by backup file with suffix'''
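The backup above now hard-links instead of rereading and rewriting, which is safe only because the source file is always written atomically and never mutated in place. A hedged standalone sketch of the same pattern::

    import os
    import shutil

    def backupfile(src, dst):
        try:
            os.unlink(dst)             # drop any stale backup first
        except OSError:
            pass
        try:
            os.link(src, dst)          # cheap: both names share one inode
        except OSError:
            shutil.copyfile(src, dst)  # cross-device or hardlink-less fs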
--- a/mercurial/discovery.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/discovery.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,8 +7,11 @@
 
 from __future__ import absolute_import
 
+import functools
+
 from .i18n import _
 from .node import (
+    hex,
     nullid,
     short,
 )
@@ -17,7 +20,6 @@
     bookmarks,
     branchmap,
     error,
-    obsolete,
     phases,
     setdiscovery,
     treediscovery,
@@ -343,38 +345,13 @@
         oldhs.update(unsyncedheads)
         candidate_newhs.update(unsyncedheads)
         dhs = None # delta heads, the new heads on branch
-        discardedheads = set()
         if not repo.obsstore:
+            discardedheads = set()
             newhs = candidate_newhs
         else:
-            # remove future heads which are actually obsoleted by another
-            # pushed element:
-            #
-            # XXX as above, There are several cases this code does not handle
-            # XXX properly
-            #
-            # (1) if <nh> is public, it won't be affected by obsolete marker
-            #     and a new is created
-            #
-            # (2) if the new heads have ancestors which are not obsolete and
-            #     not ancestors of any other heads we will have a new head too.
-            #
-            # These two cases will be easy to handle for known changeset but
-            # much more tricky for unsynced changes.
-            #
-            # In addition, this code is confused by prune as it only looks for
-            # successors of the heads (none if pruned) leading to issue4354
-            newhs = set()
-            for nh in candidate_newhs:
-                if nh in repo and repo[nh].phase() <= phases.public:
-                    newhs.add(nh)
-                else:
-                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
-                        if suc != nh and suc in allfuturecommon:
-                            discardedheads.add(nh)
-                            break
-                    else:
-                        newhs.add(nh)
+            newhs, discardedheads = _postprocessobsolete(pushop,
+                                                         allfuturecommon,
+                                                         candidate_newhs)
         unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
         if unsynced:
             if None in unsynced:
@@ -434,3 +411,109 @@
                 repo.ui.note((" %s\n") % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)
+
+def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
+    """post process the list of new heads with obsolescence information
+
+    Exists as a sub-function to contain the complexity and allow extensions to
+    experiment with smarter logic.
+
+    Returns (newheads, discarded_heads) tuple
+    """
+    # known issues
+    #
+    # * We "silently" skip processing on all changesets unknown locally
+    #
+    # * if <nh> is public on the remote, it won't be affected by obsolescence
+    #     markers and a new head is created
+
+    # define various utilities and containers
+    repo = pushop.repo
+    unfi = repo.unfiltered()
+    tonode = unfi.changelog.node
+    public = phases.public
+    getphase = unfi._phasecache.phase
+    ispublic = (lambda r: getphase(unfi, r) == public)
+    hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore,
+                                     futurecommon)
+    successorsmarkers = unfi.obsstore.successors
+    newhs = set() # final set of new heads
+    discarded = set() # new head of fully replaced branch
+
+    localcandidate = set() # candidate heads known locally
+    unknownheads = set() # candidate heads unknown locally
+    for h in candidate_newhs:
+        if h in unfi:
+            localcandidate.add(h)
+        else:
+            if successorsmarkers.get(h) is not None:
+                msg = ('checkheads: remote head unknown locally has'
+                       ' local marker: %s\n')
+                repo.ui.debug(msg % hex(h))
+            unknownheads.add(h)
+
+    # fast path the simple case
+    if len(localcandidate) == 1:
+        return unknownheads | set(candidate_newhs), set()
+
+    # actually process branch replacement
+    while localcandidate:
+        nh = localcandidate.pop()
+        # run this check early to skip the evaluation of the whole branch
+        if (nh in futurecommon
+                or unfi[nh].phase() <= public):
+            newhs.add(nh)
+            continue
+
+        # Get all revs/nodes on the branch exclusive to this head
+        # (already filtered heads are "ignored")
+        branchrevs = unfi.revs('only(%n, (%ln+%ln))',
+                               nh, localcandidate, newhs)
+        branchnodes = [tonode(r) for r in branchrevs]
+
+        # The branch won't be hidden on the remote if
+        # * any part of it is public,
+        # * any part of it is considered part of the result by previous logic,
+        # * we have no markers to push that obsolete it.
+        if (any(ispublic(r) for r in branchrevs)
+                or any(n in futurecommon for n in branchnodes)
+                or any(not hasoutmarker(n) for n in branchnodes)):
+            newhs.add(nh)
+        else:
+            # note: there is a corner case if there is a merge in the branch.
+            # we might end up with -more- heads.  However, these heads are not
+            # "added" by the push, but more by the "removal" on the remote, so
+            # I think it is okay to ignore them.
+            discarded.add(nh)
+    newhs |= unknownheads
+    return newhs, discarded
+
+def pushingmarkerfor(obsstore, pushset, node):
+    """true if some markers are to be pushed for node
+
+    We cannot just look into the pushed obsmarkers from the pushop because
+    discovery might have filtered relevant markers. In addition, listing all
+    markers relevant to all changesets in the pushed set would be too
+    expensive (O(len(repo)))
+
+    (note: There are caching opportunities in this function, but they would
+    require a two-dimensional stack.)
+    """
+    successorsmarkers = obsstore.successors
+    stack = [node]
+    seen = set(stack)
+    while stack:
+        current = stack.pop()
+        if current in pushset:
+            return True
+        markers = successorsmarkers.get(current, ())
+        # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
+        for m in markers:
+            nexts = m[1] # successors
+            if not nexts: # this is a prune marker
+                nexts = m[5] # parents
+            for n in nexts:
+                if n not in seen:
+                    seen.add(n)
+                    stack.append(n)
+    return False
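A hedged reduction of the traversal above over a plain dict; the prune-marker fallback to parents (m[5]) is omitted for brevity::

    def reachespushset(successors, pushset, node):
        """successors: node -> list of successor groups (lists of nodes)"""
        stack, seen = [node], {node}
        while stack:
            current = stack.pop()
            if current in pushset:
                return True
            for group in successors.get(current, ()):
                for n in group:
                    if n not in seen:
                        seen.add(n)
                        stack.append(n)
        return False

    print(reachespushset({'a': [['b', 'c']]}, {'c'}, 'a'))  # True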
--- a/mercurial/dispatch.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/dispatch.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import, print_function
 
-import atexit
 import difflib
 import errno
 import getopt
@@ -33,6 +32,7 @@
     extensions,
     fancyopts,
     fileset,
+    help,
     hg,
     hook,
     profiling,
@@ -58,9 +58,41 @@
         self.fout = fout
         self.ferr = ferr
 
+    def _runexithandlers(self):
+        exc = None
+        handlers = self.ui._exithandlers
+        try:
+            while handlers:
+                func, args, kwargs = handlers.pop()
+                try:
+                    func(*args, **kwargs)
+                except: # re-raises below
+                    if exc is None:
+                        exc = sys.exc_info()[1]
+                    self.ui.warn(('error in exit handlers:\n'))
+                    self.ui.traceback(force=True)
+        finally:
+            if exc is not None:
+                raise exc
+
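A hedged reduction of the handler loop above: handlers run LIFO, a failing handler does not stop the remaining ones, and the first exception is re-raised at the end::

    def runexithandlers(handlers):
        exc = None
        try:
            while handlers:
                func, args, kwargs = handlers.pop()
                try:
                    func(*args, **kwargs)
                except Exception as e:
                    if exc is None:
                        exc = e
        finally:
            if exc is not None:
                raise exc

    def note(msg):
        print(msg)

    runexithandlers([(note, ('registered first, runs last',), {}),
                     (note, ('registered last, runs first',), {})])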
 def run():
     "run the command in sys.argv"
-    sys.exit((dispatch(request(pycompat.sysargv[1:])) or 0) & 255)
+    req = request(pycompat.sysargv[1:])
+    err = None
+    try:
+        status = (dispatch(req) or 0) & 255
+    except error.StdioError as err:
+        status = -1
+    if util.safehasattr(req.ui, 'fout'):
+        try:
+            req.ui.fout.close()
+        except IOError as err:
+            status = -1
+    if util.safehasattr(req.ui, 'ferr'):
+        if err is not None and err.errno != errno.EPIPE:
+            req.ui.ferr.write('abort: %s\n' % err.strerror)
+        req.ui.ferr.close()
+    sys.exit(status & 255)
 
 def _getsimilar(symbols, value):
     sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
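_getsimilar (partially shown above) ranks candidates with difflib; a hedged self-contained version, assuming the conventional 0.6 cutoff::

    import difflib

    def getsimilar(symbols, value):
        sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
        # the '> 0.6' threshold is our assumption about the full helper
        return [s for s in symbols if sim(s) > 0.6]

    print(getsimilar(['status', 'summary', 'serve'], 'stats'))  # ['status']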
@@ -91,6 +123,9 @@
     if inst.hint:
         write(_("(%s)\n") % inst.hint)
 
+def _formatargs(args):
+    return ' '.join(util.shellquote(a) for a in args)
+
 def dispatch(req):
     "run the command specified in req.args"
     if req.ferr:
@@ -122,23 +157,34 @@
         _formatparse(ferr.write, inst)
         return -1
 
-    msg = ' '.join(' ' in a and repr(a) or a for a in req.args)
-    starttime = time.time()
+    msg = _formatargs(req.args)
+    starttime = util.timer()
     ret = None
     try:
         ret = _runcatch(req)
     except KeyboardInterrupt:
         try:
             req.ui.warn(_("interrupted!\n"))
+        except error.SignalInterrupt:
+            # maybe pager would quit without consuming all the output, and
+            # SIGPIPE was raised. we cannot print anything in this case.
+            pass
         except IOError as inst:
             if inst.errno != errno.EPIPE:
                 raise
         ret = -1
     finally:
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         req.ui.flush()
+        if req.ui.logblockedtimes:
+            req.ui._blockedtimes['command_duration'] = duration * 1000
+            req.ui.log('uiblocked', 'ui blocked ms', **req.ui._blockedtimes)
         req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n",
                    msg, ret or 0, duration)
+        try:
+            req._runexithandlers()
+        except: # exiting, so no re-raises
+            ret = ret or -1
     return ret
 
 def _runcatch(req):
@@ -244,12 +290,11 @@
             if '--debugger' in req.args:
                 traceback.print_exc()
                 debugmortem[debugger](sys.exc_info()[2])
-            ui.traceback()
             raise
 
-    return callcatch(ui, _runcatchfunc)
+    return _callcatch(ui, _runcatchfunc)
 
-def callcatch(ui, func):
+def _callcatch(ui, func):
     """like scmutil.callcatch but handles more high-level exceptions about
     config parsing and commands. besides, use handlecommandexception to handle
     uncaught exceptions.
@@ -261,28 +306,35 @@
                 (inst.args[0], " ".join(inst.args[1])))
     except error.CommandError as inst:
         if inst.args[0]:
+            ui.pager('help')
             ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
             commands.help_(ui, inst.args[0], full=False, command=True)
         else:
+            ui.pager('help')
             ui.warn(_("hg: %s\n") % inst.args[1])
             commands.help_(ui, 'shortlist')
     except error.ParseError as inst:
         _formatparse(ui.warn, inst)
         return -1
     except error.UnknownCommand as inst:
-        ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
+        nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0]
         try:
             # check if the command is in a disabled extension
             # (but don't check for extensions themselves)
-            commands.help_(ui, inst.args[0], unknowncmd=True)
+            formatted = help.formattedhelp(ui, inst.args[0], unknowncmd=True)
+            ui.warn(nocmdmsg)
+            ui.write(formatted)
         except (error.UnknownCommand, error.Abort):
             suggested = False
             if len(inst.args) == 2:
                 sim = _getsimilar(inst.args[1], inst.args[0])
                 if sim:
+                    ui.warn(nocmdmsg)
                     _reportsimilar(ui.warn, sim)
                     suggested = True
             if not suggested:
+                ui.pager('help')
+                ui.warn(nocmdmsg)
                 commands.help_(ui, 'shortlist')
     except IOError:
         raise
@@ -306,7 +358,7 @@
             if num < len(givenargs):
                 return givenargs[num]
             raise error.Abort(_('too few arguments for command alias'))
-        cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
+        cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
         givenargs = [x for i, x in enumerate(givenargs)
                      if i not in nums]
         args = pycompat.shlexsplit(cmd)
@@ -376,7 +428,8 @@
                         return ''
                 cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
                 cmd = aliasinterpolate(self.name, args, cmd)
-                return ui.system(cmd, environ=env)
+                return ui.system(cmd, environ=env,
+                                 blockedtag='alias_%s' % self.name)
             self.fn = fn
             return
 
@@ -418,7 +471,7 @@
 
     @property
     def args(self):
-        args = map(util.expandpath, self.givenargs)
+        args = pycompat.maplist(util.expandpath, self.givenargs)
         return aliasargs(self.fn, args)
 
     def __getattr__(self, name):
@@ -491,7 +544,8 @@
         args = aliasargs(entry[0], args)
         defaults = ui.config("defaults", cmd)
         if defaults:
-            args = map(util.expandpath, pycompat.shlexsplit(defaults)) + args
+            args = pycompat.maplist(
+                util.expandpath, pycompat.shlexsplit(defaults)) + args
         c = list(entry[1])
     else:
         cmd = None
@@ -686,107 +740,122 @@
     rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
     path, lui = _getlocal(ui, rpath)
 
-    # Configure extensions in phases: uisetup, extsetup, cmdtable, and
-    # reposetup. Programs like TortoiseHg will call _dispatch several
-    # times so we keep track of configured extensions in _loaded.
-    extensions.loadall(lui)
-    exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
-    # Propagate any changes to lui.__class__ by extensions
-    ui.__class__ = lui.__class__
-
-    # (uisetup and extsetup are handled in extensions.loadall)
-
-    for name, module in exts:
-        for objname, loadermod, loadername in extraloaders:
-            extraobj = getattr(module, objname, None)
-            if extraobj is not None:
-                getattr(loadermod, loadername)(ui, name, extraobj)
-        _loaded.add(name)
-
-    # (reposetup is handled in hg.repository)
-
     # Side-effect of accessing is debugcommands module is guaranteed to be
     # imported and commands.table is populated.
     debugcommands.command
 
-    addaliases(lui, commands.table)
-
-    # All aliases and commands are completely defined, now.
-    # Check abbreviation/ambiguity of shell alias.
-    shellaliasfn = _checkshellalias(lui, ui, args)
-    if shellaliasfn:
-        with profiling.maybeprofile(lui):
-            return shellaliasfn()
-
-    # check for fallback encoding
-    fallback = lui.config('ui', 'fallbackencoding')
-    if fallback:
-        encoding.fallbackencoding = fallback
-
-    fullargs = args
-    cmd, func, args, options, cmdoptions = _parse(lui, args)
-
-    if options["config"]:
-        raise error.Abort(_("option --config may not be abbreviated!"))
-    if options["cwd"]:
-        raise error.Abort(_("option --cwd may not be abbreviated!"))
-    if options["repository"]:
-        raise error.Abort(_(
-            "option -R has to be separated from other options (e.g. not -qR) "
-            "and --repository may only be abbreviated as --repo!"))
-
-    if options["encoding"]:
-        encoding.encoding = options["encoding"]
-    if options["encodingmode"]:
-        encoding.encodingmode = options["encodingmode"]
-    if options["time"]:
-        def get_times():
-            t = os.times()
-            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
-                t = (t[0], t[1], t[2], t[3], time.clock())
-            return t
-        s = get_times()
-        def print_time():
-            t = get_times()
-            ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
-                (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
-        atexit.register(print_time)
-
     uis = set([ui, lui])
 
     if req.repo:
         uis.add(req.repo.ui)
 
-    if options['verbose'] or options['debug'] or options['quiet']:
-        for opt in ('verbose', 'debug', 'quiet'):
-            val = str(bool(options[opt]))
-            for ui_ in uis:
-                ui_.setconfig('ui', opt, val, '--' + opt)
-
-    if options['profile']:
+    if '--profile' in args:
         for ui_ in uis:
             ui_.setconfig('profiling', 'enabled', 'true', '--profile')
 
-    if options['traceback']:
-        for ui_ in uis:
-            ui_.setconfig('ui', 'traceback', 'on', '--traceback')
+    with profiling.maybeprofile(lui):
+        # Configure extensions in phases: uisetup, extsetup, cmdtable, and
+        # reposetup. Programs like TortoiseHg will call _dispatch several
+        # times so we keep track of configured extensions in _loaded.
+        extensions.loadall(lui)
+        exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
+        # Propagate any changes to lui.__class__ by extensions
+        ui.__class__ = lui.__class__
+
+        # (uisetup and extsetup are handled in extensions.loadall)
+
+        for name, module in exts:
+            for objname, loadermod, loadername in extraloaders:
+                extraobj = getattr(module, objname, None)
+                if extraobj is not None:
+                    getattr(loadermod, loadername)(ui, name, extraobj)
+            _loaded.add(name)
+
+        # (reposetup is handled in hg.repository)
+
+        addaliases(lui, commands.table)
+
+        # All aliases and commands are completely defined, now.
+        # Check abbreviation/ambiguity of shell alias.
+        shellaliasfn = _checkshellalias(lui, ui, args)
+        if shellaliasfn:
+            return shellaliasfn()
+
+        # check for fallback encoding
+        fallback = lui.config('ui', 'fallbackencoding')
+        if fallback:
+            encoding.fallbackencoding = fallback
+
+        fullargs = args
+        cmd, func, args, options, cmdoptions = _parse(lui, args)
+
+        if options["config"]:
+            raise error.Abort(_("option --config may not be abbreviated!"))
+        if options["cwd"]:
+            raise error.Abort(_("option --cwd may not be abbreviated!"))
+        if options["repository"]:
+            raise error.Abort(_(
+                "option -R has to be separated from other options (e.g. not "
+                "-qR) and --repository may only be abbreviated as --repo!"))
 
-    if options['noninteractive']:
-        for ui_ in uis:
-            ui_.setconfig('ui', 'interactive', 'off', '-y')
+        if options["encoding"]:
+            encoding.encoding = options["encoding"]
+        if options["encodingmode"]:
+            encoding.encodingmode = options["encodingmode"]
+        if options["time"]:
+            def get_times():
+                t = os.times()
+                if t[4] == 0.0:
+                    # Windows leaves this as zero, so use time.clock()
+                    t = (t[0], t[1], t[2], t[3], time.clock())
+                return t
+            s = get_times()
+            def print_time():
+                t = get_times()
+                ui.warn(
+                    _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
+                    (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
+            ui.atexit(print_time)
 
-    if cmdoptions.get('insecure', False):
+        if options['verbose'] or options['debug'] or options['quiet']:
+            for opt in ('verbose', 'debug', 'quiet'):
+                val = str(bool(options[opt]))
+                if pycompat.ispy3:
+                    val = val.encode('ascii')
+                for ui_ in uis:
+                    ui_.setconfig('ui', opt, val, '--' + opt)
+
+        if options['traceback']:
+            for ui_ in uis:
+                ui_.setconfig('ui', 'traceback', 'on', '--traceback')
+
+        if options['noninteractive']:
+            for ui_ in uis:
+                ui_.setconfig('ui', 'interactive', 'off', '-y')
+
+        if util.parsebool(options['pager']):
+            ui.pager('internal-always-' + cmd)
+        elif options['pager'] != 'auto':
+            ui.disablepager()
+
+        if cmdoptions.get('insecure', False):
+            for ui_ in uis:
+                ui_.insecureconnections = True
+
+        # setup color handling
+        coloropt = options['color']
         for ui_ in uis:
-            ui_.insecureconnections = True
+            if coloropt:
+                ui_.setconfig('ui', 'color', coloropt, '--color')
+            color.setup(ui_)
 
-    if options['version']:
-        return commands.version_(ui)
-    if options['help']:
-        return commands.help_(ui, cmd, command=cmd is not None)
-    elif not cmd:
-        return commands.help_(ui, 'shortlist')
+        if options['version']:
+            return commands.version_(ui)
+        if options['help']:
+            return commands.help_(ui, cmd, command=cmd is not None)
+        elif not cmd:
+            return commands.help_(ui, 'shortlist')
 
-    with profiling.maybeprofile(lui):
         repo = None
         cmdpats = args[:]
         if not func.norepo:
@@ -833,7 +902,7 @@
         elif rpath:
             ui.warn(_("warning: --repository ignored\n"))
 
-        msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
+        msg = _formatargs(fullargs)
         ui.log("command", '%s\n', msg)
         strcmdopt = pycompat.strkwargs(cmdoptions)
         d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
@@ -866,6 +935,8 @@
     if ui.config('ui', 'supportcontact', None) is None:
         for name, mod in extensions.extensions():
             testedwith = getattr(mod, 'testedwith', '')
+            if pycompat.ispy3 and isinstance(testedwith, str):
+                testedwith = testedwith.encode(u'utf-8')
             report = getattr(mod, 'buglink', _('the extension author.'))
             if not testedwith.strip():
                 # We found an untested extension. It's likely the culprit.
@@ -886,7 +957,7 @@
                 worst = name, nearest, report
     if worst[0] is not None:
         name, testedwith, report = worst
-        if not isinstance(testedwith, str):
+        if not isinstance(testedwith, (bytes, str)):
             testedwith = '.'.join([str(c) for c in testedwith])
         warning = (_('** Unknown exception encountered with '
                      'possibly-broken third-party extension %s\n'
@@ -900,7 +971,12 @@
             bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
         warning = (_("** unknown exception encountered, "
                      "please report by visiting\n** ") + bugtracker + '\n')
-    warning += ((_("** Python %s\n") % sys.version.replace('\n', '')) +
+    if pycompat.ispy3:
+        sysversion = sys.version.encode(u'utf-8')
+    else:
+        sysversion = sys.version
+    sysversion = sysversion.replace('\n', '')
+    warning += ((_("** Python %s\n") % sysversion) +
                 (_("** Mercurial Distributed SCM (version %s)\n") %
                  util.version()) +
                 (_("** Extensions loaded: %s\n") %
--- a/mercurial/encoding.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/encoding.py	Tue Apr 18 12:24:34 2017 -0400
@@ -196,6 +196,24 @@
     except LookupError as k:
         raise error.Abort(k, hint="please check your locale settings")
 
+def unitolocal(u):
+    """Convert a unicode string to a byte string of local encoding"""
+    return tolocal(u.encode('utf-8'))
+
+def unifromlocal(s):
+    """Convert a byte string of local encoding to a unicode string"""
+    return fromlocal(s).decode('utf-8')
+
+# converter functions between native str and byte string. use these if the
+# character encoding is not known (e.g. exception messages) or is known to
+# be locale dependent (e.g. date formatting).
+if pycompat.ispy3:
+    strtolocal = unitolocal
+    strfromlocal = unifromlocal
+else:
+    strtolocal = pycompat.identity
+    strfromlocal = pycompat.identity
+
 if not _nativeenviron:
     # now encoding and helper functions are available, recreate the environ
     # dict to be exported to other modules
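A hedged, simplified mirror of the new helpers (it assumes utf-8 and skips the local-encoding transcoding that tolocal()/fromlocal() perform)::

    import sys

    def strtolocal(s):
        """native str -> byte string (identity on Python 2)"""
        if sys.version_info[0] >= 3:
            return s.encode('utf-8')
        return s

    # e.g. feeding an exception message to a byte-oriented UI layer:
    print(strtolocal(str(ValueError('boom'))))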
--- a/mercurial/error.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/error.py	Tue Apr 18 12:24:34 2017 -0400
@@ -22,7 +22,7 @@
     pass remaining arguments to the exception class.
     """
     def __init__(self, *args, **kw):
-        self.hint = kw.pop('hint', None)
+        self.hint = kw.pop(r'hint', None)
         super(Hint, self).__init__(*args, **kw)
 
 class RevlogError(Hint, Exception):
@@ -122,6 +122,12 @@
 class RequirementError(RepoError):
     """Exception raised if .hg/requires has an unknown entry."""
 
+class StdioError(IOError):
+    """Raised if I/O to stdout or stderr fails"""
+
+    def __init__(self, err):
+        IOError.__init__(self, err.errno, err.strerror)
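A hedged sketch of a call site for the new class (the surrounding code is ours): a failing stdio write is rewrapped so dispatch can tell it apart from other IOErrors::

    import sys
    from mercurial import error

    try:
        sys.stdout.flush()
    except IOError as err:
        raise error.StdioError(err)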
+
 class UnsupportedMergeRecords(Abort):
     def __init__(self, recordtypes):
         from .i18n import _
@@ -246,3 +252,6 @@
 
 class CorruptedState(Exception):
     """error raised when a command is not able to read its state from file"""
+
+class PeerTransportError(Abort):
+    """Transport-level I/O error when communicating with a peer repo."""
--- a/mercurial/exchange.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/exchange.py	Tue Apr 18 12:24:34 2017 -0400
@@ -1737,9 +1737,15 @@
     if url.startswith('remote:http:') or url.startswith('remote:https:'):
         captureoutput = True
     try:
+        # note: outside bundle1, 'heads' is expected to be empty and this
+        # 'check_heads' call will be a no-op
         check_heads(repo, heads, 'uploading changes')
         # push can proceed
-        if util.safehasattr(cg, 'params'):
+        if not util.safehasattr(cg, 'params'):
+            # legacy case: bundle1 (changegroup 01)
+            lockandtr[1] = repo.lock()
+            r = cg.apply(repo, source, url)
+        else:
             r = None
             try:
                 def gettransaction():
@@ -1778,9 +1784,6 @@
                                                   mandatory=False)
                         parts.append(part)
                 raise
-        else:
-            lockandtr[1] = repo.lock()
-            r = cg.apply(repo, source, url)
     finally:
         lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
         if recordout is not None:
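The branch swap above keys off util.safehasattr(cg, 'params'): bundle2 streams expose a params attribute, bare bundle1 changegroups do not. A hedged toy rendering (class names are stand-ins)::

    class bundle2stream(object):     # stand-in for a bundle2 unbundler
        params = {}

    class changegroup01(object):     # stand-in for a bundle1 changegroup
        pass

    _notset = object()

    def safehasattr(thing, attr):
        return getattr(thing, attr, _notset) is not _notset

    print(safehasattr(bundle2stream(), 'params'))   # True: bundle2 path
    print(safehasattr(changegroup01(), 'params'))   # False: legacy path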
--- a/mercurial/exewrapper.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/exewrapper.c	Tue Apr 18 12:24:34 2017 -0400
@@ -67,51 +67,35 @@
 	}
 
 	pydll = NULL;
-	/*
-	We first check, that environment variable PYTHONHOME is *not* set.
-	This just mimicks the behavior of the regular python.exe, which uses
-	PYTHONHOME to find its installation directory (if it has been set).
-	Note: Users of HackableMercurial are expected to *not* set PYTHONHOME!
-	*/
-	if (GetEnvironmentVariable("PYTHONHOME", envpyhome,
-				   sizeof(envpyhome)) == 0)
-	{
-		/*
-		Environment var PYTHONHOME is *not* set. Let's see if we are
-		running inside a HackableMercurial.
-		*/
+
+	p = strrchr(pyhome, '\\');
+	if (p == NULL) {
+		err = "can't find backslash in module filename";
+		goto bail;
+	}
+	*p = 0; /* cut at directory */
+
+	/* check for private Python of HackableMercurial */
+	strcat_s(pyhome, sizeof(pyhome), "\\hg-python");
 
-		p = strrchr(pyhome, '\\');
-		if (p == NULL) {
-			err = "can't find backslash in module filename";
+	hfind = FindFirstFile(pyhome, &fdata);
+	if (hfind != INVALID_HANDLE_VALUE) {
+		/* Path .\hg-python exists. We are probably in a HackableMercurial
+		scenario, so let's load the Python DLL from this dir. */
+		FindClose(hfind);
+		strcpy_s(pydllfile, sizeof(pydllfile), pyhome);
+		strcat_s(pydllfile, sizeof(pydllfile), "\\" HGPYTHONLIB ".dll");
+		pydll = LoadLibrary(pydllfile);
+		if (pydll == NULL) {
+			err = "failed to load private Python DLL " HGPYTHONLIB ".dll";
 			goto bail;
 		}
-		*p = 0; /* cut at directory */
-
-		/* check for private Python of HackableMercurial */
-		strcat_s(pyhome, sizeof(pyhome), "\\hg-python");
-
-		hfind = FindFirstFile(pyhome, &fdata);
-		if (hfind != INVALID_HANDLE_VALUE) {
-			/* path pyhome exists, let's use it */
-			FindClose(hfind);
-			strcpy_s(pydllfile, sizeof(pydllfile), pyhome);
-			strcat_s(pydllfile, sizeof(pydllfile),
-				 "\\" HGPYTHONLIB ".dll");
-			pydll = LoadLibrary(pydllfile);
-			if (pydll == NULL) {
-				err = "failed to load private Python DLL "
-				      HGPYTHONLIB ".dll";
-				goto bail;
-			}
-			Py_SetPythonHome = (void*)GetProcAddress(pydll,
-							"Py_SetPythonHome");
-			if (Py_SetPythonHome == NULL) {
-				err = "failed to get Py_SetPythonHome";
-				goto bail;
-			}
-			Py_SetPythonHome(pyhome);
+		Py_SetPythonHome = (void*)GetProcAddress(pydll, "Py_SetPythonHome");
+		if (Py_SetPythonHome == NULL) {
+			err = "failed to get Py_SetPythonHome";
+			goto bail;
 		}
+		Py_SetPythonHome(pyhome);
 	}
 
 	if (pydll == NULL) {
--- a/mercurial/extensions.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/extensions.py	Tue Apr 18 12:24:34 2017 -0400
@@ -8,6 +8,7 @@
 from __future__ import absolute_import
 
 import imp
+import inspect
 import os
 
 from .i18n import (
@@ -17,6 +18,7 @@
 
 from . import (
     cmdutil,
+    encoding,
     error,
     pycompat,
     util,
@@ -103,11 +105,16 @@
                 mod = _importh(name)
     return mod
 
+def _forbytes(inst):
+    """Portably format an import error into a form suitable for
+    %-formatting into bytestrings."""
+    return encoding.strtolocal(str(inst))
+
 def _reportimporterror(ui, err, failed, next):
     # note: this ui.debug happens before --debug is processed,
     #       Use --config ui.debug=1 to see them.
     ui.debug('could not import %s (%s): trying %s\n'
-             % (failed, err, next))
+             % (failed, _forbytes(err), next))
     if ui.debugflag:
         ui.traceback()
 
@@ -150,7 +157,7 @@
         try:
             extsetup(ui)
         except TypeError:
-            if extsetup.func_code.co_argcount != 0:
+            if inspect.getargspec(extsetup).args:
                 raise
             extsetup() # old extsetup with no ui argument
 
@@ -159,7 +166,7 @@
     newindex = len(_order)
     for (name, path) in result:
         if path:
-            if path[0] == '!':
+            if path[0:1] == '!':
                 _disabledextensions[name] = path[1:]
                 continue
         try:
@@ -167,6 +174,7 @@
         except KeyboardInterrupt:
             raise
         except Exception as inst:
+            inst = _forbytes(inst)
             if path:
                 ui.warn(_("*** failed to import extension %s from %s: %s\n")
                         % (name, path, inst))
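The path[0:1] change in this hunk is the standard bytes-indexing portability fix; a hedged demonstration (the path value is invented)::

    path = b'!/path/to/disabled-ext.py'
    print(path[0])      # 33 on Python 3 (an int), '!' on Python 2
    print(path[0:1])    # b'!' on both, so path[0:1] == b'!' is portable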
@@ -362,7 +370,8 @@
     '''find paths of disabled extensions. returns a dict of {name: path}
     removes /__init__.py from packages if strip_init is True'''
     import hgext
-    extpath = os.path.dirname(os.path.abspath(hgext.__file__))
+    extpath = os.path.dirname(
+        os.path.abspath(pycompat.fsencode(hgext.__file__)))
     try: # might not be a filesystem path
         files = os.listdir(extpath)
     except OSError:
--- a/mercurial/filemerge.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/filemerge.py	Tue Apr 18 12:24:34 2017 -0400
@@ -35,7 +35,9 @@
 def _toolbool(ui, tool, part, default=False):
     return ui.configbool("merge-tools", tool + "." + part, default)
 
-def _toollist(ui, tool, part, default=[]):
+def _toollist(ui, tool, part, default=None):
+    if default is None:
+        default = []
     return ui.configlist("merge-tools", tool + "." + part, default)
 
 internals = {}
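Dropping default=[] above avoids Python's shared-mutable-default pitfall; the old default was never mutated here, so the change is preventive. A hedged demonstration of the hazard::

    def bad(item, acc=[]):
        acc.append(item)
        return acc

    print(bad('a'))   # ['a']
    print(bad('b'))   # ['a', 'b'] -- the one list persists across calls

    def good(item, acc=None):
        if acc is None:
            acc = []
        acc.append(item)
        return acc

    print(good('a'))  # ['a']
    print(good('b'))  # ['b']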
@@ -489,8 +491,11 @@
     args = util.interpolate(r'\$', replace, args,
                             lambda s: util.shellquote(util.localpath(s)))
     cmd = toolpath + ' ' + args
+    if _toolbool(ui, tool, "gui"):
+        repo.ui.status(_('running merge tool %s for file %s\n') %
+                       (tool, fcd.path()))
     repo.ui.debug('launching merge tool: %s\n' % cmd)
-    r = ui.system(cmd, cwd=repo.root, environ=env)
+    r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool')
     repo.ui.debug('merge tool returned: %s\n' % r)
     return True, r, False
 
@@ -538,6 +543,7 @@
 
     ui = repo.ui
     template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
+    template = templater.unquotestring(template)
     tmpl = formatter.maketemplater(ui, 'conflictmarker', template)
 
     pad = max(len(l) for l in labels)
@@ -582,7 +588,7 @@
         pre = "%s~%s." % (os.path.basename(fullbase), prefix)
         (fd, name) = tempfile.mkstemp(prefix=pre, suffix=ext)
         data = repo.wwritedata(ctx.path(), ctx.data())
-        f = os.fdopen(fd, "wb")
+        f = os.fdopen(fd, pycompat.sysstr("wb"))
         f.write(data)
         f.close()
         return name
--- a/mercurial/fileset.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/fileset.py	Tue Apr 18 12:24:34 2017 -0400
@@ -15,6 +15,7 @@
     merge,
     parser,
     registrar,
+    scmutil,
     util,
 )
 
@@ -153,7 +154,7 @@
     """
     # i18n: "modified" is a keyword
     getargs(x, 0, 0, _("modified takes no arguments"))
-    s = mctx.status().modified
+    s = set(mctx.status().modified)
     return [f for f in mctx.subset if f in s]
 
 @predicate('added()', callstatus=True)
@@ -162,7 +163,7 @@
     """
     # i18n: "added" is a keyword
     getargs(x, 0, 0, _("added takes no arguments"))
-    s = mctx.status().added
+    s = set(mctx.status().added)
     return [f for f in mctx.subset if f in s]
 
 @predicate('removed()', callstatus=True)
@@ -171,7 +172,7 @@
     """
     # i18n: "removed" is a keyword
     getargs(x, 0, 0, _("removed takes no arguments"))
-    s = mctx.status().removed
+    s = set(mctx.status().removed)
     return [f for f in mctx.subset if f in s]
 
 @predicate('deleted()', callstatus=True)
@@ -180,7 +181,7 @@
     """
     # i18n: "deleted" is a keyword
     getargs(x, 0, 0, _("deleted takes no arguments"))
-    s = mctx.status().deleted
+    s = set(mctx.status().deleted)
     return [f for f in mctx.subset if f in s]
 
 @predicate('missing()', callstatus=True)
@@ -189,7 +190,7 @@
     """
     # i18n: "missing" is a keyword
     getargs(x, 0, 0, _("missing takes no arguments"))
-    s = mctx.status().deleted
+    s = set(mctx.status().deleted)
     return [f for f in mctx.subset if f in s]
 
 @predicate('unknown()', callstatus=True)
@@ -199,7 +200,7 @@
     """
     # i18n: "unknown" is a keyword
     getargs(x, 0, 0, _("unknown takes no arguments"))
-    s = mctx.status().unknown
+    s = set(mctx.status().unknown)
     return [f for f in mctx.subset if f in s]
 
 @predicate('ignored()', callstatus=True)
@@ -209,7 +210,7 @@
     """
     # i18n: "ignored" is a keyword
     getargs(x, 0, 0, _("ignored takes no arguments"))
-    s = mctx.status().ignored
+    s = set(mctx.status().ignored)
     return [f for f in mctx.subset if f in s]
 
 @predicate('clean()', callstatus=True)
@@ -218,7 +219,7 @@
     """
     # i18n: "clean" is a keyword
     getargs(x, 0, 0, _("clean takes no arguments"))
-    s = mctx.status().clean
+    s = set(mctx.status().clean)
     return [f for f in mctx.subset if f in s]
 
 def func(mctx, a, b):
@@ -438,6 +439,52 @@
             s.append(f)
     return s
 
+@predicate('revs(revs, pattern)')
+def revs(mctx, x):
+    """Evaluate set in the specified revisions. If the revset match multiple
+    revs, this will return file matching pattern in any of the revision.
+    """
+    # i18n: "revs" is a keyword
+    r, x = getargs(x, 2, 2, _("revs takes two arguments"))
+    # i18n: "revs" is a keyword
+    revspec = getstring(r, _("first argument to revs must be a revision"))
+    repo = mctx.ctx.repo()
+    revs = scmutil.revrange(repo, [revspec])
+
+    found = set()
+    result = []
+    for r in revs:
+        ctx = repo[r]
+        for f in getset(mctx.switch(ctx, _buildstatus(ctx, x)), x):
+            if f not in found:
+                found.add(f)
+                result.append(f)
+    return result
+
+@predicate('status(base, rev, pattern)')
+def status(mctx, x):
+    """Evaluate predicate using status change between ``base`` and
+    ``rev``. Examples:
+
+    - ``status(3, 7, added())`` - matches files added from "3" to "7"
+    """
+    repo = mctx.ctx.repo()
+    # i18n: "status" is a keyword
+    b, r, x = getargs(x, 3, 3, _("status takes three arguments"))
+    # i18n: "status" is a keyword
+    baseerr = _("first argument to status must be a revision")
+    baserevspec = getstring(b, baseerr)
+    if not baserevspec:
+        raise error.ParseError(baseerr)
+    reverr = _("second argument to status must be a revision")
+    revspec = getstring(r, reverr)
+    if not revspec:
+        raise error.ParseError(reverr)
+    basenode, node = scmutil.revpair(repo, [baserevspec, revspec])
+    basectx = repo[basenode]
+    ctx = repo[node]
+    return getset(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x)
+
 @predicate('subrepo([pattern])')
 def subrepo(mctx, x):
     """Subrepositories whose paths match the given pattern.
@@ -474,7 +521,7 @@
 }
 
 class matchctx(object):
-    def __init__(self, ctx, subset=None, status=None):
+    def __init__(self, ctx, subset, status=None):
         self.ctx = ctx
         self.subset = subset
         self._status = status
@@ -497,39 +544,71 @@
                 if (f in self.ctx and f not in removed) or f in unknown)
     def narrow(self, files):
         return matchctx(self.ctx, self.filter(files), self._status)
+    def switch(self, ctx, status=None):
+        subset = self.filter(_buildsubset(ctx, status))
+        return matchctx(ctx, subset, status)
+
+class fullmatchctx(matchctx):
+    """A match context where any files in any revisions should be valid"""
+
+    def __init__(self, ctx, status=None):
+        subset = _buildsubset(ctx, status)
+        super(fullmatchctx, self).__init__(ctx, subset, status)
+    def switch(self, ctx, status=None):
+        return fullmatchctx(ctx, status)
+
+# filesets using matchctx.switch()
+_switchcallers = [
+    'revs',
+    'status',
+]
 
 def _intree(funcs, tree):
     if isinstance(tree, tuple):
         if tree[0] == 'func' and tree[1][0] == 'symbol':
             if tree[1][1] in funcs:
                 return True
+            if tree[1][1] in _switchcallers:
+                # arguments won't be evaluated in the current context
+                return False
         for s in tree[1:]:
             if _intree(funcs, s):
                 return True
     return False
 
+def _buildsubset(ctx, status):
+    if status:
+        subset = []
+        for c in status:
+            subset.extend(c)
+        return subset
+    else:
+        return list(ctx.walk(ctx.match([])))
+
 def getfileset(ctx, expr):
     tree = parse(expr)
+    return getset(fullmatchctx(ctx, _buildstatus(ctx, tree)), tree)
 
+def _buildstatus(ctx, tree, basectx=None):
     # do we need status info?
+
+    # temporary boolean to simplify the next conditional
+    purewdir = ctx.rev() is None and basectx is None
+
     if (_intree(_statuscallers, tree) or
         # Using matchctx.existing() on a workingctx requires us to check
         # for deleted files.
-        (ctx.rev() is None and _intree(_existingcallers, tree))):
+        (purewdir and _intree(_existingcallers, tree))):
         unknown = _intree(['unknown'], tree)
         ignored = _intree(['ignored'], tree)
 
         r = ctx.repo()
-        status = r.status(ctx.p1(), ctx,
-                          unknown=unknown, ignored=ignored, clean=True)
-        subset = []
-        for c in status:
-            subset.extend(c)
+        if basectx is None:
+            basectx = ctx.p1()
+        return r.status(basectx, ctx,
+                        unknown=unknown, ignored=ignored, clean=True)
     else:
-        status = None
-        subset = list(ctx.walk(ctx.match([])))
-
-    return getset(matchctx(ctx, subset, status), tree)
+        return None
 
 def prettyformat(tree):
     return parser.prettyformat(tree, ('string', 'symbol'))
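
The new status(base, rev, pattern) predicate above exposes two-revision
status results to filesets. A rough sketch of driving it from Python,
assuming a local repository (revisions and pattern are illustrative;
getfileset() is the entry point shown in this hunk)::

    from mercurial import fileset, hg, ui as uimod

    u = uimod.ui.load()
    repo = hg.repository(u, '.')
    ctx = repo['.']
    # files reported as added() between the two revisions
    files = fileset.getfileset(ctx, "status(tip~2, tip, added())")

The same expression also works on the command line, e.g.
``hg files "set:status(tip~2, tip, added())"``.
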
--- a/mercurial/formatter.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/formatter.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,6 +12,7 @@
 
 - fm.write() for unconditional output
 - fm.condwrite() to show some extra data conditionally in plain output
+- fm.context() to provide changectx to template output
 - fm.data() to provide extra data to JSON or template output
 - fm.plain() to show raw text that isn't provided to JSON or template output
 
@@ -102,6 +103,7 @@
 
 from __future__ import absolute_import
 
+import itertools
 import os
 
 from .i18n import _
@@ -111,8 +113,8 @@
 )
 
 from . import (
-    encoding,
     error,
+    templatefilters,
     templatekw,
     templater,
     util,
@@ -171,6 +173,9 @@
         # name is mandatory argument for now, but it could be optional if
         # we have default template keyword, e.g. {item}
         return self._converter.formatlist(data, name, fmt, sep)
+    def context(self, **ctxs):
+        '''insert context objects to be used to render template keywords'''
+        pass
     def data(self, **data):
         '''insert data into item that's not shown in default output'''
         self._item.update(data)
@@ -257,66 +262,52 @@
         pass
 
 class debugformatter(baseformatter):
-    def __init__(self, ui, topic, opts):
+    def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
-        self._ui.write("%s = [\n" % self._topic)
+        self._out = out
+        self._out.write("%s = [\n" % self._topic)
     def _showitem(self):
-        self._ui.write("    " + repr(self._item) + ",\n")
+        self._out.write("    " + repr(self._item) + ",\n")
     def end(self):
         baseformatter.end(self)
-        self._ui.write("]\n")
+        self._out.write("]\n")
 
 class pickleformatter(baseformatter):
-    def __init__(self, ui, topic, opts):
+    def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
+        self._out = out
         self._data = []
     def _showitem(self):
         self._data.append(self._item)
     def end(self):
         baseformatter.end(self)
-        self._ui.write(pickle.dumps(self._data))
-
-def _jsonifyobj(v):
-    if isinstance(v, dict):
-        xs = ['"%s": %s' % (encoding.jsonescape(k), _jsonifyobj(u))
-              for k, u in sorted(v.iteritems())]
-        return '{' + ', '.join(xs) + '}'
-    elif isinstance(v, (list, tuple)):
-        return '[' + ', '.join(_jsonifyobj(e) for e in v) + ']'
-    elif v is None:
-        return 'null'
-    elif v is True:
-        return 'true'
-    elif v is False:
-        return 'false'
-    elif isinstance(v, (int, float)):
-        return str(v)
-    else:
-        return '"%s"' % encoding.jsonescape(v)
+        self._out.write(pickle.dumps(self._data))
 
 class jsonformatter(baseformatter):
-    def __init__(self, ui, topic, opts):
+    def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _nullconverter)
-        self._ui.write("[")
-        self._ui._first = True
+        self._out = out
+        self._out.write("[")
+        self._first = True
     def _showitem(self):
-        if self._ui._first:
-            self._ui._first = False
+        if self._first:
+            self._first = False
         else:
-            self._ui.write(",")
+            self._out.write(",")
 
-        self._ui.write("\n {\n")
+        self._out.write("\n {\n")
         first = True
         for k, v in sorted(self._item.items()):
             if first:
                 first = False
             else:
-                self._ui.write(",\n")
-            self._ui.write('  "%s": %s' % (k, _jsonifyobj(v)))
-        self._ui.write("\n }")
+                self._out.write(",\n")
+            u = templatefilters.json(v, paranoid=False)
+            self._out.write('  "%s": %s' % (k, u))
+        self._out.write("\n }")
     def end(self):
         baseformatter.end(self)
-        self._ui.write("\n]\n")
+        self._out.write("\n]\n")
 
 class _templateconverter(object):
     '''convert non-primitive data types to be processed by templater'''
@@ -330,25 +321,46 @@
         data = util.sortdict(_iteritems(data))
         def f():
             yield _plainconverter.formatdict(data, key, value, fmt, sep)
-        return templatekw._hybrid(f(), data, lambda k: {key: k, value: data[k]},
-                                  lambda d: fmt % (d[key], d[value]))
+        return templatekw.hybriddict(data, key=key, value=value, fmt=fmt,
+                                     gen=f())
     @staticmethod
     def formatlist(data, name, fmt, sep):
         '''build object that can be evaluated as either plain string or list'''
         data = list(data)
         def f():
             yield _plainconverter.formatlist(data, name, fmt, sep)
-        return templatekw._hybrid(f(), data, lambda x: {name: x},
-                                  lambda d: fmt % d[name])
+        return templatekw.hybridlist(data, name=name, fmt=fmt, gen=f())
 
 class templateformatter(baseformatter):
-    def __init__(self, ui, topic, opts):
+    def __init__(self, ui, out, topic, opts):
         baseformatter.__init__(self, ui, topic, opts, _templateconverter)
+        self._out = out
         self._topic = topic
-        self._t = gettemplater(ui, topic, opts.get('template', ''))
+        self._t = gettemplater(ui, topic, opts.get('template', ''),
+                               cache=templatekw.defaulttempl)
+        self._counter = itertools.count()
+        self._cache = {}  # for templatekw/funcs to store reusable data
+    def context(self, **ctxs):
+        '''insert context objects to be used to render template keywords'''
+        assert all(k == 'ctx' for k in ctxs)
+        self._item.update(ctxs)
     def _showitem(self):
-        g = self._t(self._topic, ui=self._ui, **self._item)
-        self._ui.write(templater.stringify(g))
+        # TODO: add support for filectx. probably each template keyword or
+        # function will have to declare dependent resources. e.g.
+        # @templatekeyword(..., requires=('ctx',))
+        props = {}
+        if 'ctx' in self._item:
+            props.update(templatekw.keywords)
+        props['index'] = next(self._counter)
+        # explicitly-defined fields precede templatekw
+        props.update(self._item)
+        if 'ctx' in self._item:
+            # but template resources must be always available
+            props['templ'] = self._t
+            props['repo'] = props['ctx'].repo()
+            props['revcache'] = {}
+        g = self._t(self._topic, ui=self._ui, cache=self._cache, **props)
+        self._out.write(templater.stringify(g))
 
 def lookuptemplate(ui, topic, tmpl):
     # looks like a literal template?
@@ -382,17 +394,17 @@
     # constant string?
     return tmpl, None
 
-def gettemplater(ui, topic, spec):
+def gettemplater(ui, topic, spec, cache=None):
     tmpl, mapfile = lookuptemplate(ui, topic, spec)
     assert not (tmpl and mapfile)
     if mapfile:
-        return templater.templater.frommapfile(mapfile)
-    return maketemplater(ui, topic, tmpl)
+        return templater.templater.frommapfile(mapfile, cache=cache)
+    return maketemplater(ui, topic, tmpl, cache=cache)
 
-def maketemplater(ui, topic, tmpl, filters=None, cache=None):
+def maketemplater(ui, topic, tmpl, cache=None):
     """Create a templater from a string template 'tmpl'"""
     aliases = ui.configitems('templatealias')
-    t = templater.templater(filters=filters, cache=cache, aliases=aliases)
+    t = templater.templater(cache=cache, aliases=aliases)
     if tmpl:
         t.cache[topic] = tmpl
     return t
@@ -400,17 +412,17 @@
 def formatter(ui, topic, opts):
     template = opts.get("template", "")
     if template == "json":
-        return jsonformatter(ui, topic, opts)
+        return jsonformatter(ui, ui, topic, opts)
     elif template == "pickle":
-        return pickleformatter(ui, topic, opts)
+        return pickleformatter(ui, ui, topic, opts)
     elif template == "debug":
-        return debugformatter(ui, topic, opts)
+        return debugformatter(ui, ui, topic, opts)
     elif template != "":
-        return templateformatter(ui, topic, opts)
+        return templateformatter(ui, ui, topic, opts)
     # developer config: ui.formatdebug
     elif ui.configbool('ui', 'formatdebug'):
-        return debugformatter(ui, topic, opts)
+        return debugformatter(ui, ui, topic, opts)
     # deprecated config: ui.formatjson
     elif ui.configbool('ui', 'formatjson'):
-        return jsonformatter(ui, topic, opts)
+        return jsonformatter(ui, ui, topic, opts)
     return plainformatter(ui, topic, opts)
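
A minimal sketch of how a command body might use the reworked formatter
API, including the new fm.context() call (the 'files' topic and the
template are illustrative, not part of this change)::

    from mercurial import formatter

    def showfiles(ui, repo):
        ctx = repo['.']
        fm = formatter.formatter(ui, 'files', {'template': '{path}\n'})
        for path in ctx.walk(ctx.match([])):
            fm.startitem()
            fm.context(ctx=ctx)  # expose the changectx to template keywords
            fm.write('path', '%s\n', path)
        fm.end()
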
--- a/mercurial/graphmod.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/graphmod.py	Tue Apr 18 12:24:34 2017 -0400
@@ -22,6 +22,7 @@
 from .node import nullrev
 from . import (
     revset,
+    smartset,
     util,
 )
 
@@ -67,8 +68,8 @@
             if gp is None:
                 # precompute slow query as we know reachableroots() goes
                 # through all revs (issue4782)
-                if not isinstance(revs, revset.baseset):
-                    revs = revset.baseset(revs)
+                if not isinstance(revs, smartset.baseset):
+                    revs = smartset.baseset(revs)
                 gp = gpcache[mpar] = sorted(set(revset.reachableroots(
                     repo, revs, [mpar])))
             if not gp:
@@ -181,6 +182,9 @@
     knownparents = []
     newparents = []
     for ptype, parent in parents:
+        if parent == rev:
+            # self reference (should only be seen in null rev)
+            continue
         if parent in seen:
             knownparents.append(parent)
         else:
@@ -190,8 +194,7 @@
     ncols = len(seen)
     nextseen = seen[:]
     nextseen[nodeidx:nodeidx + 1] = newparents
-    edges = [(nodeidx, nextseen.index(p))
-             for p in knownparents if p != nullrev]
+    edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
 
     seen[:] = nextseen
     while len(newparents) > 2:
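
baseset now lives in the new smartset module; third-party code doing the
same precomputation would follow suit, roughly::

    from mercurial import smartset

    revs = smartset.baseset([0, 1, 2])  # concrete set, cheap membership tests
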
--- a/mercurial/help.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help.py	Tue Apr 18 12:24:34 2017 -0400
@@ -33,14 +33,17 @@
     webcommands,
 )
 
-_exclkeywords = [
+_exclkeywords = set([
+    "(ADVANCED)",
     "(DEPRECATED)",
     "(EXPERIMENTAL)",
+    # i18n: "(ADVANCED)" is a keyword, must be translated consistently
+    _("(ADVANCED)"),
     # i18n: "(DEPRECATED)" is a keyword, must be translated consistently
     _("(DEPRECATED)"),
     # i18n: "(EXPERIMENTAL)" is a keyword, must be translated consistently
     _("(EXPERIMENTAL)"),
-    ]
+    ])
 
 def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
@@ -186,6 +189,8 @@
 internalstable = sorted([
     (['bundles'], _('Bundles'),
      loaddoc('bundles', subdir='internals')),
+    (['censor'], _('Censor'),
+     loaddoc('censor', subdir='internals')),
     (['changegroups'], _('Changegroups'),
      loaddoc('changegroups', subdir='internals')),
     (['requirements'], _('Repository Requirements'),
@@ -205,6 +210,8 @@
     return ''.join(lines)
 
 helptable = sorted([
+    (['bundlespec'], _("Bundle File Formats"), loaddoc('bundlespec')),
+    (['color'], _("Colorizing Outputs"), loaddoc('color')),
     (["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
     (["dates"], _("Date Formats"), loaddoc('dates')),
     (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
@@ -230,6 +237,7 @@
      loaddoc('scripting')),
     (['internals'], _("Technical implementation topics"),
      internalshelp),
+    (['pager'], _("Pager Support"), loaddoc('pager')),
 ])
 
 # Maps topics with sub-topics to a list of their sub-topics.
@@ -276,6 +284,8 @@
         return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent)
     addtopichook(topic, add)
 
+addtopicsymbols('bundlespec', '.. bundlecompressionmarker',
+                util.bundlecompressiontopics())
 addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols)
 addtopicsymbols('merge-tools', '.. internaltoolsmarker',
                 filemerge.internalsdoc)
@@ -605,3 +615,49 @@
         rst.extend(helplist(None, **opts))
 
     return ''.join(rst)
+
+def formattedhelp(ui, name, keep=None, unknowncmd=False, full=True, **opts):
+    """get help for a given topic (as a dotted name) as rendered rst
+
+    Either returns the rendered help text or raises an exception.
+    """
+    if keep is None:
+        keep = []
+    else:
+        keep = list(keep) # make a copy so we can mutate this later
+    fullname = name
+    section = None
+    subtopic = None
+    if name and '.' in name:
+        name, remaining = name.split('.', 1)
+        remaining = encoding.lower(remaining)
+        if '.' in remaining:
+            subtopic, section = remaining.split('.', 1)
+        else:
+            if name in subtopics:
+                subtopic = remaining
+            else:
+                section = remaining
+    textwidth = ui.configint('ui', 'textwidth', 78)
+    termwidth = ui.termwidth() - 2
+    if textwidth <= 0 or termwidth < textwidth:
+        textwidth = termwidth
+    text = help_(ui, name,
+                 subtopic=subtopic, unknowncmd=unknowncmd, full=full, **opts)
+
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+
+    # We could have been given a weird ".foo" section without a name
+    # to look for, or we could have simply failed to find "foo.bar"
+    # because bar isn't a section of foo
+    if section and not (formatted and name):
+        raise error.Abort(_("help section not found: %s") % fullname)
+
+    if 'verbose' in pruned:
+        keep.append('omitted')
+    else:
+        keep.append('notomitted')
+    formatted, pruned = minirst.format(text, textwidth, keep=keep,
+                                       section=section)
+    return formatted
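
formattedhelp() gives callers a single entry point for rendered help
text; a small sketch (the dotted topic name is illustrative)::

    from mercurial import help as helpmod

    def showhelp(ui):
        # dotted names address sections, e.g. the 'auth' section of 'config'
        ui.write(helpmod.formattedhelp(ui, 'config.auth', keep=[]))
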
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/bundlespec.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,84 @@
+Mercurial supports generating standalone "bundle" files that hold repository
+data. These "bundles" are typically saved locally and used later or exchanged
+between different repositories, possibly on different machines. Example
+commands using bundles are :hg:`bundle` and :hg:`unbundle`.
+
+Generation of bundle files is controlled by a "bundle specification"
+("bundlespec") string. This string tells the bundle generation process how
+to create the bundle.
+
+A "bundlespec" string is composed of the following elements:
+
+type
+    A string denoting the bundle format to use.
+
+compression
+    Denotes the compression engine to use when compressing the raw bundle
+    data.
+
+parameters
+    Arbitrary key-value parameters to further control bundle generation.
+
+A "bundlespec" string has the following formats:
+
+<type>
+    The literal bundle format string is used.
+
+<compression>-<type>
+    The compression engine and format are delimited by a hyphen (``-``).
+
+Optional parameters follow the ``<type>``. Parameters are URI escaped
+``key=value`` pairs. Each pair is delimited by a semicolon (``;``). The
+first parameter begins after a ``;`` immediately following the ``<type>``
+value.
+
+Available Types
+===============
+
+The following bundle <type> strings are available:
+
+v1
+    Produces a legacy "changegroup" version 1 bundle.
+
+    This format is compatible with nearly all Mercurial clients because it is
+    the oldest. However, it has some limitations, which is why it is no longer
+    the default for new repositories.
+
+    ``v1`` bundles can be used with modern repositories using the "generaldelta"
+    storage format. However, it may take longer to produce the bundle and the
+    resulting bundle may be significantly larger than a ``v2`` bundle.
+
+    ``v1`` bundles can only use the ``gzip``, ``bzip2``, and ``none`` compression
+    formats.
+
+v2
+    Produces a version 2 bundle.
+
+    Version 2 bundles are an extensible format that can store additional
+    repository data (such as bookmarks and phases information) and they can
+    store data more efficiently, resulting in smaller bundles.
+
+    Version 2 bundles can also use modern compression engines, such as
+    ``zstd``, making them faster to compress and often smaller.
+
+Available Compression Engines
+=============================
+
+The following bundle <compression> engines can be used:
+
+.. bundlecompressionmarker
+
+Examples
+========
+
+``v2``
+    Produce a ``v2`` bundle using default options, including compression.
+
+``none-v1``
+    Produce a ``v1`` bundle with no compression.
+
+``zstd-v2``
+    Produce a ``v2`` bundle with zstandard compression using default
+    settings.
+
+``zstd-v1``
+    This errors because ``zstd`` is not supported for ``v1`` types.
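
For illustration only (the real parsing lives in
mercurial.exchange.parsebundlespec), a bundlespec string decomposes
roughly as follows::

    spec = 'zstd-v2;obsolescence=true'            # params are illustrative
    main, _, params = spec.partition(';')         # URI-escaped key=value pairs
    comp, sep, version = main.partition('-')
    if not sep:                                   # bare '<type>' form, e.g. 'v2'
        comp, version = None, main
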
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/color.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,134 @@
+Mercurial can colorize the output of several commands.
+
+For example, the diff command shows additions in green and deletions
+in red, while the status command shows modified files in magenta. Many
+other commands have analogous colors. It is possible to customize
+these colors.
+
+To enable color use::
+
+  [ui]
+  color = auto
+
+Mode
+====
+
+Mercurial can use various systems to display color. The supported modes are
+``ansi``, ``win32``, and ``terminfo``. See :hg:`help config.color` for details
+about how to control the mode.
+
+Effects
+=======
+
+Other effects in addition to color, like bold and underlined text, are
+also available. By default, the terminfo database is used to find the
+terminal codes used to change color and effect.  If terminfo is not
+available, then effects are rendered with the ECMA-48 SGR control
+function (aka ANSI escape codes).
+
+The available effects in terminfo mode are 'blink', 'bold', 'dim',
+'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
+ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
+'underline'.  How each is rendered depends on the terminal emulator.
+Some may not be available for a given terminal type, and will be
+silently ignored.
+
+If the terminfo entry for your terminal is missing codes for an effect
+or has the wrong codes, you can add or override those codes in your
+configuration::
+
+  [color]
+  terminfo.dim = \E[2m
+
+where '\E' is substituted with an escape character.
+
+Labels
+======
+
+Text receives color effects depending on the labels that it has. Many
+default Mercurial commands emit labelled text. You can also define
+your own labels in templates using the label function; see :hg:`help
+templates`. A single portion of text may have more than one label. In
+that case, effects given to the last label will override any other
+effects. This includes the special "none" effect, which nullifies
+other effects.
+
+Labels are normally invisible. In order to see these labels and their
+position in the text, use the global --color=debug option. The same
+anchor text may be associated to multiple labels, e.g.
+
+  [log.changeset changeset.secret|changeset:   22611:6f0a53c8f587]
+
+The following are the default effects for some default labels. Default
+effects may be overridden from your configuration file::
+
+  [color]
+  status.modified = blue bold underline red_background
+  status.added = green bold
+  status.removed = red bold blue_background
+  status.deleted = cyan bold underline
+  status.unknown = magenta bold underline
+  status.ignored = black bold
+
+  # 'none' turns off all effects
+  status.clean = none
+  status.copied = none
+
+  qseries.applied = blue bold underline
+  qseries.unapplied = black bold
+  qseries.missing = red bold
+
+  diff.diffline = bold
+  diff.extended = cyan bold
+  diff.file_a = red bold
+  diff.file_b = green bold
+  diff.hunk = magenta
+  diff.deleted = red
+  diff.inserted = green
+  diff.changed = white
+  diff.tab =
+  diff.trailingwhitespace = bold red_background
+
+  # Blank so it inherits the style of the surrounding label
+  changeset.public =
+  changeset.draft =
+  changeset.secret =
+
+  resolve.unresolved = red bold
+  resolve.resolved = green bold
+
+  bookmarks.active = green
+
+  branches.active = none
+  branches.closed = black bold
+  branches.current = green
+  branches.inactive = none
+
+  tags.normal = green
+  tags.local = black bold
+
+  rebase.rebased = blue
+  rebase.remaining = red bold
+
+  shelve.age = cyan
+  shelve.newest = green bold
+  shelve.name = blue bold
+
+  histedit.remaining = red bold
+
+Custom colors
+=============
+
+Because there are only eight standard colors, Mercurial allows you
+to define color names for other color slots which might be available
+for your terminal type, assuming terminfo mode.  For instance::
+
+  color.brightblue = 12
+  color.pink = 207
+  color.orange = 202
+
+to set 'brightblue' to color slot 12 (useful for 16 color terminals
+that have brighter colors defined in the upper eight), and 'pink' and
+'orange' to colors in 256-color xterm's default color cube.  These
+defined colors may then be used as any of the pre-defined eight,
+including appending '_background' to set the background to that color.
--- a/mercurial/help/config.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/config.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -56,6 +56,7 @@
 
   - ``<repo>/.hg/hgrc`` (per-repository)
   - ``$HOME/.hgrc`` (per-user)
+  - ``${XDG_CONFIG_HOME:-$HOME/.config}/hg/hgrc`` (per-user)
   - ``<install-root>/etc/mercurial/hgrc`` (per-installation)
   - ``<install-root>/etc/mercurial/hgrc.d/*.rc`` (per-installation)
   - ``/etc/mercurial/hgrc`` (per-system)
@@ -276,7 +277,7 @@
 will let you do ``hg echo foo`` to have ``foo`` printed in your
 terminal. A better example might be::
 
-   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm
+   purge = !$HG status --no-status --unknown -0 re: | xargs -0 rm -f
 
 which will make ``hg purge`` delete all unknown files in the
 repository in the same manner as the purge extension.
@@ -322,12 +323,32 @@
 ``auth``
 --------
 
-Authentication credentials for HTTP authentication. This section
-allows you to store usernames and passwords for use when logging
-*into* HTTP servers. See :hg:`help config.web` if
-you want to configure *who* can login to your HTTP server.
-
-Each line has the following format::
+Authentication credentials and other authentication-like configuration
+for HTTP connections. This section allows you to store usernames and
+passwords for use when logging *into* HTTP servers. See
+:hg:`help config.web` if you want to configure *who* can login to
+your HTTP server.
+
+The following options apply to all hosts.
+
+``cookiefile``
+    Path to a file containing HTTP cookie lines. Cookies matching a
+    host will be sent automatically.
+
+    The file format uses the Mozilla cookies.txt format, which defines cookies
+    on their own lines. Each line contains 7 fields delimited by the tab
+    character (domain, is_domain_cookie, path, is_secure, expires, name,
+    value). For more info, do an Internet search for "Netscape cookies.txt
+    format."
+
+    Note: the cookies parser does not handle port numbers on domains. You
+    will need to remove ports from the domain for the cookie to be recognized.
+    This could result in a cookie being disclosed to an unwanted server.
+
+    The cookies file is read-only.
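
    As a purely hypothetical example, one line in that format (seven
    tab-separated fields) might look like::

      example.com	FALSE	/	FALSE	2145916800	session	0123456789abcdef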
+
+Other options in this section are grouped by name and have the following
+format::
 
     <name>.<argument> = <value>
 
@@ -385,6 +406,46 @@
 If no suitable authentication entry is found, the user is prompted
 for credentials as usual if required by the remote.
 
+``color``
+---------
+
+Configure the Mercurial color mode. For details about how to define your custom
+effect and style see :hg:`help color`.
+
+``mode``
+    String: control the method used to output color. One of ``auto``, ``ansi``,
+    ``win32``, ``terminfo`` or ``debug``. In auto mode the color extension will
+    use ANSI mode by default (or win32 mode on Windows) if it detects a
+    terminal. Any invalid value will disable color.
+
+``pagermode``
+    String: optional override of ``color.mode`` used with the pager (from
+    the pager extension).
+
+    On some systems, terminfo mode may cause problems when using
+    color with the pager extension and less -R. less with the -R option
+    will only display ECMA-48 color codes, and terminfo mode may sometimes
+    emit codes that less doesn't understand. You can work around this by
+    either using ansi mode (or auto mode), or by using less -r (which will
+    pass through all terminal control codes, not just color control
+    codes).
+
+    On some systems (such as MSYS in Windows), the terminal may support
+    a different color mode than the pager (activated via the "pager"
+    extension).
+
+``commands``
+------------
+
+``status.relative``
+    Make paths in ``hg status`` output relative to the current directory.
+    (default: False)
+
+``update.requiredest``
+    Require that the user pass a destination when running ``hg update``.
+    For example, ``hg update .::`` will be allowed, but a plain ``hg update``
+    will be disallowed.
+    (default: False)
 
 ``committemplate``
 ------------------
@@ -700,8 +761,8 @@
 Example for ``~/.hgrc``::
 
   [extensions]
-  # (the color extension will get loaded from Mercurial's path)
-  color =
+  # (the churn extension will get loaded from Mercurial's path)
+  churn =
   # (this extension will get loaded from the file specified)
   myfeature = ~/.hgext/myfeature.py
 
@@ -801,8 +862,12 @@
   priority.incoming.autobuild = 1
 
 Most hooks are run with environment variables set that give useful
-additional information. For each hook below, the environment
-variables it is passed are listed with names of the form ``$HG_foo``.
+additional information. For each hook below, the environment variables
+it is passed are listed with names of the form ``$HG_foo``. The
+``$HG_HOOKTYPE`` and ``$HG_HOOKNAME`` variables are set for all hooks.
+They contain, respectively, the type of hook which triggered the run and
+the full name of the hook in the config. In the example above, this will
+be ``$HG_HOOKTYPE=incoming`` and ``$HG_HOOKNAME=incoming.email``.
 
 ``changegroup``
   Run after a changegroup has been added via push, pull or unbundle.  ID of the
@@ -1796,6 +1861,13 @@
 
     By default, the first bundle advertised by the server is used.
 
+``color``
+    String: when to colorize output. Possible values are auto, always,
+    never, or debug (default: auto). 'auto' will use color whenever it seems
+    possible. See :hg:`help color` for details.
+
+    (In addition, a boolean value can be used in place of always/never.)
+
 ``commitsubrepos``
     Whether to commit modified subrepositories when committing the
     parent repository. If False and one subrepository has uncommitted
--- a/mercurial/help/filesets.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/filesets.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -69,6 +69,10 @@
 
     hg revert "set:copied() and binary() and size('>1M')"
 
+- Revert files that were added to the working directory::
+
+    hg revert "set:revs('wdir()', added())"
+
 - Remove files listed in foo.lst that contain the letter a or b::
 
     hg remove "set: 'listfile:foo.lst' and (**a* or **b*)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/censor.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,22 @@
+The censor system allows retroactively removing content from
+files. Actually censoring a node requires using the censor extension,
+but the functionality for handling censored nodes is partially in core.
+
+Censored nodes in a filelog have the flag ``REVIDX_ISCENSORED`` set,
+and the contents of the censored node are replaced with a censor
+tombstone. For historical reasons, the tombstone is packed in the
+filelog metadata field ``censored``. This allows censored nodes to be
+(mostly) safely transmitted through old formats like changegroup
+versions 1 and 2. When using changegroup formats older than 3, the
+receiver is required to re-add the ``REVIDX_ISCENSORED`` flag when
+storing the revision. This depends on the ``censored`` metadata key
+never being used for anything other than censoring revisions, which is
+true as of January 2017. Note that the revlog flag is the
+authoritative marker of a censored node: the tombstone should only be
+consulted when looking for a reason a node was censored or when revlog
+flags are unavailable as mentioned above.
+
+The tombstone data is a free-form string. It's expected that users of
+censor will want to record the reason for censoring a node in the
+tombstone. The tombstone must be able to fit within the size of the
+content being censored.
--- a/mercurial/help/internals/changegroups.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/internals/changegroups.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -1,35 +1,49 @@
 Changegroups are representations of repository revlog data, specifically
-the changelog, manifest, and filelogs.
+the changelog data, root/flat manifest data, treemanifest data, and
+filelogs.
 
 There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
-high-level, versions ``1`` and ``2`` are almost exactly the same, with
-the only difference being a header on entries in the changeset
-segment. Version ``3`` adds support for exchanging treemanifests and
-includes revlog flags in the delta header.
+high-level, versions ``1`` and ``2`` are almost exactly the same, with the
+only difference being an additional item in the *delta header*.  Version
+``3`` adds support for revlog flags in the *delta header* and optionally
+exchanging treemanifests (enabled by setting an option on the
+``changegroup`` part in the bundle2).
 
-Changegroups consists of 3 logical segments::
+When not exchanging treemanifests, changegroups consist of 3 logical
+segments::
 
    +---------------------------------+
    |           |          |          |
    | changeset | manifest | filelogs |
    |           |          |          |
+   |           |          |          |
    +---------------------------------+
 
+When exchanging treemanifests, there are 4 logical segments::
+
+   +-------------------------------------------------+
+   |           |          |               |          |
+   | changeset |   root   | treemanifests | filelogs |
+   |           | manifest |               |          |
+   |           |          |               |          |
+   +-------------------------------------------------+
+
 The principal building block of each segment is a *chunk*. A *chunk*
 is a framed piece of data::
 
    +---------------------------------------+
    |           |                           |
    |  length   |           data            |
-   | (32 bits) |       <length> bytes      |
+   | (4 bytes) |   (<length - 4> bytes)    |
    |           |                           |
    +---------------------------------------+
 
-Each chunk starts with a 32-bit big-endian signed integer indicating
-the length of the raw data that follows.
+All integers are big-endian signed integers. Each chunk starts with a 32-bit
+integer indicating the length of the entire chunk (including the length field
+itself).
 
-There is a special case chunk that has 0 length (``0x00000000``). We
-call this an *empty chunk*.
+There is a special case chunk that has a value of 0 for the length
+(``0x00000000``). We call this an *empty chunk*.
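
A hedged reader for this framing, in Python (file object handling and
error checks elided)::

    import struct

    def readchunk(fh):
        length = struct.unpack('>i', fh.read(4))[0]  # 32-bit BE signed
        if length == 0:
            return None              # an *empty chunk*: segment boundary
        return fh.read(length - 4)   # payload excludes the length field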
 
 Delta Groups
 ============
@@ -43,26 +57,27 @@
   +------------------------------------------------------------------------+
   |                |             |               |             |           |
   | chunk0 length  | chunk0 data | chunk1 length | chunk1 data |    0x0    |
-  |   (32 bits)    |  (various)  |   (32 bits)   |  (various)  | (32 bits) |
+  |   (4 bytes)    |  (various)  |   (4 bytes)   |  (various)  | (4 bytes) |
   |                |             |               |             |           |
-  +------------------------------------------------------------+-----------+
+  +------------------------------------------------------------------------+
 
 Each *chunk*'s data consists of the following::
 
-  +-----------------------------------------+
-  |              |              |           |
-  | delta header | mdiff header |   delta   |
-  |  (various)   |  (12 bytes)  | (various) |
-  |              |              |           |
-  +-----------------------------------------+
+  +---------------------------------------+
+  |                        |              |
+  |     delta header       |  delta data  |
+  |  (various by version)  |  (various)   |
+  |                        |              |
+  +---------------------------------------+
 
-The *length* field is the byte length of the remaining 3 logical pieces
-of data. The *delta* is a diff from an existing entry in the changelog.
+The *delta data* is a series of *delta*s that describe a diff from an existing
+entry (either one that the recipient already has, or one previously
+specified in the bundle/changegroup).
 
 The *delta header* is different between versions ``1``, ``2``, and
 ``3`` of the changegroup format.
 
-Version 1::
+Version 1 (headerlen=80)::
 
    +------------------------------------------------------+
    |            |             |             |             |
@@ -71,7 +86,7 @@
    |            |             |             |             |
    +------------------------------------------------------+
 
-Version 2::
+Version 2 (headerlen=100)::
 
    +------------------------------------------------------------------+
    |            |             |             |            |            |
@@ -80,30 +95,35 @@
    |            |             |             |            |            |
    +------------------------------------------------------------------+
 
-Version 3::
+Version 3 (headerlen=102)::
 
    +------------------------------------------------------------------------------+
    |            |             |             |            |            |           |
-   |    node    |   p1 node   |   p2 node   | base node  | link node  | flags     |
+   |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
    | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
    |            |             |             |            |            |           |
    +------------------------------------------------------------------------------+
 
-The *mdiff header* consists of 3 32-bit big-endian signed integers
-describing offsets at which to apply the following delta content::
+The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
+series of *delta*s, densely packed (no separators). These deltas describe a diff
+from an existing entry (either one that the recipient already has, or one
+previously specified in the bundle/changegroup). The format is described
+more fully in ``hg help internals.bdiff``, but briefly::
 
-   +-------------------------------------+
-   |           |            |            |
-   |  offset   | old length | new length |
-   | (32 bits) |  (32 bits) |  (32 bits) |
-   |           |            |            |
-   +-------------------------------------+
+   +---------------------------------------------------------------+
+   |              |            |            |                      |
+   | start offset | end offset | new length |        content       |
+   |  (4 bytes)   |  (4 bytes) |  (4 bytes) | (<new length> bytes) |
+   |              |            |            |                      |
+   +---------------------------------------------------------------+
+
+Please note that the length field in the delta data does *not* include itself.
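
A sketch of walking those densely packed deltas, per the layout above::

    import struct

    def iterdeltas(deltadata):
        pos = 0
        while pos < len(deltadata):
            start, end, newlen = struct.unpack('>iii', deltadata[pos:pos + 12])
            yield start, end, deltadata[pos + 12:pos + 12 + newlen]
            pos += 12 + newlen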
 
 In version 1, the delta is always applied against the previous node from
 the changegroup or the first parent if this is the first entry in the
 changegroup.
 
-In version 2, the delta base node is encoded in the entry in the
+In version 2 and up, the delta base node is encoded in the entry in the
 changegroup. This allows the delta to be expressed against any parent,
 which can result in smaller deltas and more efficient encoding of data.
 
@@ -111,43 +131,58 @@
 =================
 
 The *changeset segment* consists of a single *delta group* holding
-changelog data. It is followed by an *empty chunk* to denote the
-boundary to the *manifests segment*.
+changelog data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the *manifest segment*.
 
 Manifest Segment
 ================
 
-The *manifest segment* consists of a single *delta group* holding
-manifest data. It is followed by an *empty chunk* to denote the boundary
-to the *filelogs segment*.
+The *manifest segment* consists of a single *delta group* holding manifest
+data. If treemanifests are in use, it contains only the manifest for the
+root directory of the repository. Otherwise, it contains the entire
+manifest data. The *empty chunk* at the end of the *delta group* denotes
+the boundary to the next segment (either the *treemanifests segment* or the
+*filelogs segment*, depending on version and the request options).
+
+Treemanifests Segment
+---------------------
+
+The *treemanifests segment* only exists in changegroup version ``3``, and
+only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 outside of bundle2).
+Aside from the filenames in the *treemanifests segment* containing a
+trailing ``/`` character, it behaves identically to the *filelogs segment*
+(see below). The final sub-segment is followed by an *empty chunk* (logically,
+a sub-segment with filename size 0). This denotes the boundary to the
+*filelogs segment*.
 
 Filelogs Segment
 ================
 
-The *filelogs* segment consists of multiple sub-segments, each
+The *filelogs segment* consists of multiple sub-segments, each
 corresponding to an individual file whose data is being described::
 
-   +--------------------------------------+
-   |          |          |          |     |
-   | filelog0 | filelog1 | filelog2 | ... |
-   |          |          |          |     |
-   +--------------------------------------+
+   +--------------------------------------------------+
+   |          |          |          |     |           |
+   | filelog0 | filelog1 | filelog2 | ... |    0x0    |
+   |          |          |          |     | (4 bytes) |
+   |          |          |          |     |           |
+   +--------------------------------------------------+
 
-In version ``3`` of the changegroup format, filelogs may include
-directory logs when treemanifests are in use. directory logs are
-identified by having a trailing '/' on their filename (see below).
-
-The final filelog sub-segment is followed by an *empty chunk* to denote
-the end of the segment and the overall changegroup.
+The final filelog sub-segment is followed by an *empty chunk* (logically,
+a sub-segment with filename size 0). This denotes the end of the segment
+and of the overall changegroup.
 
 Each filelog sub-segment consists of the following::
 
-   +------------------------------------------+
-   |               |            |             |
-   | filename size |  filename  | delta group |
-   |   (32 bits)   |  (various) |  (various)  |
-   |               |            |             |
-   +------------------------------------------+
+   +------------------------------------------------------+
+   |                 |                      |             |
+   | filename length |       filename       | delta group |
+   |    (4 bytes)    | (<length - 4> bytes) |  (various)  |
+   |                 |                      |             |
+   +------------------------------------------------------+
 
 That is, a *chunk* consisting of the filename (not terminated or padded)
-followed by N chunks constituting the *delta group* for this file.
+followed by N chunks constituting the *delta group* for this file. The
+*empty chunk* at the end of each *delta group* denotes the boundary to the
+next filelog sub-segment.
--- a/mercurial/help/internals/requirements.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/internals/requirements.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -55,6 +55,17 @@
 
 The requirement was added in Mercurial 1.3 (released July 2009).
 
+relshared
+=========
+
+Derivative of ``shared``; the location of the store is relative to the
+store of this repository.
+
+This requirement is set when a repository is created via :hg:`share`
+using the ``--relative`` option.
+
+The requirement was added in Mercurial 4.2 (released May 2017).
+
 dotencode
 =========
 
--- a/mercurial/help/internals/revlogs.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/internals/revlogs.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -108,9 +108,9 @@
 
 16-19 (4 bytes)
    Base or previous revision this revision's delta was produced against.
-   -1 means this revision holds full text (as opposed to a delta).
-   For generaldelta repos, this is the previous revision in the delta
-   chain. For non-generaldelta repos, this is the base or first
+   This revision holds full text (as opposed to a delta) if it points to
+   itself. For generaldelta repos, this is the previous revision in the
+   delta chain. For non-generaldelta repos, this is the base or first
    revision in the delta chain.
 
 20-23 (4 bytes)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/pager.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,35 @@
+Some Mercurial commands produce a lot of output, and Mercurial will
+attempt to use a pager to make those commands more pleasant.
+
+To set the pager that should be used, set the ``pager.pager`` option::
+
+  [pager]
+  pager = less -FRX
+
+If no pager is set, the pager extensions uses the environment variable
+$PAGER. If neither pager.pager, nor $PAGER is set, a default pager
+will be used, typically `more`.
+
+You can disable the pager for certain commands by adding them to the
+pager.ignore list::
+
+  [pager]
+  ignore = version, help, update
+
+To ignore global commands like :hg:`version` or :hg:`help`, you have
+to specify them in your user configuration file.
+
+To control whether the pager is used at all for an individual command,
+you can use --pager=<value>::
+
+  - use as needed: `auto`.
+  - require the pager: `yes` or `on`.
+  - suppress the pager: `no` or `off` (any unrecognized value
+    will also work).
+
+To globally turn off all attempts to use a pager, set::
+
+  [pager]
+  enable = false
+
+which will prevent the pager from running.
--- a/mercurial/help/patterns.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/patterns.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -13,7 +13,10 @@
 
 To use a plain path name without any pattern matching, start it with
 ``path:``. These path names must completely match starting at the
-current repository root.
+current repository root, and when the path points to a directory, it is matched
+recursively. To match all files in a directory non-recursively (not including
+any files in subdirectories), ``rootfilesin:`` can be used, specifying a
+path relative to the repository root.
 
 To use an extended glob, start a name with ``glob:``. Globs are rooted
 at the current directory; a glob such as ``*.c`` will only match files
@@ -39,12 +42,15 @@
 All patterns, except for ``glob:`` specified in command line (not for
 ``-I`` or ``-X`` options), can match also against directories: files
 under matched directories are treated as matched.
+For ``-I`` and ``-X`` options, ``glob:`` will match directories recursively.
 
 Plain examples::
 
-  path:foo/bar   a name bar in a directory named foo in the root
-                 of the repository
-  path:path:name a file or directory named "path:name"
+  path:foo/bar        a name bar in a directory named foo in the root
+                      of the repository
+  path:path:name      a file or directory named "path:name"
+  rootfilesin:foo/bar the files in a directory called foo/bar, but not any files
+                      in its subdirectories and not a file bar in directory foo
 
 Glob examples::
 
@@ -52,6 +58,8 @@
   *.c            any name ending in ".c" in the current directory
   **.c           any name ending in ".c" in any subdirectory of the
                  current directory including itself.
+  foo/*          any file in directory foo plus all its subdirectories,
+                 recursively
   foo/*.c        any name ending in ".c" in the directory foo
   foo/**.c       any name ending in ".c" in any subdirectory of foo
                  including itself.
--- a/mercurial/help/subrepos.txt	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/help/subrepos.txt	Tue Apr 18 12:24:34 2017 -0400
@@ -136,6 +136,10 @@
     subrepository changes are available when referenced by top-level
     repositories.  Push is a no-op for Subversion subrepositories.
 
+:serve: serve does not recurse into subrepositories unless
+    -S/--subrepos is specified.  Git and Subversion subrepositories
+    are currently silently ignored.
+
 :status: status does not recurse into subrepositories unless
     -S/--subrepos is specified. Subrepository changes are displayed as
     regular Mercurial changes on the subrepository
--- a/mercurial/hg.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hg.py	Tue Apr 18 12:24:34 2017 -0400
@@ -40,6 +40,7 @@
     url,
     util,
     verify as verifymod,
+    vfs as vfsmod,
 )
 
 release = lock.release
@@ -102,7 +103,7 @@
     if u.fragment:
         branch = u.fragment
         u.fragment = None
-    return str(u), (branch, branches or [])
+    return bytes(u), (branch, branches or [])
 
 schemes = {
     'bundle': bundlerepo,
@@ -195,7 +196,8 @@
         return ''
     return os.path.basename(os.path.normpath(path))
 
-def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None):
+def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
+          relative=False):
     '''create a shared repository'''
 
     if not islocal(source):
@@ -218,8 +220,8 @@
 
     sharedpath = srcrepo.sharedpath # if our source is already sharing
 
-    destwvfs = scmutil.vfs(dest, realpath=True)
-    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
+    destwvfs = vfsmod.vfs(dest, realpath=True)
+    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
 
     if destvfs.lexists():
         raise error.Abort(_('destination already exists'))
@@ -235,7 +237,16 @@
         if inst.errno != errno.ENOENT:
             raise
 
-    requirements += 'shared\n'
+    if relative:
+        try:
+            sharedpath = os.path.relpath(sharedpath, destvfs.base)
+            requirements += 'relshared\n'
+        except IOError as e:
+            raise error.Abort(_('cannot calculate relative path'),
+                              hint=str(e))
+    else:
+        requirements += 'shared\n'
+
     destvfs.write('requires', requirements)
     destvfs.write('sharedpath', sharedpath)
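
A hedged usage sketch of the new option (paths are illustrative)::

    from mercurial import hg, ui as uimod

    u = uimod.ui.load()
    # writes 'relshared' to .hg/requires and a relative .hg/sharedpath
    hg.share(u, source='../main', dest='work', relative=True)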
 
@@ -302,8 +313,8 @@
             else:
                 ui.progress(topic, pos + num)
         srcpublishing = srcrepo.publishing()
-        srcvfs = scmutil.vfs(srcrepo.sharedpath)
-        dstvfs = scmutil.vfs(destpath)
+        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
+        dstvfs = vfsmod.vfs(destpath)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
@@ -359,7 +370,7 @@
         if e.errno != errno.EEXIST:
             raise
 
-    poolvfs = scmutil.vfs(pooldir)
+    poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)
 
     with lock.lock(poolvfs, '%s.lock' % basename):
@@ -464,7 +475,7 @@
     if not dest:
         raise error.Abort(_("empty destination path is not valid"))
 
-    destvfs = scmutil.vfs(dest, expandpath=True)
+    destvfs = vfsmod.vfs(dest, expandpath=True)
     if destvfs.lexists():
         if not destvfs.isdir():
             raise error.Abort(_("destination '%s' already exists") % dest)
@@ -548,7 +559,7 @@
 
             destlock = copystore(ui, srcrepo, destpath)
             # copy bookmarks over
-            srcbookmarks = srcrepo.join('bookmarks')
+            srcbookmarks = srcrepo.vfs.join('bookmarks')
             dstbookmarks = os.path.join(destpath, 'bookmarks')
             if os.path.exists(srcbookmarks):
                 util.copyfile(srcbookmarks, dstbookmarks)
@@ -556,7 +567,7 @@
             # Recomputing branch cache might be slow on big repos,
             # so just copy it
             def copybranchcache(fname):
-                srcbranchcache = srcrepo.join('cache/%s' % fname)
+                srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
                 dstbranchcache = os.path.join(dstcachedir, fname)
                 if os.path.exists(srcbranchcache):
                     if not os.path.exists(dstcachedir):
@@ -602,14 +613,10 @@
                     else:
                         stream = None
                 # internal config: ui.quietbookmarkmove
-                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
-                try:
-                    local.ui.setconfig(
-                        'ui', 'quietbookmarkmove', True, 'clone')
+                overrides = {('ui', 'quietbookmarkmove'): True}
+                with local.ui.configoverride(overrides, 'clone'):
                     exchange.pull(local, srcpeer, revs,
                                   streamclonerequested=stream)
-                finally:
-                    local.ui.restoreconfig(quiet)
             elif srcrepo:
                 exchange.push(srcrepo, destpeer, revs=revs,
                               bookmarks=srcrepo._bookmarks.keys())
@@ -681,18 +688,19 @@
     repo.ui.status(_("%d files updated, %d files merged, "
                      "%d files removed, %d files unresolved\n") % stats)
 
-def updaterepo(repo, node, overwrite):
+def updaterepo(repo, node, overwrite, updatecheck=None):
     """Update the working directory to node.
 
     When overwrite is set, changes are clobbered, merged else
 
     returns stats (see pydoc mercurial.merge.applyupdates)"""
     return mergemod.update(repo, node, False, overwrite,
-                           labels=['working copy', 'destination'])
+                           labels=['working copy', 'destination'],
+                           updatecheck=updatecheck)
 
-def update(repo, node, quietempty=False):
-    """update the working directory to node, merging linear changes"""
-    stats = updaterepo(repo, node, False)
+def update(repo, node, quietempty=False, updatecheck=None):
+    """update the working directory to node"""
+    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
     _showstats(repo, stats, quietempty)
     if stats[3]:
         repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
@@ -704,7 +712,7 @@
 def clean(repo, node, show_stats=True, quietempty=False):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = updaterepo(repo, node, True)
-    util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
+    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
     if show_stats:
         _showstats(repo, stats, quietempty)
     return stats[3] > 0
@@ -712,7 +720,7 @@
 # naming conflict in updatetotally()
 _clean = clean
 
-def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
+def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
     """Update the working directory with extra care for non-file components
 
     This takes care of non-file components below:
@@ -724,22 +732,38 @@
     :checkout: to which revision the working directory is updated
     :brev: a name, which might be a bookmark to be activated after updating
     :clean: whether changes in the working directory can be discarded
-    :check: whether changes in the working directory should be checked
+    :updatecheck: how to deal with a dirty working directory
+
+    Valid values for updatecheck are (None => linear):
+
+     * abort: abort if the working directory is dirty
+     * none: don't check (merge working directory changes into destination)
+     * linear: check that update is linear before merging working directory
+               changes into destination
+     * noconflict: check that the update does not result in file merges
 
     This returns whether conflict is detected at updating or not.
     """
+    if updatecheck is None:
+        updatecheck = ui.config('experimental', 'updatecheck')
+        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
+            # If not configured, or invalid value configured
+            updatecheck = 'linear'
     with repo.wlock():
         movemarkfrom = None
         warndest = False
         if checkout is None:
-            updata = destutil.destupdate(repo, clean=clean, check=check)
+            updata = destutil.destupdate(repo, clean=clean)
             checkout, movemarkfrom, brev = updata
             warndest = True
 
         if clean:
             ret = _clean(repo, checkout)
         else:
-            ret = _update(repo, checkout)
+            if updatecheck == 'abort':
+                cmdutil.bailifchanged(repo, merge=False)
+                updatecheck = 'none'
+            ret = _update(repo, checkout, updatecheck=updatecheck)
 
         if not ret and movemarkfrom:
             if movemarkfrom == repo['.'].node():
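
Until a dedicated command-line flag exists, the behavior is chosen via
configuration; for example, to refuse updates that would require file
merges::

  [experimental]
  updatecheck = noconflict
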
@@ -802,7 +826,7 @@
         if not chlist:
             ui.status(_("no changes found\n"))
             return subreporecurse()
-
+        ui.pager('incoming')
         displayer = cmdutil.show_changeset(ui, other, opts, buffered)
         displaychlist(other, chlist, displayer)
         displayer.close()
@@ -870,6 +894,7 @@
 
     if opts.get('newest_first'):
         o.reverse()
+    ui.pager('outgoing')
     displayer = cmdutil.show_changeset(ui, repo, opts)
     count = 0
     for n in o:
--- a/mercurial/hgweb/common.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hgweb/common.py	Tue Apr 18 12:24:34 2017 -0400
@@ -91,11 +91,13 @@
 
 
 class ErrorResponse(Exception):
-    def __init__(self, code, message=None, headers=[]):
+    def __init__(self, code, message=None, headers=None):
         if message is None:
             message = _statusmessage(code)
         Exception.__init__(self, message)
         self.code = code
+        if headers is None:
+            headers = []
         self.headers = headers
 
 class continuereader(object):
@@ -133,6 +135,17 @@
 def get_mtime(spath):
     return get_stat(spath, "00changelog.i").st_mtime
 
+def ispathsafe(path):
+    """Determine if a path is safe to use for filesystem access."""
+    parts = path.split('/')
+    for part in parts:
+        if (part in ('', os.curdir, os.pardir) or
+            pycompat.ossep in part or
+            pycompat.osaltsep is not None and pycompat.osaltsep in part):
+            return False
+
+    return True
+
 def staticfile(directory, fname, req):
     """return a file inside directory with guessed Content-Type header
 
@@ -142,13 +155,10 @@
     Return an empty string if fname is illegal or file not found.
 
     """
-    parts = fname.split('/')
-    for part in parts:
-        if (part in ('', os.curdir, os.pardir) or
-            pycompat.ossep in part or
-            pycompat.osaltsep is not None and pycompat.osaltsep in part):
-            return
-    fpath = os.path.join(*parts)
+    if not ispathsafe(fname):
+        return
+
+    fpath = os.path.join(*fname.split('/'))
     if isinstance(directory, str):
         directory = [directory]
     for d in directory:
@@ -158,9 +168,9 @@
     try:
         os.stat(path)
         ct = mimetypes.guess_type(path)[0] or "text/plain"
-        fp = open(path, 'rb')
-        data = fp.read()
-        fp.close()
+        with open(path, 'rb') as fh:
+            data = fh.read()
+
         req.respond(HTTP_OK, ct, body=data)
     except TypeError:
         raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
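
ispathsafe() is now reusable outside staticfile(); illustrative checks::

    from mercurial.hgweb import common

    common.ispathsafe('static/hgicon.png')  # True
    common.ispathsafe('../etc/passwd')      # False: parent-directory part
    common.ispathsafe('a//b')               # False: empty path component
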
--- a/mercurial/hgweb/hgweb_mod.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hgweb/hgweb_mod.py	Tue Apr 18 12:24:34 2017 -0400
@@ -335,7 +335,7 @@
         req.url = req.env['SCRIPT_NAME']
         if not req.url.endswith('/'):
             req.url += '/'
-        if 'REPO_NAME' in req.env:
+        if req.env.get('REPO_NAME'):
             req.url += req.env['REPO_NAME'] + '/'
 
         if 'PATH_INFO' in req.env:
--- a/mercurial/hgweb/hgwebdir_mod.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hgweb/hgwebdir_mod.py	Tue Apr 18 12:24:34 2017 -0400
@@ -254,15 +254,32 @@
                 return []
 
             # top-level index
-            elif not virtual:
+
+            repos = dict(self.repos)
+
+            if (not virtual or virtual == 'index') and virtual not in repos:
                 req.respond(HTTP_OK, ctype)
                 return self.makeindex(req, tmpl)
 
             # nested indexes and hgwebs
 
-            repos = dict(self.repos)
-            virtualrepo = virtual
-            while virtualrepo:
+            if virtual.endswith('/index') and virtual not in repos:
+                subdir = virtual[:-len('index')]
+                if any(r.startswith(subdir) for r in repos):
+                    req.respond(HTTP_OK, ctype)
+                    return self.makeindex(req, tmpl, subdir)
+
+            def _virtualdirs():
+                # Check the full virtual path, each parent, and the root ('')
+                if virtual != '':
+                    yield virtual
+
+                    for p in util.finddirs(virtual):
+                        yield p
+
+                yield ''
+
+            for virtualrepo in _virtualdirs():
                 real = repos.get(virtualrepo)
                 if real:
                     req.env['REPO_NAME'] = virtualrepo
@@ -276,11 +293,6 @@
                     except error.RepoError as inst:
                         raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
 
-                up = virtualrepo.rfind('/')
-                if up < 0:
-                    break
-                virtualrepo = virtualrepo[:up]
-
             # browse subdirectories
             subdir = virtual + '/'
             if [r for r in repos if r.startswith(subdir)]:
@@ -352,8 +364,7 @@
                             pass
 
                 parts = [name]
-                if 'PATH_INFO' in req.env:
-                    parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
+                parts.insert(0, '/' + subdir.rstrip('/'))
                 if req.env['SCRIPT_NAME']:
                     parts.insert(0, req.env['SCRIPT_NAME'])
                 url = re.sub(r'/+', '/', '/'.join(parts) + '/')
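
The _virtualdirs() generator replaces the manual rfind('/') loop with an explicit most-to-least-specific probe. An equivalent standalone sketch with util.finddirs expanded inline (assuming finddirs yields parent directories deepest-first):

    def _virtualdirs(virtual):
        # yield the full path, each parent, then the root ''
        if virtual:
            yield virtual
            while '/' in virtual:
                virtual = virtual.rsplit('/', 1)[0]
                yield virtual
        yield ''

    assert list(_virtualdirs('team/project/repo')) == [
        'team/project/repo', 'team/project', 'team', '']
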
--- a/mercurial/hgweb/webcommands.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hgweb/webcommands.py	Tue Apr 18 12:24:34 2017 -0400
@@ -28,11 +28,14 @@
 
 from .. import (
     archival,
+    context,
     encoding,
     error,
     graphmod,
     revset,
+    revsetlang,
     scmutil,
+    smartset,
     templatefilters,
     templater,
     util,
@@ -238,20 +241,20 @@
 
         revdef = 'reverse(%s)' % query
         try:
-            tree = revset.parse(revdef)
+            tree = revsetlang.parse(revdef)
         except error.ParseError:
             # can't parse to a revset tree
             return MODE_KEYWORD, query
 
-        if revset.depth(tree) <= 2:
+        if revsetlang.depth(tree) <= 2:
             # no revset syntax used
             return MODE_KEYWORD, query
 
         if any((token, (value or '')[:3]) == ('string', 're:')
-                    for token, value, pos in revset.tokenize(revdef)):
+               for token, value, pos in revsetlang.tokenize(revdef)):
             return MODE_KEYWORD, query
 
-        funcsused = revset.funcsused(tree)
+        funcsused = revsetlang.funcsused(tree)
         if not funcsused.issubset(revset.safesymbols):
             return MODE_KEYWORD, query
 
@@ -752,13 +755,13 @@
     if fctx is not None:
         path = fctx.path()
         ctx = fctx.changectx()
+    basectx = ctx.p1()
 
-    parity = paritygen(web.stripecount)
     style = web.config('web', 'style', 'paper')
     if 'style' in req.form:
         style = req.form['style'][0]
 
-    diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
+    diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style)
     if fctx is not None:
         rename = webutil.renamelink(fctx)
         ctx = fctx
@@ -966,11 +969,20 @@
         except ValueError:
             pass
 
+    lrange = webutil.linerange(req)
+
     lessvars = copy.copy(tmpl.defaults['sessionvars'])
     lessvars['revcount'] = max(revcount / 2, 1)
     morevars = copy.copy(tmpl.defaults['sessionvars'])
     morevars['revcount'] = revcount * 2
 
+    patch = 'patch' in req.form
+    if patch:
+        lessvars['patch'] = morevars['patch'] = req.form['patch'][0]
+    descend = 'descend' in req.form
+    if descend:
+        lessvars['descend'] = morevars['descend'] = req.form['descend'][0]
+
     count = fctx.filerev() + 1
     start = max(0, count - revcount) # first rev on this page
     end = min(count, start + revcount) # last rev on this page
@@ -979,26 +991,74 @@
     repo = web.repo
     revs = fctx.filelog().revs(start, end - 1)
     entries = []
-    for i in revs:
-        iterfctx = fctx.filectx(i)
-        entries.append(dict(
-            parity=next(parity),
-            filerev=i,
-            file=f,
-            rename=webutil.renamelink(iterfctx),
-            **webutil.commonentry(repo, iterfctx)))
-    entries.reverse()
+
+    diffstyle = web.config('web', 'style', 'paper')
+    if 'style' in req.form:
+        diffstyle = req.form['style'][0]
+
+    def diff(fctx, linerange=None):
+        ctx = fctx.changectx()
+        basectx = ctx.p1()
+        path = fctx.path()
+        return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle,
+                             linerange=linerange,
+                             lineidprefix='%s-' % ctx.hex()[:12])
+
+    linerange = None
+    if lrange is not None:
+        linerange = webutil.formatlinerange(*lrange)
+        # deactivate numeric nav links when linerange is specified as this
+        # would require a dedicated "revnav" class
+        nav = None
+        if descend:
+            it = context.blockdescendants(fctx, *lrange)
+        else:
+            it = context.blockancestors(fctx, *lrange)
+        for i, (c, lr) in enumerate(it, 1):
+            diffs = None
+            if patch:
+                diffs = diff(c, linerange=lr)
+            # follow renames across filtered (not in range) revisions
+            path = c.path()
+            entries.append(dict(
+                parity=next(parity),
+                filerev=c.rev(),
+                file=path,
+                diff=diffs,
+                linerange=webutil.formatlinerange(*lr),
+                **webutil.commonentry(repo, c)))
+            if i == revcount:
+                break
+        lessvars['linerange'] = webutil.formatlinerange(*lrange)
+        morevars['linerange'] = lessvars['linerange']
+    else:
+        for i in revs:
+            iterfctx = fctx.filectx(i)
+            diffs = None
+            if patch:
+                diffs = diff(iterfctx)
+            entries.append(dict(
+                parity=next(parity),
+                filerev=i,
+                file=f,
+                diff=diffs,
+                rename=webutil.renamelink(iterfctx),
+                **webutil.commonentry(repo, iterfctx)))
+        entries.reverse()
+        revnav = webutil.filerevnav(web.repo, fctx.path())
+        nav = revnav.gen(end - 1, revcount, count)
 
     latestentry = entries[:1]
 
-    revnav = webutil.filerevnav(web.repo, fctx.path())
-    nav = revnav.gen(end - 1, revcount, count)
     return tmpl("filelog",
                 file=f,
                 nav=nav,
                 symrev=webutil.symrevorshortnode(req, fctx),
                 entries=entries,
+                descend=descend,
+                patch=patch,
                 latestentry=latestentry,
+                linerange=linerange,
                 revcount=revcount,
                 morevars=morevars,
                 lessvars=lessvars,
@@ -1148,7 +1208,7 @@
         # We have to feed a baseset to dagwalker as it is expecting smartset
         # object. This does not have a big impact on hgweb performance itself
         # since hgweb graphing code is not itself lazy yet.
-        dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
+        dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
         # As we said one line above... not lazy.
         tree = list(graphmod.colored(dag, web.repo))
 
--- a/mercurial/hgweb/webutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hgweb/webutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -18,6 +18,7 @@
 
 from .common import (
     ErrorResponse,
+    HTTP_BAD_REQUEST,
     HTTP_NOT_FOUND,
     paritygen,
 )
@@ -26,6 +27,7 @@
     context,
     error,
     match,
+    mdiff,
     patch,
     pathutil,
     templatefilters,
@@ -72,6 +74,8 @@
         """return True if any revision to navigate over"""
         return self._first() is not None
 
+    __bool__ = __nonzero__
+
     def _first(self):
         """return the minimum non-filtered changeset or None"""
         try:
@@ -142,7 +146,9 @@
         return hex(self._changelog.node(self._revlog.linkrev(rev)))
 
 class _siblings(object):
-    def __init__(self, siblings=[], hiderev=None):
+    def __init__(self, siblings=None, hiderev=None):
+        if siblings is None:
+            siblings = []
         self.siblings = [s for s in siblings if s.node() != nullid]
         if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
             self.siblings = []
@@ -313,6 +319,26 @@
 
     return fctx
 
+def linerange(req):
+    linerange = req.form.get('linerange')
+    if linerange is None:
+        return None
+    if len(linerange) > 1:
+        raise ErrorResponse(HTTP_BAD_REQUEST,
+                            'redundant linerange parameter')
+    try:
+        fromline, toline = map(int, linerange[0].split(':', 1))
+    except ValueError:
+        raise ErrorResponse(HTTP_BAD_REQUEST,
+                            'invalid linerange parameter')
+    try:
+        return util.processlinerange(fromline, toline)
+    except error.ParseError as exc:
+        raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
+
+def formatlinerange(fromline, toline):
+    return '%d:%d' % (fromline + 1, toline)
+
 def commonentry(repo, ctx):
     node = ctx.node()
     return {
@@ -384,8 +410,7 @@
     if 'style' in req.form:
         style = req.form['style'][0]
 
-    parity = paritygen(web.stripecount)
-    diff = diffs(web.repo, tmpl, ctx, basectx, None, parity, style)
+    diff = diffs(web, tmpl, ctx, basectx, None, style)
 
     parity = paritygen(web.stripecount)
     diffstatsgen = diffstatgen(ctx, basectx)
@@ -410,18 +435,12 @@
     if len(files) > max:
         yield tmpl('fileellipses')
 
-def diffs(repo, tmpl, ctx, basectx, files, parity, style):
+def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
+          lineidprefix=''):
 
-    def countgen():
-        start = 1
-        while True:
-            yield start
-            start += 1
-
-    blockcount = countgen()
-    def prettyprintlines(diff, blockno):
-        for lineno, l in enumerate(diff.splitlines(True)):
-            difflineno = "%d.%d" % (blockno, lineno + 1)
+    def prettyprintlines(lines, blockno):
+        for lineno, l in enumerate(lines, 1):
+            difflineno = "%d.%d" % (blockno, lineno)
             if l.startswith('+'):
                 ltype = "difflineplus"
             elif l.startswith('-'):
@@ -432,39 +451,35 @@
                 ltype = "diffline"
             yield tmpl(ltype,
                        line=l,
-                       lineno=lineno + 1,
-                       lineid="l%s" % difflineno,
+                       lineno=lineno,
+                       lineid=lineidprefix + "l%s" % difflineno,
                        linenumber="% 8s" % difflineno)
 
+    repo = web.repo
     if files:
         m = match.exact(repo.root, repo.getcwd(), files)
     else:
         m = match.always(repo.root, repo.getcwd())
 
     diffopts = patch.diffopts(repo.ui, untrusted=True)
-    if basectx is None:
-        parents = ctx.parents()
-        if parents:
-            node1 = parents[0].node()
-        else:
-            node1 = nullid
-    else:
-        node1 = basectx.node()
+    node1 = basectx.node()
     node2 = ctx.node()
+    parity = paritygen(web.stripecount)
 
-    block = []
-    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
-        if chunk.startswith('diff') and block:
-            blockno = next(blockcount)
+    diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
+    for blockno, (header, hunks) in enumerate(diffhunks, 1):
+        if style != 'raw':
+            header = header[1:]
+        lines = [h + '\n' for h in header]
+        for hunkrange, hunklines in hunks:
+            if linerange is not None and hunkrange is not None:
+                s1, l1, s2, l2 = hunkrange
+                if not mdiff.hunkinrange((s2, l2), linerange):
+                    continue
+            lines.extend(hunklines)
+        if lines:
             yield tmpl('diffblock', parity=next(parity), blockno=blockno,
-                       lines=prettyprintlines(''.join(block), blockno))
-            block = []
-        if chunk.startswith('diff') and style != 'raw':
-            chunk = ''.join(chunk.splitlines(True)[1:])
-        block.append(chunk)
-    blockno = next(blockcount)
-    yield tmpl('diffblock', parity=next(parity), blockno=blockno,
-               lines=prettyprintlines(''.join(block), blockno))
+                       lines=prettyprintlines(lines, blockno))
 
 def compare(tmpl, context, leftlines, rightlines):
     '''Generator function that provides side-by-side comparison data.'''
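
The rewritten diffs() consumes patch.diffhunks() and drops whole hunks outside the requested range using mdiff.hunkinrange on the target-side (start, length) pair. A sketch of the overlap test that helper is assumed to perform:

    def hunkinrange(hunk, linerange):
        # does [start, start + length) intersect [lo, hi)?
        start, length = hunk
        lo, hi = linerange
        return lo < start + length and start < hi

    assert hunkinrange((10, 5), (0, 11))       # lines 10-14 overlap
    assert not hunkinrange((10, 5), (15, 20))  # range starts after hunk
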
--- a/mercurial/hook.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/hook.py	Tue Apr 18 12:24:34 2017 -0400
@@ -9,7 +9,6 @@
 
 import os
 import sys
-import time
 
 from .i18n import _
 from . import (
@@ -20,7 +19,7 @@
     util,
 )
 
-def _pythonhook(ui, repo, name, hname, funcname, args, throw):
+def _pythonhook(ui, repo, htype, hname, funcname, args, throw):
     '''call python hook. hook is callable object, looked up as
     name in python module. if callable returns "true", hook
     fails, else passes. if hook raises exception, treated as
@@ -88,10 +87,10 @@
                 % (hname, funcname))
 
     ui.note(_("calling hook %s: %s\n") % (hname, funcname))
-    starttime = time.time()
+    starttime = util.timer()
 
     try:
-        r = obj(ui=ui, repo=repo, hooktype=name, **args)
+        r = obj(ui=ui, repo=repo, hooktype=htype, **args)
     except Exception as exc:
         if isinstance(exc, error.Abort):
             ui.warn(_('error: %s hook failed: %s\n') %
@@ -106,19 +105,19 @@
         ui.traceback()
         return True, True
     finally:
-        duration = time.time() - starttime
+        duration = util.timer() - starttime
         ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
-               name, funcname, duration)
+               htype, funcname, duration)
     if r:
         if throw:
             raise error.HookAbort(_('%s hook failed') % hname)
         ui.warn(_('warning: %s hook failed\n') % hname)
     return r, False
 
-def _exthook(ui, repo, name, cmd, args, throw):
+def _exthook(ui, repo, htype, name, cmd, args, throw):
     ui.note(_("running hook %s: %s\n") % (name, cmd))
 
-    starttime = time.time()
+    starttime = util.timer()
     env = {}
 
     # make in-memory changes visible to external process
@@ -127,6 +126,8 @@
         repo.dirstate.write(tr)
         if tr and tr.writepending():
             env['HG_PENDING'] = repo.root
+    env['HG_HOOKTYPE'] = htype
+    env['HG_HOOKNAME'] = name
 
     for k, v in args.iteritems():
         if callable(v):
@@ -143,9 +144,9 @@
         cwd = repo.root
     else:
         cwd = pycompat.getcwd()
-    r = ui.system(cmd, environ=env, cwd=cwd)
+    r = ui.system(cmd, environ=env, cwd=cwd, blockedtag='exthook-%s' % (name,))
 
-    duration = time.time() - starttime
+    duration = util.timer() - starttime
     ui.log('exthook', 'exthook-%s: %s finished in %0.2f seconds\n',
            name, cmd, duration)
     if r:
@@ -187,22 +188,22 @@
     global _redirect
     _redirect = state
 
-def hook(ui, repo, name, throw=False, **args):
+def hook(ui, repo, htype, throw=False, **args):
     if not ui.callhooks:
         return False
 
     hooks = []
     for hname, cmd in _allhooks(ui):
-        if hname.split('.')[0] == name and cmd:
+        if hname.split('.')[0] == htype and cmd:
             hooks.append((hname, cmd))
 
-    res = runhooks(ui, repo, name, hooks, throw=throw, **args)
+    res = runhooks(ui, repo, htype, hooks, throw=throw, **args)
     r = False
     for hname, cmd in hooks:
         r = res[hname][0] or r
     return r
 
-def runhooks(ui, repo, name, hooks, throw=False, **args):
+def runhooks(ui, repo, htype, hooks, throw=False, **args):
     res = {}
     oldstdout = -1
 
@@ -224,13 +225,14 @@
             if cmd is _fromuntrusted:
                 if throw:
                     raise error.HookAbort(
-                        _('untrusted hook %s not executed') % name,
+                        _('untrusted hook %s not executed') % hname,
                         hint = _("see 'hg help config.trusted'"))
-                ui.warn(_('warning: untrusted hook %s not executed\n') % name)
+                ui.warn(_('warning: untrusted hook %s not executed\n') % hname)
                 r = 1
                 raised = False
             elif callable(cmd):
-                r, raised = _pythonhook(ui, repo, name, hname, cmd, args, throw)
+                r, raised = _pythonhook(ui, repo, htype, hname, cmd, args,
+                                        throw)
             elif cmd.startswith('python:'):
                 if cmd.count(':') >= 2:
                     path, cmd = cmd[7:].rsplit(':', 1)
@@ -245,10 +247,10 @@
                     hookfn = getattr(mod, cmd)
                 else:
                     hookfn = cmd[7:].strip()
-                r, raised = _pythonhook(ui, repo, name, hname, hookfn, args,
+                r, raised = _pythonhook(ui, repo, htype, hname, hookfn, args,
                                         throw)
             else:
-                r = _exthook(ui, repo, hname, cmd, args, throw)
+                r = _exthook(ui, repo, htype, hname, cmd, args, throw)
                 raised = False
 
             res[hname] = r, raised
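
With HG_HOOKTYPE and HG_HOOKNAME exported, one external script can now distinguish its call sites. For example, given (hook names and script path invented):

    [hooks]
    pretxncommit.lint = ./check.sh
    pretxnchangegroup.lint = ./check.sh

check.sh runs with HG_HOOKTYPE=pretxncommit and HG_HOOKNAME=pretxncommit.lint for the first entry, and HG_HOOKTYPE=pretxnchangegroup, HG_HOOKNAME=pretxnchangegroup.lint for the second.
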
--- a/mercurial/httpclient/__init__.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/httpclient/__init__.py	Tue Apr 18 12:24:34 2017 -0400
@@ -631,7 +631,7 @@
         self.close()
         self._connect(pheaders)
 
-    def request(self, method, path, body=None, headers={},
+    def request(self, method, path, body=None, headers=None,
                 expect_continue=False):
         """Send a request to the server.
 
@@ -642,6 +642,8 @@
         available. Use the `getresponse()` method to retrieve the
         response.
         """
+        if headers is None:
+            headers = {}
         method = _ensurebytes(method)
         path = _ensurebytes(path)
         if self.busy():
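
This is the same mutable-default-argument fix applied to ErrorResponse earlier in the series: a default like headers={} is evaluated once and shared by every call. In miniature:

    def bad(headers={}):
        headers['seen'] = headers.get('seen', 0) + 1
        return headers

    assert bad() is bad()       # the same dict, every call
    assert bad()['seen'] == 3   # state leaks across calls

The headers=None plus `if headers is None: headers = {}` idiom gives each call a fresh dict.
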
--- a/mercurial/httpconnection.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/httpconnection.py	Tue Apr 18 12:24:34 2017 -0400
@@ -44,10 +44,10 @@
         self._total = self.length // 1024 * 2
 
     def read(self, *args, **kwargs):
-        try:
-            ret = self._data.read(*args, **kwargs)
-        except EOFError:
+        ret = self._data.read(*args, **kwargs)
+        if not ret:
             self.ui.progress(_('sending'), None)
+            return ret
         self._pos += len(ret)
         # We pass double the max for total because we currently have
         # to send the bundle twice in the case of a server that
@@ -67,13 +67,16 @@
 # moved here from url.py to avoid a cycle
 def readauthforuri(ui, uri, user):
     # Read configuration
-    config = dict()
+    groups = {}
     for key, val in ui.configitems('auth'):
+        if key in ('cookiefile',):
+            continue
+
         if '.' not in key:
             ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
             continue
         group, setting = key.rsplit('.', 1)
-        gdict = config.setdefault(group, dict())
+        gdict = groups.setdefault(group, {})
         if setting in ('username', 'cert', 'key'):
             val = util.expandpath(val)
         gdict[setting] = val
@@ -83,7 +86,7 @@
     bestuser = None
     bestlen = 0
     bestauth = None
-    for group, auth in config.iteritems():
+    for group, auth in groups.iteritems():
         if user and user != auth.get('username', user):
             # If a username was set in the URI, the entry username
             # must either match it or be unset
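
readauthforuri() now tolerates non-group [auth] keys such as cookiefile instead of warning about them. A sketch of the section it parses (hostnames and paths invented):

    [auth]
    cookiefile = ~/.hgcookies        # consumed elsewhere; skipped here
    corp.prefix = hg.example.com
    corp.username = alice
    corp.key = ~/.ssh/alice-cert.key

Each `group.setting` pair lands in a per-group dict ('corp' above), from which the best URI-prefix match is chosen.
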
--- a/mercurial/httppeer.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/httppeer.py	Tue Apr 18 12:24:34 2017 -0400
@@ -20,6 +20,7 @@
     bundle2,
     error,
     httpconnection,
+    pycompat,
     statichttprepo,
     url,
     util,
@@ -30,30 +31,6 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-# FUTURE: consider refactoring this API to use generators. This will
-# require a compression engine API to emit generators.
-def decompressresponse(response, engine):
-    try:
-        reader = engine.decompressorreader(response)
-    except httplib.HTTPException:
-        raise IOError(None, _('connection ended unexpectedly'))
-
-    # We need to wrap reader.read() so HTTPException on subsequent
-    # reads is also converted.
-    # Ideally we'd use super() here. However, if ``reader`` isn't a new-style
-    # class, this can raise:
-    # TypeError: super() argument 1 must be type, not classobj
-    origread = reader.read
-    class readerproxy(reader.__class__):
-        def read(self, *args, **kwargs):
-            try:
-                return origread(*args, **kwargs)
-            except httplib.HTTPException:
-                raise IOError(None, _('connection ended unexpectedly'))
-
-    reader.__class__ = readerproxy
-    return reader
-
 def encodevalueinheaders(value, header, limit):
     """Encode a string value into multiple HTTP headers.
 
@@ -74,6 +51,41 @@
 
     return result
 
+def _wraphttpresponse(resp):
+    """Wrap an HTTPResponse with common error handlers.
+
+    This ensures that any I/O from any consumer raises the appropriate
+    error and messaging.
+    """
+    origread = resp.read
+
+    class readerproxy(resp.__class__):
+        def read(self, size=None):
+            try:
+                return origread(size)
+            except httplib.IncompleteRead as e:
+                # e.expected is an integer if length known or None otherwise.
+                if e.expected:
+                    msg = _('HTTP request error (incomplete response; '
+                            'expected %d bytes got %d)') % (e.expected,
+                                                           len(e.partial))
+                else:
+                    msg = _('HTTP request error (incomplete response)')
+
+                raise error.PeerTransportError(
+                    msg,
+                    hint=_('this may be an intermittent network failure; '
+                           'if the error persists, consider contacting the '
+                           'network or server operator'))
+            except httplib.HTTPException as e:
+                raise error.PeerTransportError(
+                    _('HTTP request error (%s)') % e,
+                    hint=_('this may be an intermittent failure; '
+                           'if the error persists, consider contacting the '
+                           'network or server operator'))
+
+    resp.__class__ = readerproxy
+
 class httppeer(wireproto.wirepeer):
     def __init__(self, ui, path):
         self.path = path
@@ -206,7 +218,9 @@
                 headers[header] = value
                 varyheaders.append(header)
 
-        headers['Vary'] = ','.join(varyheaders)
+        if varyheaders:
+            headers['Vary'] = ','.join(varyheaders)
+
         req = self.requestbuilder(cu, data, headers)
 
         if data is not None:
@@ -222,6 +236,10 @@
             self.ui.debug('http error while sending %s command\n' % cmd)
             self.ui.traceback()
             raise IOError(None, inst)
+
+        # Insert error handlers for common I/O failures.
+        _wraphttpresponse(resp)
+
         # record the url we got redirected to
         resp_url = resp.geturl()
         if resp_url.endswith(qs):
@@ -257,9 +275,11 @@
                 raise error.RepoError(_("'%s' sent a broken Content-Type "
                                         "header (%s)") % (safeurl, proto))
 
+            # TODO consider switching to a decompression reader that uses
+            # generators.
             if version_info == (0, 1):
                 if _compressible:
-                    return decompressresponse(resp, util.compengines['zlib'])
+                    return util.compengines['zlib'].decompressorreader(resp)
                 return resp
             elif version_info == (0, 2):
                 # application/mercurial-0.2 always identifies the compression
@@ -267,13 +287,13 @@
                 elen = struct.unpack('B', resp.read(1))[0]
                 ename = resp.read(elen)
                 engine = util.compengines.forwiretype(ename)
-                return decompressresponse(resp, engine)
+                return engine.decompressorreader(resp)
             else:
                 raise error.RepoError(_("'%s' uses newer protocol %s") %
                                       (safeurl, version))
 
         if _compressible:
-            return decompressresponse(resp, util.compengines['zlib'])
+            return util.compengines['zlib'].decompressorreader(resp)
 
         return resp
 
@@ -327,7 +347,7 @@
         try:
             # dump bundle to disk
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
-            fh = os.fdopen(fd, "wb")
+            fh = os.fdopen(fd, pycompat.sysstr("wb"))
             d = fp.read(4096)
             while d:
                 fh.write(d)
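
_wraphttpresponse() reuses the readerproxy trick from the deleted decompressresponse(): rather than wrapping the response in a forwarding object, it swaps the instance's __class__ for a subclass whose read() translates low-level failures. The pattern in miniature (error types simplified):

    class Resp(object):
        def read(self, size=None):
            raise RuntimeError('connection dropped')

    resp = Resp()
    origread = resp.read                  # bound to the original class
    class readerproxy(resp.__class__):
        def read(self, size=None):
            try:
                return origread(size)
            except RuntimeError as e:
                raise IOError(None, str(e))  # caller-facing error type
    resp.__class__ = readerproxy

    try:
        resp.read()
    except IOError:
        pass                              # consumers now see IOError
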
--- a/mercurial/i18n.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/i18n.py	Tue Apr 18 12:24:34 2017 -0400
@@ -21,7 +21,7 @@
 if getattr(sys, 'frozen', None) is not None:
     module = pycompat.sysexecutable
 else:
-    module = __file__
+    module = pycompat.fsencode(__file__)
 
 try:
     unicode
--- a/mercurial/keepalive.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/keepalive.py	Tue Apr 18 12:24:34 2017 -0400
@@ -298,11 +298,12 @@
 
     def _start_transaction(self, h, req):
         # What follows mostly reimplements HTTPConnection.request()
-        # except it adds self.parent.addheaders in the mix.
-        headers = dict(self.parent.addheaders)
-        headers.update(req.headers)
-        headers.update(req.unredirected_hdrs)
-        headers = dict((n.lower(), v) for n, v in headers.items())
+        # except it adds self.parent.addheaders in the mix and sends headers
+        # in a deterministic order (to make testing easier).
+        headers = util.sortdict(self.parent.addheaders)
+        headers.update(sorted(req.headers.items()))
+        headers.update(sorted(req.unredirected_hdrs.items()))
+        headers = util.sortdict((n.lower(), v) for n, v in headers.items())
         skipheaders = {}
         for n in ('host', 'accept-encoding'):
             if n in headers:
@@ -310,14 +311,16 @@
         try:
             if req.has_data():
                 data = req.get_data()
-                h.putrequest('POST', req.get_selector(), **skipheaders)
+                h.putrequest(
+                    req.get_method(), req.get_selector(), **skipheaders)
                 if 'content-type' not in headers:
                     h.putheader('Content-type',
                                 'application/x-www-form-urlencoded')
                 if 'content-length' not in headers:
                     h.putheader('Content-length', '%d' % len(data))
             else:
-                h.putrequest('GET', req.get_selector(), **skipheaders)
+                h.putrequest(
+                    req.get_method(), req.get_selector(), **skipheaders)
         except socket.error as err:
             raise urlerr.urlerror(err)
         for k, v in headers.items():
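
Switching to util.sortdict makes the header order stable across runs, which the updated comment notes is for testability. Assuming sortdict behaves like an insertion-ordered dict, the effect of feeding update() pre-sorted items, with OrderedDict as a stand-in:

    from collections import OrderedDict

    headers = OrderedDict([('user-agent', 'hg')])
    headers.update(sorted({'b': '2', 'a': '1'}.items()))
    assert list(headers) == ['user-agent', 'a', 'b']
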
--- a/mercurial/localrepo.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/localrepo.py	Tue Apr 18 12:24:34 2017 -0400
@@ -28,6 +28,7 @@
     bundle2,
     changegroup,
     changelog,
+    color,
     context,
     dirstate,
     dirstateguard,
@@ -48,14 +49,18 @@
     peer,
     phases,
     pushkey,
+    pycompat,
     repoview,
     revset,
+    revsetlang,
     scmutil,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
+    txnutil,
     util,
+    vfs as vfsmod,
 )
 
 release = lockmod.release
@@ -66,6 +71,8 @@
     """All filecache usage on repo are done for logic that should be unfiltered
     """
 
+    def join(self, obj, fname):
+        return obj.vfs.join(fname)
     def __get__(self, repo, type=None):
         if repo is None:
             return self
@@ -113,7 +120,9 @@
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
-    def __init__(self, repo, caps=moderncaps):
+    def __init__(self, repo, caps=None):
+        if caps is None:
+            caps = moderncaps.copy()
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
@@ -241,7 +250,7 @@
     supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                             'manifestv2'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
-                                             'dotencode'))
+                                             'relshared', 'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
     filtername = None
 
@@ -251,16 +260,21 @@
 
     def __init__(self, baseui, path, create=False):
         self.requirements = set()
-        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
-        self.wopener = self.wvfs
+        # wvfs: rooted at the repository root, used to access the working copy
+        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
+        # vfs: rooted at .hg, used to access repo files outside of .hg/store
+        self.vfs = None
+        # svfs: usually rooted at .hg/store, used to access repository history
+        # If this is a shared repository, this vfs may point to another
+        # repository's .hg/store directory.
+        self.svfs = None
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                 realfs=False)
-        self.vfs = scmutil.vfs(self.path)
-        self.opener = self.vfs
+        self.vfs = vfsmod.vfs(self.path)
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
@@ -269,8 +283,8 @@
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
-            self.ui.readconfig(self.join("hgrc"), self.root)
-            extensions.loadall(self.ui)
+            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
+            self._loadextensions()
         except IOError:
             pass
 
@@ -283,6 +297,7 @@
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
+        color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
@@ -321,8 +336,10 @@
 
         self.sharedpath = self.path
         try:
-            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
-                              realpath=True)
+            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
+            if 'relshared' in self.requirements:
+                sharedpath = self.vfs.join(sharedpath)
+            vfs = vfsmod.vfs(sharedpath, realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
@@ -333,7 +350,7 @@
                 raise
 
         self.store = store.store(
-                self.requirements, self.sharedpath, scmutil.vfs)
+                self.requirements, self.sharedpath, vfsmod.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
@@ -368,9 +385,22 @@
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
+    @property
+    def wopener(self):
+        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
+        return self.wvfs
+
+    @property
+    def opener(self):
+        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
+        return self.vfs
+
     def close(self):
         self._writecaches()
 
+    def _loadextensions(self):
+        extensions.loadall(self.ui)
+
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
@@ -461,9 +491,9 @@
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibly subclass of the repo)
-        class proxycls(repoview.repoview, self.unfiltered().__class__):
+        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
             pass
-        return proxycls(self, name)
+        return filteredrepo(self, name)
 
     @repofilecache('bookmarks', 'bookmarks.current')
     def _bookmarks(self):
@@ -509,10 +539,8 @@
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
-        if 'HG_PENDING' in encoding.environ:
-            p = encoding.environ['HG_PENDING']
-            if p.startswith(self.root):
-                c.readpending('00changelog.i.a')
+        if txnutil.mayhavepending(self.root):
+            c.readpending('00changelog.i.a')
         return c
 
     def _constructmanifest(self):
@@ -560,6 +588,8 @@
     def __nonzero__(self):
         return True
 
+    __bool__ = __nonzero__
+
     def __len__(self):
         return len(self.changelog)
 
@@ -570,15 +600,16 @@
         '''Find revisions matching a revset.
 
         The revset is specified as a string ``expr`` that may contain
-        %-formatting to escape certain types. See ``revset.formatspec``.
+        %-formatting to escape certain types. See ``revsetlang.formatspec``.
 
         Revset aliases from the configuration are not expanded. To expand
-        user aliases, consider calling ``scmutil.revrange()``.
+        user aliases, consider calling ``scmutil.revrange()`` or
+        ``repo.anyrevs([expr], user=True)``.
 
         Returns a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
-        expr = revset.formatspec(expr, *args)
+        expr = revsetlang.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)
 
@@ -594,6 +625,18 @@
         for r in self.revs(expr, *args):
             yield self[r]
 
+    def anyrevs(self, specs, user=False):
+        '''Find revisions matching one of the given revsets.
+
+        Revset aliases from the configuration are not expanded by default. To
+        expand user aliases, specify ``user=True``.
+        '''
+        if user:
+            m = revset.matchany(self.ui, specs, repo=self)
+        else:
+            m = revset.matchany(None, specs)
+        return m(self)
+
     def url(self):
         return 'file:' + self.root
 
@@ -606,109 +649,10 @@
         """
         return hook.hook(self.ui, self, name, throw, **args)
 
-    @unfilteredmethod
-    def _tag(self, names, node, message, local, user, date, extra=None,
-             editor=False):
-        if isinstance(names, str):
-            names = (names,)
-
-        branches = self.branchmap()
-        for name in names:
-            self.hook('pretag', throw=True, node=hex(node), tag=name,
-                      local=local)
-            if name in branches:
-                self.ui.warn(_("warning: tag %s conflicts with existing"
-                " branch name\n") % name)
-
-        def writetags(fp, names, munge, prevtags):
-            fp.seek(0, 2)
-            if prevtags and prevtags[-1] != '\n':
-                fp.write('\n')
-            for name in names:
-                if munge:
-                    m = munge(name)
-                else:
-                    m = name
-
-                if (self._tagscache.tagtypes and
-                    name in self._tagscache.tagtypes):
-                    old = self.tags().get(name, nullid)
-                    fp.write('%s %s\n' % (hex(old), m))
-                fp.write('%s %s\n' % (hex(node), m))
-            fp.close()
-
-        prevtags = ''
-        if local:
-            try:
-                fp = self.vfs('localtags', 'r+')
-            except IOError:
-                fp = self.vfs('localtags', 'a')
-            else:
-                prevtags = fp.read()
-
-            # local tags are stored in the current charset
-            writetags(fp, names, None, prevtags)
-            for name in names:
-                self.hook('tag', node=hex(node), tag=name, local=local)
-            return
-
-        try:
-            fp = self.wfile('.hgtags', 'rb+')
-        except IOError as e:
-            if e.errno != errno.ENOENT:
-                raise
-            fp = self.wfile('.hgtags', 'ab')
-        else:
-            prevtags = fp.read()
-
-        # committed tags are stored in UTF-8
-        writetags(fp, names, encoding.fromlocal, prevtags)
-
-        fp.close()
-
-        self.invalidatecaches()
-
-        if '.hgtags' not in self.dirstate:
-            self[None].add(['.hgtags'])
-
-        m = matchmod.exact(self.root, '', ['.hgtags'])
-        tagnode = self.commit(message, user, date, extra=extra, match=m,
-                              editor=editor)
-
-        for name in names:
-            self.hook('tag', node=hex(node), tag=name, local=local)
-
-        return tagnode
-
     def tag(self, names, node, message, local, user, date, editor=False):
-        '''tag a revision with one or more symbolic names.
-
-        names is a list of strings or, when adding a single tag, names may be a
-        string.
-
-        if local is True, the tags are stored in a per-repository file.
-        otherwise, they are stored in the .hgtags file, and a new
-        changeset is committed with the change.
-
-        keyword arguments:
-
-        local: whether to store tags in non-version-controlled file
-        (default False)
-
-        message: commit message to use if committing
-
-        user: name of user to use if committing
-
-        date: date tuple to use if committing'''
-
-        if not local:
-            m = matchmod.exact(self.root, '', ['.hgtags'])
-            if any(self.status(match=m, unknown=True, ignored=True)):
-                raise error.Abort(_('working copy of .hgtags is changed'),
-                                 hint=_('please commit .hgtags manually'))
-
-        self.tags() # instantiate the cache
-        self._tag(names, node, message, local, user, date, editor=editor)
+        self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
+        tagsmod.tag(self, names, node, message, local, user, date,
+                    editor=editor)
 
     @filteredpropertycache
     def _tagscache(self):
@@ -763,10 +707,12 @@
         # be one tagtype for all such "virtual" tags?  Or is the status
         # quo fine?
 
-        alltags = {}                    # map tag name to (node, hist)
-        tagtypes = {}
 
-        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
+        # map tag name to (node, hist)
+        alltags = tagsmod.findglobaltags(self.ui, self)
+        # map tag name to tag type
+        tagtypes = dict((tag, 'global') for tag in alltags)
+
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts.  Have to re-encode tag names because
@@ -896,6 +842,7 @@
         return None
 
     def join(self, f, *insidef):
+        self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.2')
         return self.vfs.join(os.path.join(f, *insidef))
 
     def wjoin(self, f, *insidef):
@@ -938,9 +885,12 @@
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
+        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
         return self.wvfs(f, mode)
 
     def _link(self, f):
+        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
+                           '4.2')
         return self.wvfs.islink(f)
 
     def _loadfilter(self, filter):
@@ -988,7 +938,7 @@
         self._datafilters[name] = filter
 
     def wread(self, filename):
-        if self._link(filename):
+        if self.wvfs.islink(filename):
             data = self.wvfs.readlink(filename)
         else:
             data = self.wvfs.read(filename)
@@ -1038,7 +988,8 @@
                 hint=_("run 'hg recover' to clean up transaction"))
 
         idbase = "%.40f#%f" % (random.random(), time.time())
-        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
+        ha = hex(hashlib.sha1(idbase).digest())
+        txnid = 'TXN:' + ha
         self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
 
         self._writejournal(desc)
@@ -1050,10 +1001,82 @@
         vfsmap = {'plain': self.vfs} # root of .hg/
         # we must avoid cyclic reference between repo and transaction.
         reporef = weakref.ref(self)
-        def validate(tr):
+        # Code to track tag movement
+        #
+        # Since tags are all handled as file content, it is actually quite hard
+        # to track these movements from a code perspective. So we fall back to
+        # tracking at the repository level. One could envision tracking changes
+        # to the '.hgtags' file through changegroup application, but that fails
+        # to cope with cases where a transaction exposes new heads without a
+        # changegroup being involved (e.g. phase movement).
+        #
+        # For now, we gate the feature behind a flag since it likely comes
+        # with performance impacts. The current code runs more often than
+        # needed and does not use caches as much as it could. The focus is on
+        # the behavior of the feature so we disable it by default. The flag
+        # will be removed when we are happy with the performance impact.
+        #
+        # Once this feature is no longer experimental move the following
+        # documentation to the appropriate help section:
+        #
+        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
+        # tags (new or changed or deleted tags). In addition the details of
+        # these changes are made available in a file at:
+        #     ``REPOROOT/.hg/changes/tags.changes``.
+        # Make sure you check for HG_TAG_MOVED before reading that file as it
+        # might exist from a previous transaction even if no tags were touched
+        # in this one. Changes are recorded in a line-based format::
+        #
+        #     <action> <hex-node> <tag-name>\n
+        #
+        #   Actions are defined as follows:
+        #   "-R": tag is removed,
+        #   "+A": tag is added,
+        #   "-M": tag is moved (old value),
+        #   "+M": tag is moved (new value),
+        tracktags = lambda x: None
+        # experimental config: experimental.hook-track-tags
+        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
+                                             False)
+        if desc != 'strip' and shouldtracktags:
+            oldheads = self.changelog.headrevs()
+            def tracktags(tr2):
+                repo = reporef()
+                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
+                newheads = repo.changelog.headrevs()
+                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
+                # note: we compare lists here; as we do it only once,
+                # building a set would not be cheaper
+                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
+                if changes:
+                    tr2.hookargs['tag_moved'] = '1'
+                    with repo.vfs('changes/tags.changes', 'w',
+                                  atomictemp=True) as changesfile:
+                        # note: we do not register the file with the
+                        # transaction because we need it to still exist when
+                        # the transaction is closed (for txnclose hooks)
+                        tagsmod.writediff(changesfile, changes)
+        def validate(tr2):
             """will run pre-closing hooks"""
+            # XXX the transaction API is a bit lacking here so we take a hacky
+            # path for now
+            #
+            # We cannot add this as a "pending" hook since the 'tr.hookargs'
+            # dict is copied before these run. In addition we need the data
+            # available to in-memory hooks too.
+            #
+            # Moreover, we also need to make sure this runs before txnclose
+            # hooks and there is no "pending" mechanism that would execute
+            # logic only if hooks are about to run.
+            #
+            # Fixing this limitation of the transaction is also needed to track
+            # other families of changes (bookmarks, phases, obsolescence).
+            #
+            # This will have to be fixed before we remove the experimental
+            # gating.
+            tracktags(tr2)
             reporef().hook('pretxnclose', throw=True,
-                           txnname=desc, **tr.hookargs)
+                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
         def releasefn(tr, success):
             repo = reporef()
             if success:
@@ -1094,7 +1117,7 @@
 
             def hook():
                 reporef().hook('txnclose', throw=False, txnname=desc,
-                               **hookargs)
+                               **pycompat.strkwargs(hookargs))
             reporef()._afterlock(hook)
         tr.addfinalize('txnclose-hook', txnclosehook)
         def txnaborthook(tr2):
@@ -1270,7 +1293,7 @@
         redundant one doesn't).
         '''
         unfiltered = self.unfiltered() # all file caches are stored unfiltered
-        for k in self._filecache.keys():
+        for k in list(self._filecache.keys()):
             # dirstate is invalidated separately in invalidatedirstate()
             if k == 'dirstate':
                 continue
@@ -1852,6 +1875,11 @@
                                   listsubrepos)
 
     def heads(self, start=None):
+        if start is None:
+            cl = self.changelog
+            headrevs = reversed(cl.headrevs())
+            return [cl.node(rev) for rev in headrevs]
+
         heads = self.changelog.heads(start)
         # sort the output in rev descending order
         return sorted(heads, key=self.changelog.rev, reverse=True)
@@ -1972,6 +2000,10 @@
     renamefiles = [tuple(t) for t in files]
     def a():
         for vfs, src, dest in renamefiles:
+            # if src and dest refer to the same file, vfs.rename is a no-op,
+            # leaving both src and dest on disk; delete dest to make sure
+            # the rename cannot be such a no-op.
+            vfs.tryunlink(dest)
             try:
                 vfs.rename(src, dest)
             except OSError: # journal file does not yet exist
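
Among the localrepo additions, anyrevs() fills the gap between revs() (no alias expansion) and scmutil.revrange(): it matches any of several revset specs and can opt in to user-defined aliases. Hypothetical usage:

    # inside an extension command, with 'ui' and 'repo' provided by hg
    # (the revset specs here are illustrative only):
    for rev in repo.anyrevs(['.', 'draft() and head()'], user=True):
        ui.write('%d\n' % rev)
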
--- a/mercurial/lock.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/lock.py	Tue Apr 18 12:24:34 2017 -0400
@@ -9,15 +9,36 @@
 
 import contextlib
 import errno
+import os
 import socket
 import time
 import warnings
 
 from . import (
+    encoding,
     error,
+    pycompat,
     util,
 )
 
+def _getlockprefix():
+    """Return a string which is used to differentiate pid namespaces
+
+    It's useful to detect "dead" processes and remove stale locks with
+    confidence. Typically it's just the hostname. On modern Linux, we include
+    an extra Linux-specific pid namespace identifier.
+    """
+    result = socket.gethostname()
+    if pycompat.ispy3:
+        result = result.encode(pycompat.sysstr(encoding.encoding), 'replace')
+    if pycompat.sysplatform.startswith('linux'):
+        try:
+            result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
+        except OSError as ex:
+            if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
+                raise
+    return result
+
 class lock(object):
     '''An advisory lock held by one process to control access to a set
     of files.  Non-cooperating processes or incorrectly written scripts
@@ -99,8 +120,8 @@
             self.held += 1
             return
         if lock._host is None:
-            lock._host = socket.gethostname()
-        lockname = '%s:%s' % (lock._host, self.pid)
+            lock._host = _getlockprefix()
+        lockname = '%s:%d' % (lock._host, self.pid)
         retry = 5
         while not self.held and retry:
             retry -= 1
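
With the namespaced prefix, a Linux lock file's contents become 'host/<pid-ns-inode-hex>:<pid>' (plain 'host:<pid>' elsewhere), so a holder in a different pid namespace is never mistaken for a live local process. The composition, with invented values:

    host = 'buildbox/1a2b3c'            # hostname + '/' + ns inode in hex
    lockname = '%s:%d' % (host, 4242)
    assert lockname == 'buildbox/1a2b3c:4242'
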
--- a/mercurial/mail.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/mail.py	Tue Apr 18 12:24:34 2017 -0400
@@ -353,4 +353,4 @@
         except UnicodeDecodeError:
             pass
         uparts.append(part.decode('ISO-8859-1'))
-    return encoding.tolocal(u' '.join(uparts).encode('UTF-8'))
+    return encoding.unitolocal(u' '.join(uparts))
--- a/mercurial/manifest.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/manifest.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,12 +7,15 @@
 
 from __future__ import absolute_import
 
-import array
 import heapq
 import os
 import struct
 
 from .i18n import _
+from .node import (
+    bin,
+    hex,
+)
 from . import (
     error,
     mdiff,
@@ -38,9 +41,9 @@
         prev = l
         f, n = l.split('\0')
         if len(n) > 40:
-            yield f, revlog.bin(n[:40]), n[40:]
+            yield f, bin(n[:40]), n[40:]
         else:
-            yield f, revlog.bin(n), ''
+            yield f, bin(n), ''
 
 def _parsev2(data):
     metadataend = data.find('\n')
@@ -124,6 +127,8 @@
         zeropos = data.find('\x00', pos)
         return data[pos:zeropos]
 
+    __next__ = next
+
 class lazymanifestiterentries(object):
     def __init__(self, lm):
         self.lm = lm
@@ -147,8 +152,10 @@
         self.pos += 1
         return (data[pos:zeropos], hashval, flags)
 
+    __next__ = next
+
 def unhexlify(data, extra, pos, length):
-    s = data[pos:pos + length].decode('hex')
+    s = bin(data[pos:pos + length])
     if extra:
         s += chr(extra & 0xff)
     return s
@@ -173,7 +180,7 @@
         if not data:
             return []
         pos = data.find("\n")
-        if pos == -1 or data[-1] != '\n':
+        if pos == -1 or data[-1:] != '\n':
             raise ValueError("Manifest did not end in a newline.")
         positions = [0]
         prev = data[:data.find('\x00')]
@@ -251,8 +258,8 @@
         return self.data[start:end]
 
     def __getitem__(self, key):
-        if not isinstance(key, str):
-            raise TypeError("getitem: manifest keys must be a string.")
+        if not isinstance(key, bytes):
+        raise TypeError("getitem: manifest keys must be a byte string.")
         needle = self.bsearch(key)
         if needle == -1:
             raise KeyError
@@ -277,17 +284,17 @@
             self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
 
     def __setitem__(self, key, value):
-        if not isinstance(key, str):
-            raise TypeError("setitem: manifest keys must be a string.")
+        if not isinstance(key, bytes):
+            raise TypeError("setitem: manifest keys must be a byte string.")
         if not isinstance(value, tuple) or len(value) != 2:
             raise TypeError("Manifest values must be a tuple of (node, flags).")
         hashval = value[0]
-        if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
-            raise TypeError("node must be a 20-byte string")
+        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
+            raise TypeError("node must be a 20-byte byte string")
         flags = value[1]
         if len(hashval) == 22:
             hashval = hashval[:-1]
-        if not isinstance(flags, str) or len(flags) > 1:
+        if not isinstance(flags, bytes) or len(flags) > 1:
             raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
         needle, found = self.bsearch2(key)
         if found:
@@ -351,7 +358,7 @@
         self.extradata = []
 
     def _pack(self, d):
-        return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'
+        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
 
     def text(self):
         self._compact()
@@ -427,6 +434,8 @@
         # makes it easier for extensions to override.
         return len(self._lm) != 0
 
+    __bool__ = __nonzero__
+
     def __setitem__(self, key, node):
         self._lm[key] = node, self.flags(key, '')
 
@@ -445,8 +454,12 @@
     def keys(self):
         return list(self.iterkeys())
 
-    def filesnotin(self, m2):
+    def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.filesnotin(m2)
         diff = self.diff(m2)
         files = set(filepath
                     for filepath, hashflags in diff.iteritems()
@@ -523,7 +536,7 @@
         m._lm = self._lm.filtercopy(match)
         return m
 
-    def diff(self, m2, clean=False):
+    def diff(self, m2, match=None, clean=False):
         '''Finds changes between the current manifest and m2.
 
         Args:
@@ -538,6 +551,10 @@
         the nodeid will be None and the flags will be the empty
         string.
         '''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.diff(m2, clean=clean)
         return self._lm.diff(m2._lm, clean)
 
     def setflag(self, key, flag):
@@ -574,7 +591,7 @@
             return self._lm.text()
 
     def fastdelta(self, base, changes):
-        """Given a base manifest text as an array.array and a list of changes
+        """Given a base manifest text as a bytearray and a list of changes
         relative to that text, compute a delta that can be used by revlog.
         """
         delta = []
@@ -620,8 +637,9 @@
         else:
             # For large changes, it's much cheaper to just build the text and
             # diff it.
-            arraytext = array.array('c', self.text())
-            deltatext = mdiff.textdiff(base, arraytext)
+            arraytext = bytearray(self.text())
+            deltatext = mdiff.textdiff(
+                util.buffer(base), util.buffer(arraytext))
 
         return arraytext, deltatext
 
@@ -632,10 +650,10 @@
     that string.  If start == end the string was not found and
     they indicate the proper sorted insertion point.
 
-    m should be a buffer or a string
-    s is a string'''
+    m should be a buffer, a memoryview or a byte string.
+    s is a byte string'''
     def advance(i, c):
-        while i < lenm and m[i] != c:
+        while i < lenm and m[i:i + 1] != c:
             i += 1
         return i
     if not s:
@@ -646,10 +664,10 @@
     while lo < hi:
         mid = (lo + hi) // 2
         start = mid
-        while start > 0 and m[start - 1] != '\n':
+        while start > 0 and m[start - 1:start] != '\n':
             start -= 1
         end = advance(start, '\0')
-        if m[start:end] < s:
+        if bytes(m[start:end]) < s:
             # we know that after the null there are 40 bytes of sha1
             # this translates to the bisect lo = mid + 1
             lo = advance(end + 40, '\n') + 1
@@ -679,12 +697,12 @@
     # for large addlist arrays, building a new array is cheaper
     # than repeatedly modifying the existing one
     currentposition = 0
-    newaddlist = array.array('c')
+    newaddlist = bytearray()
 
     for start, end, content in x:
         newaddlist += addlist[currentposition:start]
         if content:
-            newaddlist += array.array('c', content)
+            newaddlist += bytearray(content)
 
         currentposition = end
 
@@ -906,8 +924,13 @@
             copy._copyfunc = self._copyfunc
         return copy
 
-    def filesnotin(self, m2):
+    def filesnotin(self, m2, match=None):
         '''Set of files in this manifest that are not in the other'''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.filesnotin(m2)
+
         files = set()
         def _filesnotin(t1, t2):
             if t1._node == t2._node and not t1._dirty and not t2._dirty:
@@ -1025,7 +1048,7 @@
             ret._dirty = True
         return ret
 
-    def diff(self, m2, clean=False):
+    def diff(self, m2, match=None, clean=False):
         '''Finds changes between the current manifest and m2.
 
         Args:
@@ -1040,6 +1063,10 @@
         the nodeid will be None and the flags will be the empty
         string.
         '''
+        if match:
+            m1 = self.matches(match)
+            m2 = m2.matches(match)
+            return m1.diff(m2, clean=clean)
         result = {}
         emptytree = treemanifest()
         def _diff(t1, t2):
@@ -1128,11 +1155,32 @@
                 subp1, subp2 = subp2, subp1
             writesubtree(subm, subp1, subp2)
 
+    def walksubtrees(self, matcher=None):
+        """Returns an iterator of the subtrees of this manifest, including this
+        manifest itself.
+
+        If `matcher` is provided, it only returns subtrees that match.
+        """
+        if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
+            return
+        if not matcher or matcher(self._dir[:-1]):
+            yield self
+
+        self._load()
+        for d, subm in self._dirs.iteritems():
+            for subtree in subm.walksubtrees(matcher=matcher):
+                yield subtree
+
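A hypothetical use of the new generator, assuming tm is a loaded treemanifest and m is a matcher (both names are illustrative, not from this patch):

    for subtree in tm.walksubtrees(matcher=m):
        print(subtree.dir())   # '' for the root, 'a/' style paths below it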
 class manifestrevlog(revlog.revlog):
     '''A revlog that stores manifest texts. This is responsible for caching the
     full-text manifest contents.
     '''
-    def __init__(self, opener, dir='', dirlogcache=None):
+    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None):
+        """Constructs a new manifest revlog
+
+        `indexfile` - used by extensions to have two manifests at once,
+        e.g. when transitioning between flat manifests and treemanifests.
+        """
         # During normal operations, we expect to deal with not more than four
         # revs at a time (such as during commit --amend). When rebasing large
         # stacks of commits, the number can go up, hence the config knob below.
@@ -1150,12 +1198,16 @@
 
         self._fulltextcache = util.lrucachedict(cachesize)
 
-        indexfile = "00manifest.i"
         if dir:
             assert self._treeondisk, 'opts is %r' % opts
             if not dir.endswith('/'):
                 dir = dir + '/'
-            indexfile = "meta/" + dir + "00manifest.i"
+
+        if indexfile is None:
+            indexfile = '00manifest.i'
+            if dir:
+                indexfile = "meta/" + dir + indexfile
+
         self._dir = dir
         # The dirlogcache is kept on the root manifest log
         if dir:
@@ -1214,7 +1266,7 @@
             else:
                 text = m.text(self._usemanifestv2)
                 n = self.addrevision(text, transaction, link, p1, p2)
-                arraytext = array.array('c', text)
+                arraytext = bytearray(text)
 
         if arraytext is not None:
             self.fulltextcache[n] = arraytext
@@ -1224,7 +1276,7 @@
     def _addtree(self, m, transaction, link, m1, m2, readtree):
         # If the manifest is unchanged compared to one parent,
         # don't write a new revision
-        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
+        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
             return m.node()
         def writesubtree(subm, subp1, subp2):
             sublog = self.dirlog(subm.dir())
@@ -1232,13 +1284,17 @@
                        readtree=readtree)
         m.writesubtrees(m1, m2, writesubtree)
         text = m.dirtext(self._usemanifestv2)
-        # Double-check whether contents are unchanged to one parent
-        if text == m1.dirtext(self._usemanifestv2):
-            n = m1.node()
-        elif text == m2.dirtext(self._usemanifestv2):
-            n = m2.node()
-        else:
+        n = None
+        if self._dir != '':
+            # Double-check whether contents are unchanged to one parent
+            if text == m1.dirtext(self._usemanifestv2):
+                n = m1.node()
+            elif text == m2.dirtext(self._usemanifestv2):
+                n = m2.node()
+
+        if not n:
             n = self.addrevision(text, transaction, link, m1.node(), m2.node())
+
         # Save nodeid so parent manifest can calculate its nodeid
         m.setnode(n)
         return n
@@ -1252,8 +1308,6 @@
     class do not care about the implementation details of the actual manifests
     they receive (i.e. tree or flat or lazily loaded, etc)."""
     def __init__(self, opener, repo):
-        self._repo = repo
-
         usetreemanifest = False
         cachesize = 4
 
@@ -1300,7 +1354,7 @@
                     if node not in dirlog.nodemap:
                         raise LookupError(node, dirlog.indexfile,
                                           _('no node'))
-                m = treemanifestctx(self._repo, dir, node)
+                m = treemanifestctx(self, dir, node)
             else:
                 raise error.Abort(
                         _("cannot ask for manifest directory '%s' in a flat "
@@ -1311,9 +1365,9 @@
                     raise LookupError(node, self._revlog.indexfile,
                                       _('no node'))
             if self._treeinmem:
-                m = treemanifestctx(self._repo, '', node)
+                m = treemanifestctx(self, '', node)
             else:
-                m = manifestctx(self._repo, node)
+                m = manifestctx(self, node)
 
         if node != revlog.nullid:
             mancache = self._dirmancache.get(dir)
@@ -1328,18 +1382,18 @@
         self._revlog.clearcaches()
 
 class memmanifestctx(object):
-    def __init__(self, repo):
-        self._repo = repo
+    def __init__(self, manifestlog):
+        self._manifestlog = manifestlog
         self._manifestdict = manifestdict()
 
     def _revlog(self):
-        return self._repo.manifestlog._revlog
+        return self._manifestlog._revlog
 
     def new(self):
-        return memmanifestctx(self._repo)
+        return memmanifestctx(self._manifestlog)
 
     def copy(self):
-        memmf = memmanifestctx(self._repo)
+        memmf = memmanifestctx(self._manifestlog)
         memmf._manifestdict = self.read().copy()
         return memmf
 
@@ -1354,8 +1408,8 @@
     """A class representing a single revision of a manifest, including its
     contents, its parent revs, and its linkrev.
     """
-    def __init__(self, repo, node):
-        self._repo = repo
+    def __init__(self, manifestlog, node):
+        self._manifestlog = manifestlog
         self._data = None
 
         self._node = node
@@ -1368,16 +1422,16 @@
         #self.linkrev = revlog.linkrev(rev)
 
     def _revlog(self):
-        return self._repo.manifestlog._revlog
+        return self._manifestlog._revlog
 
     def node(self):
         return self._node
 
     def new(self):
-        return memmanifestctx(self._repo)
+        return memmanifestctx(self._manifestlog)
 
     def copy(self):
-        memmf = memmanifestctx(self._repo)
+        memmf = memmanifestctx(self._manifestlog)
         memmf._manifestdict = self.read().copy()
         return memmf
 
@@ -1386,13 +1440,13 @@
         return self._revlog().parents(self._node)
 
     def read(self):
-        if not self._data:
+        if self._data is None:
             if self._node == revlog.nullid:
                 self._data = manifestdict()
             else:
                 rl = self._revlog()
                 text = rl.revision(self._node)
-                arraytext = array.array('c', text)
+                arraytext = bytearray(text)
                 rl._fulltextcache[self._node] = arraytext
                 self._data = manifestdict(text)
         return self._data
@@ -1422,7 +1476,7 @@
         if revlog._usemanifestv2:
             # Need to perform a slow delta
             r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = self._repo.manifestlog[revlog.node(r0)].read()
+            m0 = self._manifestlog[revlog.node(r0)].read()
             m1 = self.read()
             md = manifestdict()
             for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
@@ -1440,19 +1494,19 @@
         return self.read().find(key)
 
 class memtreemanifestctx(object):
-    def __init__(self, repo, dir=''):
-        self._repo = repo
+    def __init__(self, manifestlog, dir=''):
+        self._manifestlog = manifestlog
         self._dir = dir
         self._treemanifest = treemanifest()
 
     def _revlog(self):
-        return self._repo.manifestlog._revlog
+        return self._manifestlog._revlog
 
     def new(self, dir=''):
-        return memtreemanifestctx(self._repo, dir=dir)
+        return memtreemanifestctx(self._manifestlog, dir=dir)
 
     def copy(self):
-        memmf = memtreemanifestctx(self._repo, dir=self._dir)
+        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
         memmf._treemanifest = self._treemanifest.copy()
         return memmf
 
@@ -1461,13 +1515,13 @@
 
     def write(self, transaction, link, p1, p2, added, removed):
         def readtree(dir, node):
-            return self._repo.manifestlog.get(dir, node).read()
+            return self._manifestlog.get(dir, node).read()
         return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
                                   added, removed, readtree=readtree)
 
 class treemanifestctx(object):
-    def __init__(self, repo, dir, node):
-        self._repo = repo
+    def __init__(self, manifestlog, dir, node):
+        self._manifestlog = manifestlog
         self._dir = dir
         self._data = None
 
@@ -1481,10 +1535,10 @@
         #self.linkrev = revlog.linkrev(rev)
 
     def _revlog(self):
-        return self._repo.manifestlog._revlog.dirlog(self._dir)
+        return self._manifestlog._revlog.dirlog(self._dir)
 
     def read(self):
-        if not self._data:
+        if self._data is None:
             rl = self._revlog()
             if self._node == revlog.nullid:
                 self._data = treemanifest()
@@ -1495,14 +1549,13 @@
                 def readsubtree(dir, subm):
                     # Set verify to False since we need to be able to create
                     # subtrees for trees that don't exist on disk.
-                    return self._repo.manifestlog.get(dir, subm,
-                                                      verify=False).read()
+                    return self._manifestlog.get(dir, subm, verify=False).read()
                 m.read(gettext, readsubtree)
                 m.setnode(self._node)
                 self._data = m
             else:
                 text = rl.revision(self._node)
-                arraytext = array.array('c', text)
+                arraytext = bytearray(text)
                 rl.fulltextcache[self._node] = arraytext
                 self._data = treemanifest(dir=self._dir, text=text)
 
@@ -1512,10 +1565,10 @@
         return self._node
 
     def new(self, dir=''):
-        return memtreemanifestctx(self._repo, dir=dir)
+        return memtreemanifestctx(self._manifestlog, dir=dir)
 
     def copy(self):
-        memmf = memtreemanifestctx(self._repo, dir=self._dir)
+        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
         memmf._treemanifest = self.read().copy()
         return memmf
 
@@ -1542,7 +1595,7 @@
         else:
             # Need to perform a slow delta
             r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = self._repo.manifestlog.get(self._dir, revlog.node(r0)).read()
+            m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
             m1 = self.read()
             md = treemanifest(dir=self._dir)
             for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
--- a/mercurial/match.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/match.py	Tue Apr 18 12:24:34 2017 -0400
@@ -85,7 +85,7 @@
     return True
 
 class match(object):
-    def __init__(self, root, cwd, patterns, include=[], exclude=[],
+    def __init__(self, root, cwd, patterns, include=None, exclude=None,
                  default='glob', exact=False, auditor=None, ctx=None,
                  listsubrepos=False, warn=None, badfn=None):
         """build an object to match a set of file patterns
@@ -104,7 +104,10 @@
         a pattern is one of:
         'glob:<glob>' - a glob relative to cwd
         're:<regexp>' - a regular expression
-        'path:<path>' - a path relative to repository root
+        'path:<path>' - a path relative to repository root, which is matched
+                        recursively
+        'rootfilesin:<path>' - a path relative to repository root, which is
+                        matched non-recursively (will not match subdirectories)
         'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
         'relpath:<path>' - a path relative to cwd
         'relre:<regexp>' - a regexp that needn't match the start of a name
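The practical difference between the two kinds, as a sketch assuming a repository rooted at /repo and the match constructor above:

    m = match('/repo', '', ['rootfilesin:src'])
    m('src/a.c')       # True: the file sits directly in src/
    m('src/lib/b.c')   # False: subdirectories are not matched
    m2 = match('/repo', '', ['path:src'])
    m2('src/lib/b.c')  # True: 'path:' matches recursively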
@@ -114,6 +117,10 @@
                               the same directory
         '<something>' - a pattern of the specified default type
         """
+        if include is None:
+            include = []
+        if exclude is None:
+            exclude = []
 
         self._root = root
         self._cwd = cwd
@@ -122,9 +129,12 @@
         self._always = False
         self._pathrestricted = bool(include or exclude or patterns)
         self._warn = warn
+
+        # roots are directories which are recursively included/excluded.
         self._includeroots = set()
+        self._excluderoots = set()
+        # dirs are directories which are non-recursively included.
         self._includedirs = set(['.'])
-        self._excluderoots = set()
 
         if badfn is not None:
             self.bad = badfn
@@ -134,14 +144,20 @@
             kindpats = self._normalize(include, 'glob', root, cwd, auditor)
             self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
                                               listsubrepos, root)
-            self._includeroots.update(_roots(kindpats))
-            self._includedirs.update(util.dirs(self._includeroots))
+            roots, dirs = _rootsanddirs(kindpats)
+            self._includeroots.update(roots)
+            self._includedirs.update(dirs)
             matchfns.append(im)
         if exclude:
             kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
             self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
                                               listsubrepos, root)
             if not _anypats(kindpats):
+                # Only consider recursive excludes as such - if a non-recursive
+                # exclude is used, we must still recurse into the excluded
+                # directory, at least to find subdirectories. In such a case,
+                # the regex still won't match the non-recursively-excluded
+                # files.
                 self._excluderoots.update(_roots(kindpats))
             matchfns.append(lambda f: not em(f))
         if exact:
@@ -153,7 +169,7 @@
         elif patterns:
             kindpats = self._normalize(patterns, default, root, cwd, auditor)
             if not _kindpatsalwaysmatch(kindpats):
-                self._files = _roots(kindpats)
+                self._files = _explicitfiles(kindpats)
                 self._anypats = self._anypats or _anypats(kindpats)
                 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
                                                    listsubrepos, root)
@@ -238,7 +254,7 @@
             return 'all'
         if dir in self._excluderoots:
             return False
-        if (self._includeroots and
+        if ((self._includeroots or self._includedirs != set(['.'])) and
             '.' not in self._includeroots and
             dir not in self._includeroots and
             dir not in self._includedirs and
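visitdir() is what lets tree walks prune directories, so non-recursive includes must keep their parent directories visitable even though _includeroots no longer contains them. Expected behavior for a hypothetical matcher (a sketch, not verified doctest output):

    m = match('/repo', '', [], include=['rootfilesin:src/lib'])
    m.visitdir('src')      # truthy: must be entered to reach src/lib
    m.visitdir('src/lib')  # truthy: files directly inside can match
    m.visitdir('other')    # False: nothing under other/ can match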
@@ -286,7 +302,7 @@
         for kind, pat in [_patsplit(p, default) for p in patterns]:
             if kind in ('glob', 'relpath'):
                 pat = pathutil.canonpath(root, cwd, pat, auditor)
-            elif kind in ('relglob', 'path'):
+            elif kind in ('relglob', 'path', 'rootfilesin'):
                 pat = util.normpath(pat)
             elif kind in ('listfile', 'listfile0'):
                 try:
@@ -419,7 +435,9 @@
         # m.exact(file) must be based off of the actual user input, otherwise
         # inexact case matches are treated as exact, and not noted without -v.
         if self._files:
-            self._fileroots = set(_roots(self._kp))
+            roots, dirs = _rootsanddirs(self._kp)
+            self._fileroots = set(roots)
+            self._fileroots.update(dirs)
 
     def _normalize(self, patterns, default, root, cwd, auditor):
         self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
@@ -447,7 +465,8 @@
     if ':' in pattern:
         kind, pat = pattern.split(':', 1)
         if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
-                    'listfile', 'listfile0', 'set', 'include', 'subinclude'):
+                    'listfile', 'listfile0', 'set', 'include', 'subinclude',
+                    'rootfilesin'):
             return kind, pat
     return default, pattern
 
@@ -476,9 +495,9 @@
     group = 0
     escape = util.re.escape
     def peek():
-        return i < n and pat[i]
+        return i < n and pat[i:i + 1]
     while i < n:
-        c = pat[i]
+        c = pat[i:i + 1]
         i += 1
         if c not in '*?[{},\\':
             res += escape(c)
@@ -496,18 +515,18 @@
             res += '.'
         elif c == '[':
             j = i
-            if j < n and pat[j] in '!]':
+            if j < n and pat[j:j + 1] in '!]':
                 j += 1
-            while j < n and pat[j] != ']':
+            while j < n and pat[j:j + 1] != ']':
                 j += 1
             if j >= n:
                 res += '\\['
             else:
                 stuff = pat[i:j].replace('\\','\\\\')
                 i = j + 1
-                if stuff[0] == '!':
+                if stuff[0:1] == '!':
                     stuff = '^' + stuff[1:]
-                elif stuff[0] == '^':
+                elif stuff[0:1] == '^':
                     stuff = '\\' + stuff
                 res = '%s[%s]' % (res, stuff)
         elif c == '{':
@@ -540,6 +559,14 @@
         if pat == '.':
             return ''
         return '^' + util.re.escape(pat) + '(?:/|$)'
+    if kind == 'rootfilesin':
+        if pat == '.':
+            escaped = ''
+        else:
+            # Pattern is a directory name.
+            escaped = util.re.escape(pat) + '/'
+        # Anything after the pattern must be a non-directory.
+        return '^' + escaped + '[^/]+$'
     if kind == 'relglob':
         return '(?:|.*/)' + _globre(pat) + globsuffix
     if kind == 'relpath':
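The regexes this produces for the new kind, sketched (exact escaping comes from util.re.escape):

    # 'rootfilesin:src' -> '^src/[^/]+$'  (files directly under src/)
    # 'rootfilesin:.'   -> '^[^/]+$'      (files at the repository root)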
@@ -609,17 +636,16 @@
                     raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
         raise error.Abort(_("invalid pattern"))
 
-def _roots(kindpats):
-    '''return roots and exact explicitly listed files from patterns
+def _patternrootsanddirs(kindpats):
+    '''Returns roots and directories corresponding to each pattern.
 
-    >>> _roots([('glob', 'g/*', ''), ('glob', 'g', ''), ('glob', 'g*', '')])
-    ['g', 'g', '.']
-    >>> _roots([('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
-    ['r', 'p/p', '.']
-    >>> _roots([('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
-    ['.', '.', '.']
+    This calculates the roots and directories exactly matching the patterns and
+    returns a tuple of (roots, dirs) for each. It does not return other
+    directories which may also need to be considered, like the parent
+    directories.
     '''
     r = []
+    d = []
     for kind, pat, source in kindpats:
         if kind == 'glob': # find the non-glob prefix
             root = []
@@ -630,13 +656,63 @@
             r.append('/'.join(root) or '.')
         elif kind in ('relpath', 'path'):
             r.append(pat or '.')
+        elif kind in ('rootfilesin',):
+            d.append(pat or '.')
         else: # relglob, re, relre
             r.append('.')
-    return r
+    return r, d
+
+def _roots(kindpats):
+    '''Returns root directories to match recursively from the given patterns.'''
+    roots, dirs = _patternrootsanddirs(kindpats)
+    return roots
+
+def _rootsanddirs(kindpats):
+    '''Returns roots and exact directories from patterns.
+
+    roots are directories to match recursively, whereas exact directories should
+    be matched non-recursively. The returned (roots, dirs) tuple will also
+    include directories that need to be implicitly considered as either, such as
+    parent directories.
+
+    >>> _rootsanddirs(\
+        [('glob', 'g/h/*', ''), ('glob', 'g/h', ''), ('glob', 'g*', '')])
+    (['g/h', 'g/h', '.'], ['g'])
+    >>> _rootsanddirs(\
+        [('rootfilesin', 'g/h', ''), ('rootfilesin', '', '')])
+    ([], ['g/h', '.', 'g'])
+    >>> _rootsanddirs(\
+        [('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
+    (['r', 'p/p', '.'], ['p'])
+    >>> _rootsanddirs(\
+        [('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
+    (['.', '.', '.'], [])
+    '''
+    r, d = _patternrootsanddirs(kindpats)
+
+    # Append the parents as non-recursive/exact directories, since they must be
+    # scanned to get to either the roots or the other exact directories.
+    d.extend(util.dirs(d))
+    d.extend(util.dirs(r))
+
+    return r, d
+
+def _explicitfiles(kindpats):
+    '''Returns the potential explicit filenames from the patterns.
+
+    >>> _explicitfiles([('path', 'foo/bar', '')])
+    ['foo/bar']
+    >>> _explicitfiles([('rootfilesin', 'foo/bar', '')])
+    []
+    '''
+    # Keep only the pattern kinds where one can specify filenames (vs only
+    # directory names).
+    filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)]
+    return _roots(filable)
 
 def _anypats(kindpats):
     for kind, pat, source in kindpats:
-        if kind in ('glob', 're', 'relglob', 'relre', 'set'):
+        if kind in ('glob', 're', 'relglob', 'relre', 'set', 'rootfilesin'):
             return True
 
 _commentre = None
@@ -668,12 +744,12 @@
     syntax = 'relre:'
     patterns = []
 
-    fp = open(filepath)
+    fp = open(filepath, 'rb')
     for lineno, line in enumerate(util.iterfile(fp), start=1):
         if "#" in line:
             global _commentre
             if not _commentre:
-                _commentre = util.re.compile(r'((?:^|[^\\])(?:\\\\)*)#.*')
+                _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
             # remove comments prefixed by an even number of escapes
             m = _commentre.search(line)
             if m:
--- a/mercurial/mdiff.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/mdiff.py	Tue Apr 18 12:24:34 2017 -0400
@@ -17,6 +17,7 @@
     bdiff,
     error,
     mpatch,
+    pycompat,
     util,
 )
 
@@ -61,6 +62,7 @@
         }
 
     def __init__(self, **opts):
+        opts = pycompat.byteskwargs(opts)
         for k in self.defaults.keys():
             v = opts.get(k)
             if v is None:
@@ -115,6 +117,31 @@
         s1 = i1
         s2 = i2
 
+def hunkinrange(hunk, linerange):
+    """Return True if `hunk` defined as (start, length) is in `linerange`
+    defined as (lowerbound, upperbound).
+
+    >>> hunkinrange((5, 10), (2, 7))
+    True
+    >>> hunkinrange((5, 10), (6, 12))
+    True
+    >>> hunkinrange((5, 10), (13, 17))
+    True
+    >>> hunkinrange((5, 10), (3, 17))
+    True
+    >>> hunkinrange((5, 10), (1, 3))
+    False
+    >>> hunkinrange((5, 10), (18, 20))
+    False
+    >>> hunkinrange((5, 10), (1, 5))
+    False
+    >>> hunkinrange((5, 10), (15, 27))
+    False
+    """
+    start, length = hunk
+    lowerbound, upperbound = linerange
+    return lowerbound < start + length and start < upperbound
+
 def blocksinrange(blocks, rangeb):
     """filter `blocks` like (a1, a2, b1, b2) from items outside line range
     `rangeb` from ``(b1, b2)`` point of view.
@@ -148,7 +175,7 @@
                     uba = a1 + (ubb - b1)
                 else:
                     uba = a2
-        if lbb < b2 and b1 < ubb:
+        if hunkinrange((b1, (b2 - b1)), rangeb):
             filteredblocks.append(block)
     if lba is None or uba is None or uba < lba:
         raise error.Abort(_('line range exceeds file size'))
@@ -196,15 +223,23 @@
         yield s1, '='
 
 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
+    """Return a unified diff as a (headers, hunks) tuple.
+
+    If the diff is not null, `headers` is a list with unified diff header
+    lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
+    (hunkrange, hunklines) coming from _unidiff().
+    Otherwise, `headers` and `hunks` are empty.
+    """
     def datetag(date, fn=None):
         if not opts.git and not opts.nodates:
-            return '\t%s\n' % date
+            return '\t%s' % date
         if fn and ' ' in fn:
-            return '\t\n'
-        return '\n'
+            return '\t'
+        return ''
 
+    sentinel = [], ()
     if not a and not b:
-        return ""
+        return sentinel
 
     if opts.noprefix:
         aprefix = bprefix = ''
@@ -217,10 +252,17 @@
     fn1 = util.pconvert(fn1)
     fn2 = util.pconvert(fn2)
 
+    def checknonewline(lines):
+        for text in lines:
+            if text[-1:] != '\n':
+                text += "\n\ No newline at end of file\n"
+            yield text
+
     if not opts.text and (util.binary(a) or util.binary(b)):
         if a and b and len(a) == len(b) and a == b:
-            return ""
-        l = ['Binary file %s has changed\n' % fn1]
+            return sentinel
+        headerlines = []
+        hunks = (None, ['Binary file %s has changed\n' % fn1]),
     elif not a:
         b = splitnewlines(b)
         if a is None:
@@ -228,8 +270,11 @@
         else:
             l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
         l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
-        l3 = "@@ -0,0 +1,%d @@\n" % len(b)
-        l = [l1, l2, l3] + ["+" + e for e in b]
+        headerlines = [l1, l2]
+        size = len(b)
+        hunkrange = (0, 0, 1, size)
+        hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
+        hunks = (hunkrange, checknonewline(hunklines)),
     elif not b:
         a = splitnewlines(a)
         l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
@@ -237,28 +282,42 @@
             l2 = '+++ /dev/null%s' % datetag(epoch)
         else:
             l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
-        l3 = "@@ -1,%d +0,0 @@\n" % len(a)
-        l = [l1, l2, l3] + ["-" + e for e in a]
+        headerlines = [l1, l2]
+        size = len(a)
+        hunkrange = (1, size, 0, 0)
+        hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
+        hunks = (hunkrange, checknonewline(hunklines)),
     else:
-        al = splitnewlines(a)
-        bl = splitnewlines(b)
-        l = list(_unidiff(a, b, al, bl, opts=opts))
-        if not l:
-            return ""
+        diffhunks = _unidiff(a, b, opts=opts)
+        try:
+            hunkrange, hunklines = next(diffhunks)
+        except StopIteration:
+            return sentinel
 
-        l.insert(0, "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)))
-        l.insert(1, "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)))
+        headerlines = [
+            "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
+            "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
+        ]
+        def rewindhunks():
+            yield hunkrange, checknonewline(hunklines)
+            for hr, hl in diffhunks:
+                yield hr, checknonewline(hl)
 
-    for ln in xrange(len(l)):
-        if l[ln][-1] != '\n':
-            l[ln] += "\n\ No newline at end of file\n"
+        hunks = rewindhunks()
 
-    return "".join(l)
+    return headerlines, hunks
+
+def _unidiff(t1, t2, opts=defaultopts):
+    """Yield hunks of a headerless unified diff from t1 and t2 texts.
 
-# creates a headerless unified diff
-# t1 and t2 are the text to be diffed
-# l1 and l2 are the text broken up into lines
-def _unidiff(t1, t2, l1, l2, opts=defaultopts):
+    Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
+    tuple (s1, l1, s2, l2) representing the range information of the hunk to
+    form the '@@ -s1,l1 +s2,l2 @@' header, and `hunklines` is a list of the
+    lines of the hunk, starting with that header line and followed by the
+    line additions and deletions.
+    """
+    l1 = splitnewlines(t1)
+    l2 = splitnewlines(t2)
     def contextend(l, len):
         ret = l + opts.context
         if ret > len:
@@ -300,12 +359,13 @@
         if blen:
             bstart += 1
 
-        yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
-                                           bstart, blen, func)
-        for x in delta:
-            yield x
-        for x in xrange(a2, aend):
-            yield ' ' + l1[x]
+        hunkrange = astart, alen, bstart, blen
+        hunklines = (
+            ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
+            + delta
+            + [' ' + l1[x] for x in xrange(a2, aend)]
+        )
+        yield hunkrange, hunklines
 
     # bdiff.blocks gives us the matching sequences in the files.  The loop
     # below finds the spaces between those matching sequences and translates
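Callers of unidiff() now get structure instead of one flat string; flattening it back for display takes a few lines. A sketch, assuming texts a and b with date strings ad and bd are already in hand (real consumers live in mercurial/patch.py):

    headers, hunks = unidiff(a, ad, b, bd, 'a.txt', 'b.txt')
    if headers:
        out = '\n'.join(headers) + '\n'   # header lines no longer carry '\n'
        for hunkrange, hunklines in hunks:
            out += ''.join(hunklines)     # hunk lines still end with '\n'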
--- a/mercurial/merge.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/merge.py	Tue Apr 18 12:24:34 2017 -0400
@@ -27,6 +27,7 @@
     copies,
     error,
     filemerge,
+    match as matchmod,
     obsolete,
     pycompat,
     scmutil,
@@ -123,7 +124,7 @@
             self._mdstate = 's'
         else:
             self._mdstate = 'u'
-        shutil.rmtree(self._repo.join('merge'), True)
+        shutil.rmtree(self._repo.vfs.join('merge'), True)
         self._results = {}
         self._dirty = False
 
@@ -314,13 +315,15 @@
     @util.propertycache
     def localctx(self):
         if self._local is None:
-            raise RuntimeError("localctx accessed but self._local isn't set")
+            msg = "localctx accessed but self._local isn't set"
+            raise error.ProgrammingError(msg)
         return self._repo[self._local]
 
     @util.propertycache
     def otherctx(self):
         if self._other is None:
-            raise RuntimeError("otherctx accessed but self._other isn't set")
+            msg = "otherctx accessed but self._other isn't set"
+            raise error.ProgrammingError(msg)
         return self._repo[self._other]
 
     def active(self):
@@ -818,11 +821,10 @@
         if any(wctx.sub(s).dirty() for s in wctx.substate):
             m1['.hgsubstate'] = modifiednodeid
 
-    # Compare manifests
-    if matcher is not None:
-        m1 = m1.matches(matcher)
-        m2 = m2.matches(matcher)
-    diff = m1.diff(m2)
+    diff = m1.diff(m2, match=matcher)
+
+    if matcher is None:
+        matcher = matchmod.always('', '')
 
     actions = {}
     for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
@@ -927,7 +929,7 @@
                         # new file added in a directory that was moved
                         df = dirmove[d] + f[len(d):]
                         break
-                if df in m1:
+                if df is not None and df in m1:
                     actions[df] = ('m', (df, f, f, False, pa.node()),
                             "local directory rename - respect move from " + f)
                 elif acceptremote:
@@ -1060,8 +1062,7 @@
     yields tuples for progress updates
     """
     verbose = repo.ui.verbose
-    unlink = util.unlinkpath
-    wjoin = repo.wjoin
+    unlinkpath = repo.wvfs.unlinkpath
     audit = repo.wvfs.audit
     try:
         cwd = pycompat.getcwd()
@@ -1076,7 +1077,7 @@
             repo.ui.note(_("removing %s\n") % f)
         audit(f)
         try:
-            unlink(wjoin(f), ignoremissing=True)
+            unlinkpath(f, ignoremissing=True)
         except OSError as inst:
             repo.ui.warn(_("update failed to remove %s: %s!\n") %
                          (f, inst.strerror))
@@ -1190,7 +1191,7 @@
         if os.path.lexists(repo.wjoin(f)):
             repo.ui.debug("removing %s\n" % f)
             audit(f)
-            util.unlinkpath(repo.wjoin(f))
+            repo.wvfs.unlinkpath(f)
 
     numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
 
@@ -1247,7 +1248,7 @@
         repo.ui.note(_("moving %s to %s\n") % (f0, f))
         audit(f)
         repo.wwrite(f, wctx.filectx(f0).data(), flags)
-        util.unlinkpath(repo.wjoin(f0))
+        repo.wvfs.unlinkpath(f0)
         updated += 1
 
     # local directory rename, get
@@ -1444,11 +1445,12 @@
             repo.dirstate.normal(f)
 
 def update(repo, node, branchmerge, force, ancestor=None,
-           mergeancestor=False, labels=None, matcher=None, mergeforce=False):
+           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
+           updatecheck=None):
     """
     Perform a merge between the working directory and the given node
 
-    node = the node to update to, or None if unspecified
+    node = the node to update to
     branchmerge = whether to merge between branches
     force = whether to force branch merging or file overwriting
     matcher = a matcher to filter file lists (dirstate not updated)
@@ -1464,34 +1466,47 @@
     The table below shows all the behaviors of the update command
     given the -c and -C or no options, whether the working directory
     is dirty, whether a revision is specified, and the relationship of
-    the parent rev to the target rev (linear, on the same named
-    branch, or on another named branch).
+    the parent rev to the target rev (linear or not). Rows are matched from
+    top to bottom, and the first matching row applies. The -n option doesn't
+    exist on the command line, but represents the
+    experimental.updatecheck=noconflict option.
 
     This logic is tested by test-update-branches.t.
 
-    -c  -C  dirty  rev  |  linear      same    cross
-     n   n    n     n   |    ok        (1)       x
-     n   n    n     y   |    ok        ok       ok
-     n   n    y     n   |   merge      (2)      (2)
-     n   n    y     y   |   merge      (3)      (3)
-     n   y    *     *   |   discard   discard   discard
-     y   n    y     *   |    (4)       (4)      (4)
-     y   n    n     *   |    ok        ok       ok
-     y   y    *     *   |    (5)       (5)      (5)
+    -c  -C  -n  -m  dirty  rev  linear  |  result
+     y   y   *   *    *     *     *     |    (1)
+     y   *   y   *    *     *     *     |    (1)
+     y   *   *   y    *     *     *     |    (1)
+     *   y   y   *    *     *     *     |    (1)
+     *   y   *   y    *     *     *     |    (1)
+     *   *   y   y    *     *     *     |    (1)
+     *   *   *   *    *     n     n     |     x
+     *   *   *   *    n     *     *     |    ok
+     n   n   n   n    y     *     y     |   merge
+     n   n   n   n    y     y     n     |    (2)
+     n   n   n   y    y     *     *     |   merge
+     n   n   y   n    y     *     *     |  merge if no conflict
+     n   y   n   n    y     *     *     |  discard
+     y   n   n   n    y     *     *     |    (3)
 
     x = can't happen
     * = don't-care
-    1 = abort: not a linear update (merge or update --check to force update)
-    2 = abort: uncommitted changes (commit and merge, or update --clean to
-                 discard changes)
-    3 = abort: uncommitted changes (commit or update --clean to discard changes)
-    4 = abort: uncommitted changes (checked in commands.py)
-    5 = incompatible options (checked in commands.py)
+    1 = incompatible options (checked in commands.py)
+    2 = abort: uncommitted changes (commit or update --clean to discard changes)
+    3 = abort: uncommitted changes (checked in commands.py)
 
     Return the same tuple as applyupdates().
     """
 
-    onode = node
+    # This function used to find the default destination if node was None, but
+    # that's now in destutil.py.
+    assert node is not None
+    if not branchmerge and not force:
+        # TODO: remove the default once all callers that pass branchmerge=False
+        # and force=False pass a value for updatecheck. We may want to allow
+        # updatecheck='abort' to better support some of these callers.
+        if updatecheck is None:
+            updatecheck = 'linear'
+        assert updatecheck in ('none', 'linear', 'noconflict')
     # If we're doing a partial update, we need to skip updating
     # the dirstate, so make a note of any partial-ness to the
     # update here.
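A hypothetical caller opting in to the new check, assuming repo and node are already in hand (in practice the value comes from the experimental.updatecheck config):

    from mercurial import merge
    # abort with "conflicting changes" rather than merging dirty files:
    merge.update(repo, node, branchmerge=False, force=False,
                 updatecheck='noconflict')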
@@ -1531,7 +1546,7 @@
                 raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
             elif pas == [p1]:
-                if not mergeancestor and p1.branch() == p2.branch():
+                if not mergeancestor and wc.branch() == p2.branch():
                     raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
@@ -1548,39 +1563,33 @@
                 repo.hook('update', parent1=xp2, parent2='', error=0)
                 return 0, 0, 0, 0
 
-            if pas not in ([p1], [p2]):  # nonlinear
+            if (updatecheck == 'linear' and
+                    pas not in ([p1], [p2])):  # nonlinear
                 dirty = wc.dirty(missing=True)
-                if dirty or onode is None:
+                if dirty:
                     # Branching is a bit strange to ensure we do the minimal
-                    # amount of call to obsolete.background.
+                    # amount of call to obsolete.foreground.
                     foreground = obsolete.foreground(repo, [p1.node()])
                     # note: the <node> variable contains a random identifier
                     if repo[node].node() in foreground:
-                        pas = [p1]  # allow updating to successors
-                    elif dirty:
+                        pass # allow updating to successors
+                    else:
                         msg = _("uncommitted changes")
-                        if onode is None:
-                            hint = _("commit and merge, or update --clean to"
-                                     " discard changes")
-                        else:
-                            hint = _("commit or update --clean to discard"
-                                     " changes")
-                        raise error.Abort(msg, hint=hint)
-                    else:  # node is none
-                        msg = _("not a linear update")
-                        hint = _("merge or update --check to force update")
-                        raise error.Abort(msg, hint=hint)
+                        hint = _("commit or update --clean to discard changes")
+                        raise error.UpdateAbort(msg, hint=hint)
                 else:
                     # Allow jumping branches if clean and specific rev given
-                    pas = [p1]
+                    pass
+
+        if overwrite:
+            pas = [wc]
+        elif not branchmerge:
+            pas = [p1]
 
         # deprecated config: merge.followcopies
         followcopies = repo.ui.configbool('merge', 'followcopies', True)
         if overwrite:
-            pas = [wc]
             followcopies = False
-        elif pas == [p2]: # backwards
-            pas = [p1]
         elif not pas[0]:
             followcopies = False
         if not branchmerge and not wc.dirty(missing=True):
@@ -1591,6 +1600,13 @@
             repo, wc, p2, pas, branchmerge, force, mergeancestor,
             followcopies, matcher=matcher, mergeforce=mergeforce)
 
+        if updatecheck == 'noconflict':
+            for f, (m, args, msg) in actionbyfile.iteritems():
+                if m not in ('g', 'k', 'e', 'r'):
+                    msg = _("conflicting changes")
+                    hint = _("commit or update --clean to discard changes")
+                    raise error.Abort(msg, hint=hint)
+
         # Prompt and create actions. Most of this is in the resolve phase
         # already, but we can't handle .hgsubstate in filemerge or
         # subrepo.submerge yet so we have to keep prompting for it.
@@ -1664,7 +1680,7 @@
             repo.setparents(fp1, fp2)
             recordupdates(repo, actions, branchmerge)
             # update completed, clear state
-            util.unlink(repo.join('updatestate'))
+            util.unlink(repo.vfs.join('updatestate'))
 
             if not branchmerge:
                 repo.dirstate.setbranch(p2.branch())
--- a/mercurial/minirst.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/minirst.py	Tue Apr 18 12:24:34 2017 -0400
@@ -26,6 +26,7 @@
 from .i18n import _
 from . import (
     encoding,
+    pycompat,
     util,
 )
 
@@ -59,12 +60,12 @@
     # ASCII characters other than control/alphabet/digit as a part of
     # multi-bytes characters, so direct replacing with such characters
     # on strings in local encoding causes invalid byte sequences.
-    utext = text.decode(encoding.encoding)
+    utext = text.decode(pycompat.sysstr(encoding.encoding))
     for f, t in substs:
         utext = utext.replace(f.decode("ascii"), t.decode("ascii"))
-    return utext.encode(encoding.encoding)
+    return utext.encode(pycompat.sysstr(encoding.encoding))
 
-_blockre = re.compile(r"\n(?:\s*\n)+")
+_blockre = re.compile(br"\n(?:\s*\n)+")
 
 def findblocks(text):
     """Find continuous blocks of lines in text.
@@ -138,12 +139,12 @@
         i += 1
     return blocks
 
-_bulletre = re.compile(r'(-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ')
-_optionre = re.compile(r'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)'
-                       r'((.*)  +)(.*)$')
-_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
-_definitionre = re.compile(r'[^ ]')
-_tablere = re.compile(r'(=+\s+)*=+')
+_bulletre = re.compile(br'(\*|-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ')
+_optionre = re.compile(br'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)'
+                       br'((.*)  +)(.*)$')
+_fieldre = re.compile(br':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
+_definitionre = re.compile(br'[^ ]')
+_tablere = re.compile(br'(=+\s+)*=+')
 
 def splitparagraphs(blocks):
     """Split paragraphs into lists."""
@@ -286,7 +287,7 @@
         i += 1
     return blocks, pruned
 
-_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""")
+_sectionre = re.compile(br"""^([-=`:.'"~^_*+#])\1+$""")
 
 def findtables(blocks):
     '''Find simple tables
@@ -411,18 +412,20 @@
             i += 1
     return blocks
 
-_admonitionre = re.compile(r"\.\. (admonition|attention|caution|danger|"
-                           r"error|hint|important|note|tip|warning)::",
-                           flags=re.IGNORECASE)
 
-def findadmonitions(blocks):
+def findadmonitions(blocks, admonitions=None):
     """
     Makes the type of the block an admonition block if
     the first line is an admonition directive
     """
+    admonitions = admonitions or _admonitiontitles.keys()
+
+    admonitionre = re.compile(br'\.\. (%s)::' % '|'.join(sorted(admonitions)),
+                              flags=re.IGNORECASE)
+
     i = 0
     while i < len(blocks):
-        m = _admonitionre.match(blocks[i]['lines'][0])
+        m = admonitionre.match(blocks[i]['lines'][0])
         if m:
             blocks[i]['type'] = 'admonition'
             admonitiontitle = blocks[i]['lines'][0][3:m.end() - 2].lower()
@@ -436,15 +439,17 @@
         i = i + 1
     return blocks
 
-_admonitiontitles = {'attention': _('Attention:'),
-                     'caution': _('Caution:'),
-                     'danger': _('!Danger!')  ,
-                     'error': _('Error:'),
-                     'hint': _('Hint:'),
-                     'important': _('Important:'),
-                     'note': _('Note:'),
-                     'tip': _('Tip:'),
-                     'warning': _('Warning!')}
+_admonitiontitles = {
+    'attention': _('Attention:'),
+    'caution': _('Caution:'),
+    'danger': _('!Danger!'),
+    'error': _('Error:'),
+    'hint': _('Hint:'),
+    'important': _('Important:'),
+    'note': _('Note:'),
+    'tip': _('Tip:'),
+    'warning': _('Warning!'),
+}
 
 def formatoption(block, width):
     desc = ' '.join(map(str.strip, block['lines']))
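Since the recognized directive set is now a parameter, a caller can restrict it; a sketch, assuming text is a bytes document and the names are drawn from _admonitiontitles above:

    blocks, pruned = minirst.parse(text, admonitions=['note', 'warning'])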
@@ -533,7 +538,7 @@
     elif block['type'] == 'option':
         return formatoption(block, width)
 
-    text = ' '.join(map(str.strip, block['lines']))
+    text = ' '.join(map(bytes.strip, block['lines']))
     return util.wrap(text, width=width,
                      initindent=indent,
                      hangindent=subindent) + '\n'
@@ -596,7 +601,7 @@
             out.append(' <dt>%s\n <dd>%s\n' % (term, text))
         elif btype == 'bullet':
             bullet, head = lines[0].split(' ', 1)
-            if bullet == '-':
+            if bullet in ('*', '-'):
                 openlist('ul', level)
             else:
                 openlist('ol', level)
@@ -629,7 +634,7 @@
 
     return ''.join(out)
 
-def parse(text, indent=0, keep=None):
+def parse(text, indent=0, keep=None, admonitions=None):
     """Parse text into a list of blocks"""
     pruned = []
     blocks = findblocks(text)
@@ -644,7 +649,7 @@
     blocks = splitparagraphs(blocks)
     blocks = updatefieldlists(blocks)
     blocks = updateoptionlists(blocks)
-    blocks = findadmonitions(blocks)
+    blocks = findadmonitions(blocks, admonitions=admonitions)
     blocks = addmargins(blocks)
     blocks = prunecomments(blocks)
     return blocks, pruned
--- a/mercurial/obsolete.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/obsolete.py	Tue Apr 18 12:24:34 2017 -0400
@@ -552,6 +552,8 @@
                 pass
         return bool(self._all)
 
+    __bool__ = __nonzero__
+
     @property
     def readonly(self):
         """True if marker creation is disabled
@@ -1120,7 +1122,7 @@
     """the set of obsolete revisions"""
     obs = set()
     getnode = repo.changelog.node
-    notpublic = repo.revs("not public()")
+    notpublic = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
     for r in notpublic:
         if getnode(r) in repo.obsstore.successors:
             obs.add(r)
--- a/mercurial/osutil.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/osutil.c	Tue Apr 18 12:24:34 2017 -0400
@@ -24,6 +24,14 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
+#ifdef HAVE_LINUX_STATFS
+#include <linux/magic.h>
+#include <sys/vfs.h>
+#endif
+#ifdef HAVE_BSD_STATFS
+#include <sys/mount.h>
+#include <sys/param.h>
+#endif
 #endif
 
 #ifdef __APPLE__
@@ -206,7 +214,7 @@
 	char *pattern;
 
 	/* build the path + \* pattern string */
-	pattern = malloc(plen + 3); /* path + \* + \0 */
+	pattern = PyMem_Malloc(plen + 3); /* path + \* + \0 */
 	if (!pattern) {
 		PyErr_NoMemory();
 		goto error_nomem;
@@ -269,7 +277,7 @@
 error_list:
 	FindClose(fh);
 error_file:
-	free(pattern);
+	PyMem_Free(pattern);
 error_nomem:
 	return rval;
 }
@@ -400,6 +408,8 @@
 	Py_XDECREF(stat);
 error_list:
 	closedir(dir);
+	/* closedir also closes its dirfd */
+	goto error_value;
 error_dir:
 #ifdef AT_SYMLINK_NOFOLLOW
 	close(dfd);
@@ -784,6 +794,323 @@
 }
 #endif /* ndef SETPROCNAME_USE_NONE */
 
+#if defined(HAVE_BSD_STATFS)
+static const char *describefstype(const struct statfs *pbuf)
+{
+	/* BSD or OSX provides a f_fstypename field */
+	return pbuf->f_fstypename;
+}
+#elif defined(HAVE_LINUX_STATFS)
+static const char *describefstype(const struct statfs *pbuf)
+{
+	/* Begin of Linux filesystems */
+#ifdef ADFS_SUPER_MAGIC
+	if (pbuf->f_type == ADFS_SUPER_MAGIC)
+		return "adfs";
+#endif
+#ifdef AFFS_SUPER_MAGIC
+	if (pbuf->f_type == AFFS_SUPER_MAGIC)
+		return "affs";
+#endif
+#ifdef AUTOFS_SUPER_MAGIC
+	if (pbuf->f_type == AUTOFS_SUPER_MAGIC)
+		return "autofs";
+#endif
+#ifdef BDEVFS_MAGIC
+	if (pbuf->f_type == BDEVFS_MAGIC)
+		return "bdevfs";
+#endif
+#ifdef BEFS_SUPER_MAGIC
+	if (pbuf->f_type == BEFS_SUPER_MAGIC)
+		return "befs";
+#endif
+#ifdef BFS_MAGIC
+	if (pbuf->f_type == BFS_MAGIC)
+		return "bfs";
+#endif
+#ifdef BINFMTFS_MAGIC
+	if (pbuf->f_type == BINFMTFS_MAGIC)
+		return "binfmtfs";
+#endif
+#ifdef BTRFS_SUPER_MAGIC
+	if (pbuf->f_type == BTRFS_SUPER_MAGIC)
+		return "btrfs";
+#endif
+#ifdef CGROUP_SUPER_MAGIC
+	if (pbuf->f_type == CGROUP_SUPER_MAGIC)
+		return "cgroup";
+#endif
+#ifdef CIFS_MAGIC_NUMBER
+	if (pbuf->f_type == CIFS_MAGIC_NUMBER)
+		return "cifs";
+#endif
+#ifdef CODA_SUPER_MAGIC
+	if (pbuf->f_type == CODA_SUPER_MAGIC)
+		return "coda";
+#endif
+#ifdef COH_SUPER_MAGIC
+	if (pbuf->f_type == COH_SUPER_MAGIC)
+		return "coh";
+#endif
+#ifdef CRAMFS_MAGIC
+	if (pbuf->f_type == CRAMFS_MAGIC)
+		return "cramfs";
+#endif
+#ifdef DEBUGFS_MAGIC
+	if (pbuf->f_type == DEBUGFS_MAGIC)
+		return "debugfs";
+#endif
+#ifdef DEVFS_SUPER_MAGIC
+	if (pbuf->f_type == DEVFS_SUPER_MAGIC)
+		return "devfs";
+#endif
+#ifdef DEVPTS_SUPER_MAGIC
+	if (pbuf->f_type == DEVPTS_SUPER_MAGIC)
+		return "devpts";
+#endif
+#ifdef EFIVARFS_MAGIC
+	if (pbuf->f_type == EFIVARFS_MAGIC)
+		return "efivarfs";
+#endif
+#ifdef EFS_SUPER_MAGIC
+	if (pbuf->f_type == EFS_SUPER_MAGIC)
+		return "efs";
+#endif
+#ifdef EXT_SUPER_MAGIC
+	if (pbuf->f_type == EXT_SUPER_MAGIC)
+		return "ext";
+#endif
+#ifdef EXT2_OLD_SUPER_MAGIC
+	if (pbuf->f_type == EXT2_OLD_SUPER_MAGIC)
+		return "ext2";
+#endif
+#ifdef EXT2_SUPER_MAGIC
+	if (pbuf->f_type == EXT2_SUPER_MAGIC)
+		return "ext2";
+#endif
+#ifdef EXT3_SUPER_MAGIC
+	if (pbuf->f_type == EXT3_SUPER_MAGIC)
+		return "ext3";
+#endif
+#ifdef EXT4_SUPER_MAGIC
+	if (pbuf->f_type == EXT4_SUPER_MAGIC)
+		return "ext4";
+#endif
+#ifdef F2FS_SUPER_MAGIC
+	if (pbuf->f_type == F2FS_SUPER_MAGIC)
+		return "f2fs";
+#endif
+#ifdef FUSE_SUPER_MAGIC
+	if (pbuf->f_type == FUSE_SUPER_MAGIC)
+		return "fuse";
+#endif
+#ifdef FUTEXFS_SUPER_MAGIC
+	if (pbuf->f_type == FUTEXFS_SUPER_MAGIC)
+		return "futexfs";
+#endif
+#ifdef HFS_SUPER_MAGIC
+	if (pbuf->f_type == HFS_SUPER_MAGIC)
+		return "hfs";
+#endif
+#ifdef HOSTFS_SUPER_MAGIC
+	if (pbuf->f_type == HOSTFS_SUPER_MAGIC)
+		return "hostfs";
+#endif
+#ifdef HPFS_SUPER_MAGIC
+	if (pbuf->f_type == HPFS_SUPER_MAGIC)
+		return "hpfs";
+#endif
+#ifdef HUGETLBFS_MAGIC
+	if (pbuf->f_type == HUGETLBFS_MAGIC)
+		return "hugetlbfs";
+#endif
+#ifdef ISOFS_SUPER_MAGIC
+	if (pbuf->f_type == ISOFS_SUPER_MAGIC)
+		return "isofs";
+#endif
+#ifdef JFFS2_SUPER_MAGIC
+	if (pbuf->f_type == JFFS2_SUPER_MAGIC)
+		return "jffs2";
+#endif
+#ifdef JFS_SUPER_MAGIC
+	if (pbuf->f_type == JFS_SUPER_MAGIC)
+		return "jfs";
+#endif
+#ifdef MINIX_SUPER_MAGIC
+	if (pbuf->f_type == MINIX_SUPER_MAGIC)
+		return "minix";
+#endif
+#ifdef MINIX2_SUPER_MAGIC
+	if (pbuf->f_type == MINIX2_SUPER_MAGIC)
+		return "minix2";
+#endif
+#ifdef MINIX3_SUPER_MAGIC
+	if (pbuf->f_type == MINIX3_SUPER_MAGIC)
+		return "minix3";
+#endif
+#ifdef MQUEUE_MAGIC
+	if (pbuf->f_type == MQUEUE_MAGIC)
+		return "mqueue";
+#endif
+#ifdef MSDOS_SUPER_MAGIC
+	if (pbuf->f_type == MSDOS_SUPER_MAGIC)
+		return "msdos";
+#endif
+#ifdef NCP_SUPER_MAGIC
+	if (pbuf->f_type == NCP_SUPER_MAGIC)
+		return "ncp";
+#endif
+#ifdef NFS_SUPER_MAGIC
+	if (pbuf->f_type == NFS_SUPER_MAGIC)
+		return "nfs";
+#endif
+#ifdef NILFS_SUPER_MAGIC
+	if (pbuf->f_type == NILFS_SUPER_MAGIC)
+		return "nilfs";
+#endif
+#ifdef NTFS_SB_MAGIC
+	if (pbuf->f_type == NTFS_SB_MAGIC)
+		return "ntfs-sb";
+#endif
+#ifdef OCFS2_SUPER_MAGIC
+	if (pbuf->f_type == OCFS2_SUPER_MAGIC)
+		return "ocfs2";
+#endif
+#ifdef OPENPROM_SUPER_MAGIC
+	if (pbuf->f_type == OPENPROM_SUPER_MAGIC)
+		return "openprom";
+#endif
+#ifdef OVERLAYFS_SUPER_MAGIC
+	if (pbuf->f_type == OVERLAYFS_SUPER_MAGIC)
+		return "overlay";
+#endif
+#ifdef PIPEFS_MAGIC
+	if (pbuf->f_type == PIPEFS_MAGIC)
+		return "pipefs";
+#endif
+#ifdef PROC_SUPER_MAGIC
+	if (pbuf->f_type == PROC_SUPER_MAGIC)
+		return "proc";
+#endif
+#ifdef PSTOREFS_MAGIC
+	if (pbuf->f_type == PSTOREFS_MAGIC)
+		return "pstorefs";
+#endif
+#ifdef QNX4_SUPER_MAGIC
+	if (pbuf->f_type == QNX4_SUPER_MAGIC)
+		return "qnx4";
+#endif
+#ifdef QNX6_SUPER_MAGIC
+	if (pbuf->f_type == QNX6_SUPER_MAGIC)
+		return "qnx6";
+#endif
+#ifdef RAMFS_MAGIC
+	if (pbuf->f_type == RAMFS_MAGIC)
+		return "ramfs";
+#endif
+#ifdef REISERFS_SUPER_MAGIC
+	if (pbuf->f_type == REISERFS_SUPER_MAGIC)
+		return "reiserfs";
+#endif
+#ifdef ROMFS_MAGIC
+	if (pbuf->f_type == ROMFS_MAGIC)
+		return "romfs";
+#endif
+#ifdef SECURITYFS_MAGIC
+	if (pbuf->f_type == SECURITYFS_MAGIC)
+		return "securityfs";
+#endif
+#ifdef SELINUX_MAGIC
+	if (pbuf->f_type == SELINUX_MAGIC)
+		return "selinux";
+#endif
+#ifdef SMACK_MAGIC
+	if (pbuf->f_type == SMACK_MAGIC)
+		return "smack";
+#endif
+#ifdef SMB_SUPER_MAGIC
+	if (pbuf->f_type == SMB_SUPER_MAGIC)
+		return "smb";
+#endif
+#ifdef SOCKFS_MAGIC
+	if (pbuf->f_type == SOCKFS_MAGIC)
+		return "sockfs";
+#endif
+#ifdef SQUASHFS_MAGIC
+	if (pbuf->f_type == SQUASHFS_MAGIC)
+		return "squashfs";
+#endif
+#ifdef SYSFS_MAGIC
+	if (pbuf->f_type == SYSFS_MAGIC)
+		return "sysfs";
+#endif
+#ifdef SYSV2_SUPER_MAGIC
+	if (pbuf->f_type == SYSV2_SUPER_MAGIC)
+		return "sysv2";
+#endif
+#ifdef SYSV4_SUPER_MAGIC
+	if (pbuf->f_type == SYSV4_SUPER_MAGIC)
+		return "sysv4";
+#endif
+#ifdef TMPFS_MAGIC
+	if (pbuf->f_type == TMPFS_MAGIC)
+		return "tmpfs";
+#endif
+#ifdef UDF_SUPER_MAGIC
+	if (pbuf->f_type == UDF_SUPER_MAGIC)
+		return "udf";
+#endif
+#ifdef UFS_MAGIC
+	if (pbuf->f_type == UFS_MAGIC)
+		return "ufs";
+#endif
+#ifdef USBDEVICE_SUPER_MAGIC
+	if (pbuf->f_type == USBDEVICE_SUPER_MAGIC)
+		return "usbdevice";
+#endif
+#ifdef V9FS_MAGIC
+	if (pbuf->f_type == V9FS_MAGIC)
+		return "v9fs";
+#endif
+#ifdef VXFS_SUPER_MAGIC
+	if (pbuf->f_type == VXFS_SUPER_MAGIC)
+		return "vxfs";
+#endif
+#ifdef XENFS_SUPER_MAGIC
+	if (pbuf->f_type == XENFS_SUPER_MAGIC)
+		return "xenfs";
+#endif
+#ifdef XENIX_SUPER_MAGIC
+	if (pbuf->f_type == XENIX_SUPER_MAGIC)
+		return "xenix";
+#endif
+#ifdef XFS_SUPER_MAGIC
+	if (pbuf->f_type == XFS_SUPER_MAGIC)
+		return "xfs";
+#endif
+	/* End of Linux filesystems */
+	return NULL;
+}
+#endif /* def HAVE_LINUX_STATFS */
+
+#if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
+/* given a directory path, return filesystem type name (best-effort) */
+static PyObject *getfstype(PyObject *self, PyObject *args)
+{
+	const char *path = NULL;
+	struct statfs buf;
+	int r;
+	if (!PyArg_ParseTuple(args, "s", &path))
+		return NULL;
+
+	memset(&buf, 0, sizeof(buf));
+	r = statfs(path, &buf);
+	if (r != 0)
+		return PyErr_SetFromErrno(PyExc_OSError);
+	return Py_BuildValue("s", describefstype(&buf));
+}
+#endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
+
 #endif /* ndef _WIN32 */
 
 static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
@@ -960,6 +1287,10 @@
 	{"setprocname", (PyCFunction)setprocname, METH_VARARGS,
 	 "set process title (best-effort)\n"},
 #endif
+#if defined(HAVE_BSD_STATFS) || defined(HAVE_LINUX_STATFS)
+	{"getfstype", (PyCFunction)getfstype, METH_VARARGS,
+	 "get filesystem type (best-effort)\n"},
+#endif
 #endif /* ndef _WIN32 */
 #ifdef __APPLE__
 	{
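From Python the new function is exposed on the C osutil module (the pure-Python fallback does not grow it here); a hypothetical session, with output depending on the filesystem and None returned for unrecognized magic numbers:

    from mercurial import osutil
    osutil.getfstype('/')   # e.g. 'ext4' on Linux, 'hfs' on macOS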
--- a/mercurial/parser.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/parser.py	Tue Apr 18 12:24:34 2017 -0400
@@ -19,7 +19,10 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from . import error
+from . import (
+    error,
+    util,
+)
 
 class parser(object):
     def __init__(self, elements, methods=None):
@@ -91,52 +94,67 @@
         return t
 
 def splitargspec(spec):
-    """Parse spec of function arguments into (poskeys, varkey, keys)
+    """Parse spec of function arguments into (poskeys, varkey, keys, optkey)
 
     >>> splitargspec('')
-    ([], None, [])
+    ([], None, [], None)
     >>> splitargspec('foo bar')
-    ([], None, ['foo', 'bar'])
-    >>> splitargspec('foo *bar baz')
-    (['foo'], 'bar', ['baz'])
+    ([], None, ['foo', 'bar'], None)
+    >>> splitargspec('foo *bar baz **qux')
+    (['foo'], 'bar', ['baz'], 'qux')
     >>> splitargspec('*foo')
-    ([], 'foo', [])
+    ([], 'foo', [], None)
+    >>> splitargspec('**foo')
+    ([], None, [], 'foo')
     """
-    pre, sep, post = spec.partition('*')
+    optkey = None
+    pre, sep, post = spec.partition('**')
+    if sep:
+        posts = post.split()
+        if not posts:
+            raise error.ProgrammingError('no **optkey name provided')
+        if len(posts) > 1:
+            raise error.ProgrammingError('excessive **optkey names provided')
+        optkey = posts[0]
+
+    pre, sep, post = pre.partition('*')
     pres = pre.split()
     posts = post.split()
     if sep:
         if not posts:
             raise error.ProgrammingError('no *varkey name provided')
-        return pres, posts[0], posts[1:]
-    return [], None, pres
+        return pres, posts[0], posts[1:], optkey
+    return [], None, pres, optkey
 
 def buildargsdict(trees, funcname, argspec, keyvaluenode, keynode):
     """Build dict from list containing positional and keyword arguments
 
-    Arguments are specified by a tuple of ``(poskeys, varkey, keys)`` where
+    Arguments are specified by a tuple of ``(poskeys, varkey, keys, optkey)``
+    where
 
     - ``poskeys``: list of names of positional arguments
     - ``varkey``: optional argument name that takes up remainder
     - ``keys``: list of names that can be either positional or keyword arguments
+    - ``optkey``: optional argument name that takes up excess keyword arguments
 
     If ``varkey`` specified, all ``keys`` must be given as keyword arguments.
 
     Invalid keywords, too few positional arguments, or too many positional
     arguments are rejected, but missing keyword arguments are just omitted.
     """
-    poskeys, varkey, keys = argspec
+    poskeys, varkey, keys, optkey = argspec
     kwstart = next((i for i, x in enumerate(trees) if x[0] == keyvaluenode),
                    len(trees))
     if kwstart < len(poskeys):
         raise error.ParseError(_("%(func)s takes at least %(nargs)d positional "
                                  "arguments")
                                % {'func': funcname, 'nargs': len(poskeys)})
-    if not varkey and len(trees) > len(poskeys) + len(keys):
-        raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
+    if not varkey and kwstart > len(poskeys) + len(keys):
+        raise error.ParseError(_("%(func)s takes at most %(nargs)d positional "
+                                 "arguments")
                                % {'func': funcname,
                                   'nargs': len(poskeys) + len(keys)})
-    args = {}
+    args = util.sortdict()
     # consume positional arguments
     for k, x in zip(poskeys, trees[:kwstart]):
         args[k] = x
@@ -146,25 +164,31 @@
         for k, x in zip(keys, trees[len(args):kwstart]):
             args[k] = x
     # remainder should be keyword arguments
+    if optkey:
+        args[optkey] = util.sortdict()
     for x in trees[kwstart:]:
         if x[0] != keyvaluenode or x[1][0] != keynode:
             raise error.ParseError(_("%(func)s got an invalid argument")
                                    % {'func': funcname})
         k = x[1][1]
-        if k not in keys:
+        if k in keys:
+            d = args
+        elif not optkey:
             raise error.ParseError(_("%(func)s got an unexpected keyword "
                                      "argument '%(key)s'")
                                    % {'func': funcname, 'key': k})
-        if k in args:
+        else:
+            d = args[optkey]
+        if k in d:
             raise error.ParseError(_("%(func)s got multiple values for keyword "
                                      "argument '%(key)s'")
                                    % {'func': funcname, 'key': k})
-        args[k] = x[2]
+        d[k] = x[2]
     return args
 
 def unescapestr(s):
     try:
-        return s.decode("string_escape")
+        return util.unescapestr(s)
     except ValueError as e:
         # mangle Python's exception into our format
         raise error.ParseError(str(e).lower())
@@ -265,7 +289,7 @@
     """Compose error message from specified ParseError object
     """
     if len(inst.args) > 1:
-        return _('at %s: %s') % (inst.args[1], inst.args[0])
+        return _('at %d: %s') % (inst.args[1], inst.args[0])
     else:
         return inst.args[0]
 
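
A plain-Python sketch of the keyword routing that the new **optkey adds to
buildargsdict() (illustrative only; ordinary dicts stand in for
util.sortdict): known names land in args, unknown keywords are collected
under args[optkey] instead of being rejected.

    def routekwargs(keys, optkey, kwargs):
        args = {}
        if optkey:
            args[optkey] = {}
        for k, v in kwargs.items():
            if k in keys:
                d = args
            elif not optkey:
                raise TypeError("unexpected keyword argument '%s'" % k)
            else:
                d = args[optkey]
            if k in d:
                raise TypeError("multiple values for '%s'" % k)
            d[k] = v
        return args

    print(routekwargs(['foo'], 'qux', {'foo': 1, 'bar': 2}))
    # -> {'foo': 1, 'qux': {'bar': 2}} (key order may vary)
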
--- a/mercurial/parsers.c	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/parsers.c	Tue Apr 18 12:24:34 2017 -0400
@@ -560,11 +560,11 @@
 }
 
 /*
- * Build a set of non-normal entries from the dirstate dmap
+ * Build a set of non-normal and other parent entries from the dirstate dmap
 */
-static PyObject *nonnormalentries(PyObject *self, PyObject *args)
-{
-	PyObject *dmap, *nonnset = NULL, *fname, *v;
+static PyObject *nonnormalotherparententries(PyObject *self, PyObject *args) {
+	PyObject *dmap, *fname, *v;
+	PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
 	Py_ssize_t pos;
 
 	if (!PyArg_ParseTuple(args, "O!:nonnormalentries",
@@ -575,6 +575,10 @@
 	if (nonnset == NULL)
 		goto bail;
 
+	otherpset = PySet_New(NULL);
+	if (otherpset == NULL)
+		goto bail;
+
 	pos = 0;
 	while (PyDict_Next(dmap, &pos, &fname, &v)) {
 		dirstateTupleObject *t;
@@ -585,15 +589,28 @@
 		}
 		t = (dirstateTupleObject *)v;
 
+		if (t->state == 'n' && t->size == -2) {
+			if (PySet_Add(otherpset, fname) == -1) {
+				goto bail;
+			}
+		}
+
 		if (t->state == 'n' && t->mtime != -1)
 			continue;
 		if (PySet_Add(nonnset, fname) == -1)
 			goto bail;
 	}
 
-	return nonnset;
+	result = Py_BuildValue("(OO)", nonnset, otherpset);
+	if (result == NULL)
+		goto bail;
+	Py_DECREF(nonnset);
+	Py_DECREF(otherpset);
+	return result;
 bail:
 	Py_XDECREF(nonnset);
+	Py_XDECREF(otherpset);
+	Py_XDECREF(result);
 	return NULL;
 }
 
@@ -800,8 +817,8 @@
 {
 	if (self->inlined && pos > 0) {
 		if (self->offsets == NULL) {
-			self->offsets = malloc(self->raw_length *
-					       sizeof(*self->offsets));
+			self->offsets = PyMem_Malloc(self->raw_length *
+					             sizeof(*self->offsets));
 			if (self->offsets == NULL)
 				return (const char *)PyErr_NoMemory();
 			inline_scan(self, self->offsets);
@@ -1014,7 +1031,7 @@
 		self->cache = NULL;
 	}
 	if (self->offsets) {
-		free(self->offsets);
+		PyMem_Free(self->offsets);
 		self->offsets = NULL;
 	}
 	if (self->nt) {
@@ -2149,7 +2166,7 @@
 	int *revs;
 
 	argcount = PySequence_Length(args);
-	revs = malloc(argcount * sizeof(*revs));
+	revs = PyMem_Malloc(argcount * sizeof(*revs));
 	if (argcount > 0 && revs == NULL)
 		return PyErr_NoMemory();
 	len = index_length(self) - 1;
@@ -2220,11 +2237,11 @@
 		goto bail;
 
 done:
-	free(revs);
+	PyMem_Free(revs);
 	return ret;
 
 bail:
-	free(revs);
+	PyMem_Free(revs);
 	Py_XDECREF(ret);
 	return NULL;
 }
@@ -2722,6 +2739,7 @@
 		data += nparents * hashwidth;
 	} else {
 		parents = Py_None;
+		Py_INCREF(parents);
 	}
 
 	if (data + 2 * nmetadata > dataend) {
@@ -2764,8 +2782,7 @@
 	Py_XDECREF(prec);
 	Py_XDECREF(succs);
 	Py_XDECREF(metadata);
-	if (parents != Py_None)
-		Py_XDECREF(parents);
+	Py_XDECREF(parents);
 	return ret;
 }
 
@@ -2814,8 +2831,9 @@
 
 static PyMethodDef methods[] = {
 	{"pack_dirstate", pack_dirstate, METH_VARARGS, "pack a dirstate\n"},
-	{"nonnormalentries", nonnormalentries, METH_VARARGS,
-	"create a set containing non-normal entries of given dirstate\n"},
+	{"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
+	"create a set containing non-normal and other parent entries of given "
+	"dirstate\n"},
 	{"parse_manifest", parse_manifest, METH_VARARGS, "parse a manifest\n"},
 	{"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
 	{"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
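
A pure-Python stand-in showing the new calling contract (illustrative; the
real implementation is the C function above): callers now unpack a
(nonnormal, otherparent) pair instead of a single set. Entries are
(state, mode, size, mtime) tuples as in the dirstate map.

    def nonnormalotherparententries(dmap):
        nonnorm, otherparent = set(), set()
        for fname, (state, mode, size, mtime) in dmap.items():
            if state == 'n' and size == -2:
                otherparent.add(fname)  # merged in from the other parent
            if state == 'n' and mtime != -1:
                continue
            nonnorm.add(fname)
        return nonnorm, otherparent

    dmap = {
        'clean': ('n', 0, 7, 42),    # normal, known mtime: in neither set
        'otherp': ('n', 0, -2, 42),  # size -2 marks the other parent
        'merged': ('m', 0, 0, -1),   # non-normal state
    }
    nonnorm, otherp = nonnormalotherparententries(dmap)
    print(sorted(nonnorm), sorted(otherp))  # ['merged'] ['otherp']
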
--- a/mercurial/patch.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/patch.py	Tue Apr 18 12:24:34 2017 -0400
@@ -34,14 +34,16 @@
     mail,
     mdiff,
     pathutil,
+    pycompat,
     scmutil,
     similar,
     util,
+    vfs as vfsmod,
 )
 stringio = util.stringio
 
-gitre = re.compile('diff --git a/(.*) b/(.*)')
-tabsplitter = re.compile(r'(\t+|[^\t]+)')
+gitre = re.compile(br'diff --git a/(.*) b/(.*)')
+tabsplitter = re.compile(br'(\t+|[^\t]+)')
 
 class PatchError(Exception):
     pass
@@ -209,7 +211,7 @@
 
     data = {}
     fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
-    tmpfp = os.fdopen(fd, 'w')
+    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
     try:
         msg = email.Parser.Parser().parse(fileobj)
 
@@ -448,7 +450,7 @@
 class fsbackend(abstractbackend):
     def __init__(self, ui, basedir):
         super(fsbackend, self).__init__(ui)
-        self.opener = scmutil.opener(basedir)
+        self.opener = vfsmod.vfs(basedir)
 
     def _join(self, f):
         return os.path.join(self.opener.base, f)
@@ -559,7 +561,7 @@
         else:
             if self.opener is None:
                 root = tempfile.mkdtemp(prefix='hg-patch-')
-                self.opener = scmutil.opener(root)
+                self.opener = vfsmod.vfs(root)
             # Avoid filename issues with these simple names
             fn = str(self.created)
             self.opener.write(fn, data)
@@ -735,7 +737,7 @@
         for x in self.rej:
             for l in x.hunk:
                 lines.append(l)
-                if l[-1] != '\n':
+                if l[-1:] != '\n':
                     lines.append("\n\ No newline at end of file\n")
         self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
 
@@ -1055,7 +1057,7 @@
                 ncpatchfp = None
                 try:
                     # Write the initial patch
-                    f = os.fdopen(patchfd, "w")
+                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                     chunk.header.write(f)
                     chunk.write(f)
                     f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
@@ -1063,7 +1065,8 @@
                     # Start the editor and wait for it to complete
                     editor = ui.geteditor()
                     ret = ui.system("%s \"%s\"" % (editor, patchfn),
-                                    environ={'HGUSER': ui.username()})
+                                    environ={'HGUSER': ui.username()},
+                                    blockedtag='filterpatch')
                     if ret != 0:
                         ui.warn(_("editor exited with exit code %d\n") % ret)
                         continue
@@ -2207,13 +2210,15 @@
                                             'ignoreblanklines')
     if formatchanging:
         buildopts['text'] = opts and opts.get('text')
-        buildopts['nobinary'] = get('nobinary', forceplain=False)
+        binary = None if opts is None else opts.get('binary')
+        buildopts['nobinary'] = (not binary if binary is not None
+                                 else get('nobinary', forceplain=False))
         buildopts['noprefix'] = get('noprefix', forceplain=False)
 
-    return mdiff.diffopts(**buildopts)
+    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
 
-def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
-         losedatafn=None, prefix='', relroot='', copy=None):
+def diff(repo, node1=None, node2=None, match=None, changes=None,
+         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
     '''yields diff of changes to files between two nodes, or node and
     working directory.
 
@@ -2236,6 +2241,24 @@
 
     copy, if not empty, should contain mappings {dst@y: src@x} of copy
     information.'''
+    for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
+                                   changes=changes, opts=opts,
+                                   losedatafn=losedatafn, prefix=prefix,
+                                   relroot=relroot, copy=copy):
+        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
+        if header and (text or len(header) > 1):
+            yield '\n'.join(header) + '\n'
+        if text:
+            yield text
+
+def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
+              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
+    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
+    where `header` is a list of diff headers and `hunks` is an iterable of
+    (`hunkrange`, `hunklines`) tuples.
+
+    See diff() for the meaning of parameters.
+    """
 
     if opts is None:
         opts = mdiff.defaultopts
@@ -2531,11 +2554,12 @@
         elif revs and not repo.ui.quiet:
             header.append(diffline(path1, revs))
 
-        if binary and opts.git and not opts.nobinary:
+        if binary and opts.git and not opts.nobinary and not opts.text:
             text = mdiff.b85diff(content1, content2)
             if text:
                 header.append('index %s..%s' %
                               (gitindex(content1), gitindex(content2)))
+            hunks = (None, [text]),
         else:
             if opts.git and opts.index > 0:
                 flag = flag1
@@ -2546,13 +2570,11 @@
                                gitindex(content2)[0:opts.index],
                                gitmode[flag]))
 
-            text = mdiff.unidiff(content1, date1,
-                                 content2, date2,
-                                 path1, path2, opts=opts)
-        if header and (text or len(header) > 1):
-            yield '\n'.join(header) + '\n'
-        if text:
-            yield text
+            uheaders, hunks = mdiff.unidiff(content1, date1,
+                                            content2, date2,
+                                            path1, path2, opts=opts)
+            header.extend(uheaders)
+        yield header, hunks
 
 def diffstatsum(stats):
     maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
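
A minimal consumer of the new diffhunks() generator (illustrative; repo
setup is assumed, and hunkrange is None for the git binary hunks produced
above):

    import sys
    from mercurial import patch

    def printdiff(repo, node1=None, node2=None):
        for header, hunks in patch.diffhunks(repo, node1=node1,
                                             node2=node2):
            if header:
                sys.stdout.write('\n'.join(header) + '\n')
            for hunkrange, hunklines in hunks:
                for line in hunklines:
                    sys.stdout.write(line)
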
--- a/mercurial/phases.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/phases.py	Tue Apr 18 12:24:34 2017 -0400
@@ -113,8 +113,10 @@
     short,
 )
 from . import (
-    encoding,
     error,
+    smartset,
+    txnutil,
+    util,
 )
 
 allphases = public, draft, secret = range(3)
@@ -136,15 +138,7 @@
     dirty = False
     roots = [set() for i in allphases]
     try:
-        f = None
-        if 'HG_PENDING' in encoding.environ:
-            try:
-                f = repo.svfs('phaseroots.pending')
-            except IOError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
-        if f is None:
-            f = repo.svfs('phaseroots')
+        f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
         try:
             for line in f:
                 phase, nh = line.split()
@@ -170,6 +164,27 @@
             self.filterunknown(repo)
             self.opener = repo.svfs
 
+    def getrevset(self, repo, phases):
+        """return a smartset for the given phases"""
+        self.loadphaserevs(repo) # ensure phase's sets are loaded
+
+        if self._phasesets and all(self._phasesets[p] is not None
+                                   for p in phases):
+            # fast path - use _phasesets
+            revs = self._phasesets[phases[0]]
+            if len(phases) > 1:
+                revs = revs.copy() # only copy when needed
+                for p in phases[1:]:
+                    revs.update(self._phasesets[p])
+            if repo.changelog.filteredrevs:
+                revs = revs - repo.changelog.filteredrevs
+            return smartset.baseset(revs)
+        else:
+            # slow path - enumerate all revisions
+            phase = self.phase
+            revs = (r for r in repo if phase(repo, r) in phases)
+            return smartset.generatorset(revs, iterasc=True)
+
     def copy(self):
         # Shallow copy meant to ensure isolation in
         # advance/retractboundary(), nothing more.
@@ -199,7 +214,7 @@
         self._phaserevs = revs
         self._populatephaseroots(repo)
         for phase in trackedphases:
-            roots = map(repo.changelog.rev, self.phaseroots[phase])
+            roots = list(map(repo.changelog.rev, self.phaseroots[phase]))
             if roots:
                 for rev in roots:
                     revs[rev] = phase
@@ -210,12 +225,8 @@
         """ensure phase information is loaded in the object"""
         if self._phaserevs is None:
             try:
-                if repo.ui.configbool('experimental',
-                                      'nativephaseskillswitch'):
-                    self._computephaserevspure(repo)
-                else:
-                    res = self._getphaserevsnative(repo)
-                    self._phaserevs, self._phasesets = res
+                res = self._getphaserevsnative(repo)
+                self._phaserevs, self._phasesets = res
             except AttributeError:
                 self._computephaserevspure(repo)
 
@@ -376,7 +387,8 @@
 
 def listphases(repo):
     """List phases root for serialization over pushkey"""
-    keys = {}
+    # Use ordered dictionary so behavior is deterministic.
+    keys = util.sortdict()
     value = '%i' % draft
     for root in repo._phasecache.phaseroots[draft]:
         keys[hex(root)] = value
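
Usage sketch for the new phasecache.getrevset() (illustrative; repo setup
assumed). The result is a smartset, so it composes lazily with the rest of
the revset machinery:

    from mercurial import phases

    def nonpublicrevs(repo):
        # the fast path consults the native _phasesets when loaded;
        # otherwise every revision is scanned
        return repo._phasecache.getrevset(repo,
                                          (phases.draft, phases.secret))
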
--- a/mercurial/policy.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/policy.py	Tue Apr 18 12:24:34 2017 -0400
@@ -19,9 +19,9 @@
 #    py - only load pure Python modules
 #
 # By default, require the C extensions for performance reasons.
-policy = 'c'
-policynoc = ('cffi', 'cffi-allow', 'py')
-policynocffi = ('c', 'py')
+policy = b'c'
+policynoc = (b'cffi', b'cffi-allow', b'py')
+policynocffi = (b'c', b'py')
 
 try:
     from . import __modulepolicy__
@@ -39,7 +39,11 @@
 # Our C extensions aren't yet compatible with Python 3. So use pure Python
 # on Python 3 for now.
 if sys.version_info[0] >= 3:
-    policy = 'py'
+    policy = b'py'
 
 # Environment variable can always force settings.
-policy = os.environ.get('HGMODULEPOLICY', policy)
+if sys.version_info[0] >= 3:
+    if 'HGMODULEPOLICY' in os.environ:
+        policy = os.environ['HGMODULEPOLICY'].encode('utf-8')
+else:
+    policy = os.environ.get('HGMODULEPOLICY', policy)
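
Quick check of the override behavior (illustrative; must run in a fresh
interpreter, before mercurial.policy is first imported). The resolved
policy is now a bytes value on both Python versions:

    import os
    os.environ['HGMODULEPOLICY'] = 'py'  # force pure-Python modules

    from mercurial import policy
    print(policy.policy)                 # b'py'
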
--- a/mercurial/posix.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/posix.py	Tue Apr 18 12:24:34 2017 -0400
@@ -105,7 +105,7 @@
             fp = open(f)
             data = fp.read()
             fp.close()
-            os.unlink(f)
+            unlink(f)
             try:
                 os.symlink(data, f)
             except OSError:
@@ -118,7 +118,7 @@
     if stat.S_ISLNK(s):
         # switch link to file
         data = os.readlink(f)
-        os.unlink(f)
+        unlink(f)
         fp = open(f, "w")
         fp.write(data)
         fp.close()
@@ -181,15 +181,15 @@
                     except OSError as e:
                         if e.errno != errno.ENOENT:
                             raise
-                        file(checknoexec, 'w').close() # might fail
+                        open(checknoexec, 'w').close() # might fail
                         m = os.stat(checknoexec).st_mode
                     if m & EXECFLAGS == 0:
                         # check-exec is exec and check-no-exec is not exec
                         return True
                     # checknoexec exists but is exec - delete it
-                    os.unlink(checknoexec)
+                    unlink(checknoexec)
                 # checkisexec exists but is not exec - delete it
-                os.unlink(checkisexec)
+                unlink(checkisexec)
 
             # check using one file, leave it as checkisexec
             checkdir = cachedir
@@ -210,7 +210,7 @@
                     return True
         finally:
             if fn is not None:
-                os.unlink(fn)
+                unlink(fn)
     except (IOError, OSError):
         # we don't care, the user probably won't be able to commit anyway
         return False
@@ -230,13 +230,16 @@
         else:
             checkdir = path
             cachedir = None
-        name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
+        fscheckdir = pycompat.fsdecode(checkdir)
+        name = tempfile.mktemp(dir=fscheckdir,
+                               prefix=r'checklink-')
+        name = pycompat.fsencode(name)
         try:
             fd = None
             if cachedir is None:
-                fd = tempfile.NamedTemporaryFile(dir=checkdir,
-                                                 prefix='hg-checklink-')
-                target = os.path.basename(fd.name)
+                fd = tempfile.NamedTemporaryFile(dir=fscheckdir,
+                                                 prefix=r'hg-checklink-')
+                target = pycompat.fsencode(os.path.basename(fd.name))
             else:
                 # create a fixed file to link to; doesn't matter if it
                 # already exists.
@@ -245,12 +248,12 @@
             try:
                 os.symlink(target, name)
                 if cachedir is None:
-                    os.unlink(name)
+                    unlink(name)
                 else:
                     try:
                         os.rename(name, checklink)
                     except OSError:
-                        os.unlink(name)
+                        unlink(name)
                 return True
             except OSError as inst:
                 # link creation might race, try again
@@ -265,7 +268,7 @@
         except OSError as inst:
             # sshfs might report failure while successfully creating the link
             if inst[0] == errno.EIO and os.path.exists(name):
-                os.unlink(name)
+                unlink(name)
             return False
 
 def checkosfilename(path):
@@ -408,7 +411,7 @@
         return '"%s"' % s
     global _needsshellquote
     if _needsshellquote is None:
-        _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
+        _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search
     if s and not _needsshellquote(s):
         # "s" shouldn't have to be quoted
         return s
@@ -533,19 +536,6 @@
 def makedir(path, notindexed):
     os.mkdir(path)
 
-def unlinkpath(f, ignoremissing=False):
-    """unlink and remove the directory if it is empty"""
-    try:
-        os.unlink(f)
-    except OSError as e:
-        if not (ignoremissing and e.errno == errno.ENOENT):
-            raise
-    # try removing directories that might now be empty
-    try:
-        os.removedirs(os.path.dirname(f))
-    except OSError:
-        pass
-
 def lookupreg(key, name=None, scope=None):
     return None
 
--- a/mercurial/profiling.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/profiling.py	Tue Apr 18 12:24:34 2017 -0400
@@ -8,7 +8,6 @@
 from __future__ import absolute_import, print_function
 
 import contextlib
-import time
 
 from .i18n import _
 from . import (
@@ -66,7 +65,7 @@
     collapse_recursion = True
     thread = flamegraph.ProfileThread(fp, 1.0 / freq,
                                       filter_, collapse_recursion)
-    start_time = time.clock()
+    start_time = util.timer()
     try:
         thread.start()
         yield
@@ -74,7 +73,7 @@
         thread.stop()
         thread.join()
         print('Collected %d stack frames (%d unique) in %2.2f seconds.' % (
-            time.clock() - start_time, thread.num_frames(),
+            util.timer() - start_time, thread.num_frames(),
             thread.num_frames(unique=True)))
 
 @contextlib.contextmanager
@@ -103,6 +102,7 @@
             'bymethod': statprof.DisplayFormats.ByMethod,
             'hotpath': statprof.DisplayFormats.Hotpath,
             'json': statprof.DisplayFormats.Json,
+            'chrome': statprof.DisplayFormats.Chrome,
         }
 
         if profformat in formats:
@@ -111,7 +111,23 @@
             ui.warn(_('unknown profiler output format: %s\n') % profformat)
             displayformat = statprof.DisplayFormats.Hotpath
 
-        statprof.display(fp, data=data, format=displayformat)
+        kwargs = {}
+
+        def fraction(s):
+            if s.endswith('%'):
+                v = float(s[:-1]) / 100
+            else:
+                v = float(s)
+            if 0 <= v <= 1:
+                return v
+            raise ValueError(s)
+
+        if profformat == 'chrome':
+            showmin = ui.configwith(fraction, 'profiling', 'showmin', 0.005)
+            showmax = ui.configwith(fraction, 'profiling', 'showmax', 0.999)
+            kwargs.update(minthreshold=showmin, maxthreshold=showmax)
+
+        statprof.display(fp, data=data, format=displayformat, **kwargs)
 
 @contextlib.contextmanager
 def profile(ui):
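
The fraction() parser in isolation (copied here for illustration), as wired
to the new profiling.showmin/showmax options: plain floats and percentages
are both accepted, and values outside [0, 1] are rejected.

    def fraction(s):
        if s.endswith('%'):
            v = float(s[:-1]) / 100
        else:
            v = float(s)
        if 0 <= v <= 1:
            return v
        raise ValueError(s)

    for spec in ('0.005', '0.5%', '99.9%'):
        print('%s -> %s' % (spec, fraction(spec)))
    # 0.005 -> 0.005, 0.5% -> 0.005, 99.9% -> 0.999
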
--- a/mercurial/progress.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/progress.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+import errno
 import threading
 import time
 
@@ -60,6 +61,24 @@
     # i18n: format X years and YY weeks as "XyYYw"
     return _("%dy%02dw") % (years, weeks)
 
+# file_write() and file_flush() of Python 2 do not restart on EINTR if
+# the file is attached to a "slow" device (e.g. a terminal) and raise
+# IOError. We cannot know how many bytes would be written by file_write(),
+# but a progress text is known to be short enough to be written by a
+# single write() syscall, so we can just retry file_write() with the whole
+# text. (issue5532)
+#
+# This should be a short-term workaround. We'll need to fix every occurrence
+# of write() to a terminal or pipe.
+def _eintrretry(func, *args):
+    while True:
+        try:
+            return func(*args)
+        except IOError as err:
+            if err.errno == errno.EINTR:
+                continue
+            raise
+
 class progbar(object):
     def __init__(self, ui):
         self.ui = ui
@@ -157,14 +176,14 @@
             out = spacejoin(head, prog, tail)
         else:
             out = spacejoin(head, tail)
-        self.ui.ferr.write('\r' + encoding.trim(out, termwidth))
+        self._writeerr('\r' + encoding.trim(out, termwidth))
         self.lasttopic = topic
-        self.ui.ferr.flush()
+        self._flusherr()
 
     def clear(self):
         if not self.printed or not self.lastprint or not shouldprint(self.ui):
             return
-        self.ui.ferr.write('\r%s\r' % (' ' * self.width()))
+        self._writeerr('\r%s\r' % (' ' * self.width()))
         if self.printed:
             # force immediate re-paint of progress bar
             self.lastprint = 0
@@ -175,8 +194,14 @@
         if self.ui.configbool('progress', 'clear-complete', default=True):
             self.clear()
         else:
-            self.ui.ferr.write('\n')
-        self.ui.ferr.flush()
+            self._writeerr('\n')
+        self._flusherr()
+
+    def _flusherr(self):
+        _eintrretry(self.ui.ferr.flush)
+
+    def _writeerr(self, msg):
+        _eintrretry(self.ui.ferr.write, msg)
 
     def width(self):
         tw = self.ui.termwidth()
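
Usage sketch of the retry pattern (illustrative; the helper is restated so
the snippet runs standalone): wrap both the write and the flush so a signal
arriving during either syscall (e.g. SIGWINCH on a terminal resize) cannot
abort progress drawing with EINTR (issue5532).

    import errno
    import sys

    def _eintrretry(func, *args):
        while True:
            try:
                return func(*args)
            except IOError as err:
                if err.errno == errno.EINTR:
                    continue
                raise

    _eintrretry(sys.stderr.write, '\rprogress: 1/10')
    _eintrretry(sys.stderr.flush)
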
--- a/mercurial/pure/bdiff.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/pure/bdiff.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import array
 import difflib
 import re
 import struct
@@ -55,15 +54,9 @@
     r.append(prev)
     return r
 
-def _tostring(c):
-    if type(c) is array.array:
-        # this copy overhead isn't ideal
-        return c.tostring()
-    return str(c)
-
 def bdiff(a, b):
-    a = _tostring(a).splitlines(True)
-    b = _tostring(b).splitlines(True)
+    a = bytes(a).splitlines(True)
+    b = bytes(b).splitlines(True)
 
     if not a:
         s = "".join(b)
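
A smoke test of the pure bdiff after the bytes() normalization
(illustrative; assumes the pure module imports standalone). The returned
delta is a packed sequence of (start, end, length) opcodes followed by
replacement text:

    from mercurial.pure import bdiff

    delta = bdiff.bdiff(b'a\nb\nc\n', b'a\nx\nc\n')
    print(len(delta))  # non-zero: one opcode replacing the middle line
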
--- a/mercurial/pure/osutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/pure/osutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -338,12 +338,12 @@
                 _kernel32.CloseHandle(fh)
                 _raiseioerror(name)
 
-            f = os.fdopen(fd, mode, bufsize)
+            f = os.fdopen(fd, pycompat.sysstr(mode), bufsize)
             # unfortunately, f.name is '<fdopen>' at this point -- so we store
             # the name on this wrapper. We cannot just assign to f.name,
             # because that attribute is read-only.
-            object.__setattr__(self, 'name', name)
-            object.__setattr__(self, '_file', f)
+            object.__setattr__(self, r'name', name)
+            object.__setattr__(self, r'_file', f)
 
         def __iter__(self):
             return self._file
--- a/mercurial/pure/parsers.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/pure/parsers.py	Tue Apr 18 12:24:34 2017 -0400
@@ -14,6 +14,7 @@
 from . import pycompat
 stringio = pycompat.stringio
 
+
 _pack = struct.pack
 _unpack = struct.unpack
 _compress = zlib.compress
@@ -34,7 +35,7 @@
     return int(q & 0xFFFF)
 
 def offset_type(offset, type):
-    return long(long(offset) << 16 | type)
+    return int(int(offset) << 16 | type)
 
 class BaseIndexObject(object):
     def __len__(self):
--- a/mercurial/pycompat.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/pycompat.py	Tue Apr 18 12:24:34 2017 -0400
@@ -18,30 +18,33 @@
 ispy3 = (sys.version_info[0] >= 3)
 
 if not ispy3:
+    import cookielib
     import cPickle as pickle
-    import cStringIO as io
     import httplib
     import Queue as _queue
     import SocketServer as socketserver
-    import urlparse
-    urlunquote = urlparse.unquote
     import xmlrpclib
 else:
+    import http.cookiejar as cookielib
     import http.client as httplib
-    import io
     import pickle
     import queue as _queue
     import socketserver
-    import urllib.parse as urlparse
-    urlunquote = urlparse.unquote_to_bytes
     import xmlrpc.client as xmlrpclib
 
+def identity(a):
+    return a
+
 if ispy3:
     import builtins
     import functools
+    import io
+    import struct
+
     fsencode = os.fsencode
     fsdecode = os.fsdecode
     # A bytes version of os.name.
+    oslinesep = os.linesep.encode('ascii')
     osname = os.name.encode('ascii')
     ospathsep = os.pathsep.encode('ascii')
     ossep = os.sep.encode('ascii')
@@ -55,6 +58,8 @@
     sysexecutable = sys.executable
     if sysexecutable:
         sysexecutable = os.fsencode(sysexecutable)
+    stringio = io.BytesIO
+    maplist = lambda *args: list(map(*args))
 
     # TODO: .buffer might not exist if std streams were replaced; we'll need
     # a silly wrapper to make a bytes stream backed by a unicode one.
@@ -72,6 +77,81 @@
     if getattr(sys, 'argv', None) is not None:
         sysargv = list(map(os.fsencode, sys.argv))
 
+    bytechr = struct.Struct('>B').pack
+
+    class bytestr(bytes):
+        """A bytes which mostly acts as a Python 2 str
+
+        >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
+        (b'', b'foo', b'ascii', b'1')
+        >>> s = bytestr(b'foo')
+        >>> assert s is bytestr(s)
+
+        There's no implicit conversion from non-ascii str as its encoding is
+        unknown:
+
+        >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
+        Traceback (most recent call last):
+          ...
+        UnicodeEncodeError: ...
+
+        Comparison between bytestr and bytes should work:
+
+        >>> assert bytestr(b'foo') == b'foo'
+        >>> assert b'foo' == bytestr(b'foo')
+        >>> assert b'f' in bytestr(b'foo')
+        >>> assert bytestr(b'f') in b'foo'
+
+        Sliced elements should be bytes, not integer:
+
+        >>> s[1], s[:2]
+        (b'o', b'fo')
+        >>> list(s), list(reversed(s))
+        ([b'f', b'o', b'o'], [b'o', b'o', b'f'])
+
+        As bytestr type isn't propagated across operations, you need to cast
+        bytes to bytestr explicitly:
+
+        >>> s = bytestr(b'foo').upper()
+        >>> t = bytestr(s)
+        >>> s[0], t[0]
+        (70, b'F')
+
+        Be careful not to pass a bytestr object to a function that
+        expects bytearray-like behavior.
+
+        >>> t = bytes(t)  # cast to bytes
+        >>> assert type(t) is bytes
+        """
+
+        def __new__(cls, s=b''):
+            if isinstance(s, bytestr):
+                return s
+            if not isinstance(s, (bytes, bytearray)):
+                s = str(s).encode(u'ascii')
+            return bytes.__new__(cls, s)
+
+        def __getitem__(self, key):
+            s = bytes.__getitem__(self, key)
+            if not isinstance(s, bytes):
+                s = bytechr(s)
+            return s
+
+        def __iter__(self):
+            return iterbytestr(bytes.__iter__(self))
+
+    def iterbytestr(s):
+        """Iterate bytes as if it were a str object of Python 2"""
+        return map(bytechr, s)
+
+    def sysbytes(s):
+        """Convert an internal str (e.g. keyword, __doc__) back to bytes
+
+        This never raises UnicodeEncodeError, but only ASCII characters
+        can be round-tripped by sysstr(sysbytes(s)).
+        """
+        return s.encode(u'utf-8')
+
     def sysstr(s):
         """Return a keyword str to be passed to Python functions such as
         getattr() and str.encode()
@@ -96,6 +176,10 @@
     hasattr = _wrapattrfunc(builtins.hasattr)
     setattr = _wrapattrfunc(builtins.setattr)
     xrange = builtins.range
+    unicode = str
+
+    def open(name, mode='r', buffering=-1):
+        return builtins.open(name, sysstr(mode), buffering)
 
     # getopt.getopt() on Python 3 deals with unicodes internally so we cannot
     # pass bytes there. Passing unicodes will result in unicodes as return
@@ -132,8 +216,13 @@
         return [a.encode('latin-1') for a in ret]
 
 else:
-    def sysstr(s):
-        return s
+    import cStringIO
+
+    bytechr = chr
+    bytestr = str
+    iterbytestr = iter
+    sysbytes = identity
+    sysstr = identity
 
     # Partial backport from os.py in Python 3, which only accepts bytes.
     # In Python 2, our paths should only ever be bytes, a unicode path
@@ -147,18 +236,15 @@
 
     # In Python 2, fsdecode() is very likely to receive bytes. So it's
     # better not to touch the Python 2 part as it's already working fine.
-    def fsdecode(filename):
-        return filename
+    fsdecode = identity
 
     def getoptb(args, shortlist, namelist):
         return getopt.getopt(args, shortlist, namelist)
 
-    def strkwargs(dic):
-        return dic
+    strkwargs = identity
+    byteskwargs = identity
 
-    def byteskwargs(dic):
-        return dic
-
+    oslinesep = os.linesep
     osname = os.name
     ospathsep = os.pathsep
     ossep = os.sep
@@ -172,8 +258,9 @@
     getcwd = os.getcwd
     sysexecutable = sys.executable
     shlexsplit = shlex.split
+    stringio = cStringIO.StringIO
+    maplist = map
 
-stringio = io.StringIO
 empty = _queue.Empty
 queue = _queue.Queue
 
@@ -188,6 +275,10 @@
             (item.replace(sysstr('_'), sysstr('')).lower(), (origin, item))
             for item in items)
 
+    def _registeralias(self, origin, attr, name):
+        """Alias ``origin``.``attr`` as ``name``"""
+        self._aliases[sysstr(name)] = (origin, sysstr(attr))
+
     def __getattr__(self, name):
         try:
             origin, item = self._aliases[name]
@@ -205,6 +296,7 @@
     import SimpleHTTPServer
     import urllib2
     import urllib
+    import urlparse
     urlreq._registeraliases(urllib, (
         "addclosehook",
         "addinfourl",
@@ -235,6 +327,10 @@
         "Request",
         "urlopen",
     ))
+    urlreq._registeraliases(urlparse, (
+        "urlparse",
+        "urlunparse",
+    ))
     urlerr._registeraliases(urllib2, (
         "HTTPError",
         "URLError",
@@ -251,11 +347,19 @@
     ))
 
 else:
+    import urllib.parse
+    urlreq._registeraliases(urllib.parse, (
+        "splitattr",
+        "splitpasswd",
+        "splitport",
+        "splituser",
+        "urlparse",
+        "urlunparse",
+    ))
+    urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote")
     import urllib.request
     urlreq._registeraliases(urllib.request, (
         "AbstractHTTPHandler",
-        "addclosehook",
-        "addinfourl",
         "BaseHandler",
         "build_opener",
         "FileHandler",
@@ -269,16 +373,15 @@
         "HTTPDigestAuthHandler",
         "HTTPPasswordMgrWithDefaultRealm",
         "ProxyHandler",
-        "quote",
         "Request",
-        "splitattr",
-        "splitpasswd",
-        "splitport",
-        "splituser",
-        "unquote",
         "url2pathname",
         "urlopen",
     ))
+    import urllib.response
+    urlreq._registeraliases(urllib.response, (
+        "addclosehook",
+        "addinfourl",
+    ))
     import urllib.error
     urlerr._registeraliases(urllib.error, (
         "HTTPError",
@@ -291,3 +394,19 @@
         "SimpleHTTPRequestHandler",
         "CGIHTTPRequestHandler",
     ))
+
+    # urllib.parse.quote() accepts both str and bytes, decodes bytes
+    # (if necessary), and returns str. This is wonky. We provide a custom
+    # implementation that only accepts bytes and emits bytes.
+    def quote(s, safe=r'/'):
+        s = urllib.parse.quote_from_bytes(s, safe=safe)
+        return s.encode('ascii', 'strict')
+
+    # urllib.parse.urlencode() returns str. We use this function to make
+    # sure we return bytes.
+    def urlencode(query, doseq=False):
+        s = urllib.parse.urlencode(query, doseq=doseq)
+        return s.encode('ascii')
+
+    urlreq.quote = quote
+    urlreq.urlencode = urlencode
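
Sketch of the byte-oriented wrappers installed above (illustrative,
Python 3 only); unlike the stdlib versions, both take and return bytes:

    from mercurial import pycompat

    if pycompat.ispy3:
        print(pycompat.urlreq.quote(b'a b/c'))          # b'a%20b/c'
        print(pycompat.urlreq.urlencode({b'k': b'v'}))  # b'k=v'
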
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/rcutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,98 @@
+# rcutil.py - utilities about config paths, special config sections etc.
+#
+#  Copyright Mercurial Contributors
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import os
+
+from . import (
+    encoding,
+    osutil,
+    pycompat,
+    util,
+)
+
+if pycompat.osname == 'nt':
+    from . import scmwindows as scmplatform
+else:
+    from . import scmposix as scmplatform
+
+systemrcpath = scmplatform.systemrcpath
+userrcpath = scmplatform.userrcpath
+
+def _expandrcpath(path):
+    '''path could be a file or a directory. return a list of file paths'''
+    p = util.expandpath(path)
+    if os.path.isdir(p):
+        join = os.path.join
+        return [join(p, f) for f, k in osutil.listdir(p) if f.endswith('.rc')]
+    return [p]
+
+def envrcitems(env=None):
+    '''Return [(section, name, value, source)] config items.
+
+    The config items are extracted from environment variables specified by env,
+    used to override systemrc, but not userrc.
+
+    If env is not provided, encoding.environ will be used.
+    '''
+    if env is None:
+        env = encoding.environ
+    checklist = [
+        ('EDITOR', 'ui', 'editor'),
+        ('VISUAL', 'ui', 'editor'),
+        ('PAGER', 'pager', 'pager'),
+    ]
+    result = []
+    for envname, section, configname in checklist:
+        if envname not in env:
+            continue
+        result.append((section, configname, env[envname], '$%s' % envname))
+    return result
+
+def defaultrcpath():
+    '''return rc paths in default.d'''
+    path = []
+    defaultpath = os.path.join(util.datapath, 'default.d')
+    if os.path.isdir(defaultpath):
+        path = _expandrcpath(defaultpath)
+    return path
+
+def rccomponents():
+    '''return an ordered [(type, obj)] about where to load configs.
+
+    respect $HGRCPATH. if $HGRCPATH is empty, only .hg/hgrc of current repo is
+    used. if $HGRCPATH is not set, the platform default will be used.
+
+    if a directory is provided, *.rc files under it will be used.
+
+    type could be either 'path' or 'items'. if type is 'path', obj is a string
+    and is the config file path. if type is 'items', obj is a list of (section,
+    name, value, source) tuples that should fill the config directly.
+    '''
+    envrc = ('items', envrcitems())
+
+    if 'HGRCPATH' in encoding.environ:
+        # assume HGRCPATH is all about user configs, so items derived
+        # from the environment can be overridden
+        _rccomponents = [envrc]
+        for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
+            if not p:
+                continue
+            _rccomponents.extend(('path', p) for p in _expandrcpath(p))
+    else:
+        normpaths = lambda paths: [('path', os.path.normpath(p)) for p in paths]
+        _rccomponents = normpaths(defaultrcpath() + systemrcpath())
+        _rccomponents.append(envrc)
+        _rccomponents.extend(normpaths(userrcpath()))
+    return _rccomponents
+
+def defaultpagerenv():
+    '''return a dict of default environment variables and their values,
+    intended to be set before starting a pager.
+    '''
+    return {'LESS': 'FRX', 'LV': '-c'}
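
Consumer sketch per the rccomponents() docstring (illustrative): 'path'
entries name config files, while 'items' entries carry
(section, name, value, source) tuples ready to be applied directly.

    from mercurial import rcutil

    for typ, obj in rcutil.rccomponents():
        if typ == 'path':
            print('read config file: %s' % obj)
        elif typ == 'items':
            for section, name, value, source in obj:
                print('set %s.%s=%s (from %s)'
                      % (section, name, value, source))
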
--- a/mercurial/registrar.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/registrar.py	Tue Apr 18 12:24:34 2017 -0400
@@ -56,9 +56,9 @@
             raise error.ProgrammingError(msg)
 
         if func.__doc__ and not util.safehasattr(func, '_origdoc'):
-            doc = func.__doc__.strip()
+            doc = pycompat.sysbytes(func.__doc__).strip()
             func._origdoc = doc
-            func.__doc__ = self._formatdoc(decl, doc)
+            func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
 
         self._table[name] = func
         self._extrasetup(name, func, *args, **kwargs)
@@ -127,7 +127,7 @@
     Otherwise, explicit 'revset.loadpredicate()' is needed.
     """
     _getname = _funcregistrarbase._parsefuncdecl
-    _docformat = pycompat.sysstr("``%s``\n    %s")
+    _docformat = "``%s``\n    %s"
 
     def _extrasetup(self, name, func, safe=False, takeorder=False):
         func._safe = safe
@@ -166,7 +166,7 @@
     Otherwise, explicit 'fileset.loadpredicate()' is needed.
     """
     _getname = _funcregistrarbase._parsefuncdecl
-    _docformat = pycompat.sysstr("``%s``\n    %s")
+    _docformat = "``%s``\n    %s"
 
     def _extrasetup(self, name, func, callstatus=False, callexisting=False):
         func._callstatus = callstatus
@@ -175,7 +175,7 @@
 class _templateregistrarbase(_funcregistrarbase):
     """Base of decorator to register functions as template specific one
     """
-    _docformat = pycompat.sysstr(":%s: %s")
+    _docformat = ":%s: %s"
 
 class templatekeyword(_templateregistrarbase):
     """Decorator to register template keyword
@@ -234,7 +234,7 @@
 
         templatefunc = registrar.templatefunc()
 
-        @templatefunc('myfunc(arg1, arg2[, arg3])')
+        @templatefunc('myfunc(arg1, arg2[, arg3])', argspec='arg1 arg2 arg3')
         def myfuncfunc(context, mapping, args):
             '''Explanation of this template function ....
             '''
@@ -242,6 +242,10 @@
 
     The first string argument is used also in online help.
 
+    If optional 'argspec' is defined, the function will receive 'args' as
+    a dict of named arguments. Otherwise 'args' is a list of positional
+    arguments.
+
     'templatefunc' instance in example above can be used to
     decorate multiple functions.
 
@@ -252,3 +256,6 @@
     Otherwise, explicit 'templater.loadfunction()' is needed.
     """
     _getname = _funcregistrarbase._parsefuncdecl
+
+    def _extrasetup(self, name, func, argspec=None):
+        func._argspec = argspec
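
Sketch of the argspec opt-in (illustrative; myfunc is the hypothetical name
from the docstring above, not a real template function):

    from mercurial import registrar

    templatefunc = registrar.templatefunc()

    @templatefunc('myfunc(arg1, arg2[, arg3])', argspec='arg1 arg2 arg3')
    def myfuncfunc(context, mapping, args):
        # with argspec set, args arrives as a dict of named argument
        # trees; optional arguments that were omitted are simply absent
        if 'arg3' in args:
            pass  # the optional argument was supplied
        return ''
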
--- a/mercurial/repair.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/repair.py	Tue Apr 18 12:24:34 2017 -0400
@@ -10,22 +10,15 @@
 
 import errno
 import hashlib
-import stat
-import tempfile
-import time
 
 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
-    changelog,
     error,
     exchange,
-    manifest,
     obsolete,
-    revlog,
-    scmutil,
     util,
 )
 
@@ -163,8 +156,7 @@
     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr  # avoid carrying reference to transaction for nothing
-        msg = _('programming error: cannot strip from inside a transaction')
-        raise error.Abort(msg, hint=_('contact your extension maintainer'))
+        raise error.ProgrammingError('cannot strip from inside a transaction')
 
     try:
         with repo.transaction("strip") as tr:
@@ -214,15 +206,10 @@
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
-        lock = tr = None
-        try:
-            lock = repo.lock()
-            tr = repo.transaction('repair')
-            bm.recordchange(tr)
-            tr.close()
-        finally:
-            tr.release()
-            lock.release()
+
+        with repo.lock():
+            with repo.transaction('repair') as tr:
+                bm.recordchange(tr)
 
         # remove undo files
         for undovfs, undofile in repo.undofiles():
@@ -365,739 +352,3 @@
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
-
-def upgraderequiredsourcerequirements(repo):
-    """Obtain requirements required to be present to upgrade a repo.
-
-    An upgrade will not be allowed if the repository doesn't have the
-    requirements returned by this function.
-    """
-    return set([
-        # Introduced in Mercurial 0.9.2.
-        'revlogv1',
-        # Introduced in Mercurial 0.9.2.
-        'store',
-    ])
-
-def upgradeblocksourcerequirements(repo):
-    """Obtain requirements that will prevent an upgrade from occurring.
-
-    An upgrade cannot be performed if the source repository contains a
-    requirements in the returned set.
-    """
-    return set([
-        # The upgrade code does not yet support these experimental features.
-        # This is an artificial limitation.
-        'manifestv2',
-        'treemanifest',
-        # This was a precursor to generaldelta and was never enabled by default.
-        # It should (hopefully) not exist in the wild.
-        'parentdelta',
-        # Upgrade should operate on the actual store, not the shared link.
-        'shared',
-    ])
-
-def upgradesupportremovedrequirements(repo):
-    """Obtain requirements that can be removed during an upgrade.
-
-    If an upgrade were to create a repository that dropped a requirement,
-    the dropped requirement must appear in the returned set for the upgrade
-    to be allowed.
-    """
-    return set()
-
-def upgradesupporteddestrequirements(repo):
-    """Obtain requirements that upgrade supports in the destination.
-
-    If the result of the upgrade would create requirements not in this set,
-    the upgrade is disallowed.
-
-    Extensions should monkeypatch this to add their custom requirements.
-    """
-    return set([
-        'dotencode',
-        'fncache',
-        'generaldelta',
-        'revlogv1',
-        'store',
-    ])
-
-def upgradeallowednewrequirements(repo):
-    """Obtain requirements that can be added to a repository during upgrade.
-
-    This is used to disallow proposed requirements from being added when
-    they weren't present before.
-
-    We use a list of allowed requirement additions instead of a list of known
-    bad additions because the whitelist approach is safer and will prevent
-    future, unknown requirements from accidentally being added.
-    """
-    return set([
-        'dotencode',
-        'fncache',
-        'generaldelta',
-    ])
-
-deficiency = 'deficiency'
-optimisation = 'optimization'
-
-class upgradeimprovement(object):
-    """Represents an improvement that can be made as part of an upgrade.
-
-    The following attributes are defined on each instance:
-
-    name
-       Machine-readable string uniquely identifying this improvement. It
-       will be mapped to an action later in the upgrade process.
-
-    type
-       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
-       problem. An optimization is an action (sometimes optional) that
-       can be taken to further improve the state of the repository.
-
-    description
-       Message intended for humans explaining the improvement in more detail,
-       including the implications of it. For ``deficiency`` types, should be
-       worded in the present tense. For ``optimisation`` types, should be
-       worded in the future tense.
-
-    upgrademessage
-       Message intended for humans explaining what an upgrade addressing this
-       issue will do. Should be worded in the future tense.
-
-    fromdefault (``deficiency`` types only)
-       Boolean indicating whether the current (deficient) state deviates
-       from Mercurial's default configuration.
-
-    fromconfig (``deficiency`` types only)
-       Boolean indicating whether the current (deficient) state deviates
-       from the current Mercurial configuration.
-    """
-    def __init__(self, name, type, description, upgrademessage, **kwargs):
-        self.name = name
-        self.type = type
-        self.description = description
-        self.upgrademessage = upgrademessage
-
-        for k, v in kwargs.items():
-            setattr(self, k, v)
-
-def upgradefindimprovements(repo):
-    """Determine improvements that can be made to the repo during upgrade.
-
-    Returns a list of ``upgradeimprovement`` describing repository deficiencies
-    and optimizations.
-    """
-    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
-    from . import localrepo
-
-    newreporeqs = localrepo.newreporequirements(repo)
-
-    improvements = []
-
-    # We could detect lack of revlogv1 and store here, but they were added
-    # in 0.9.2 and we don't support upgrading repos without these
-    # requirements, so let's not bother.
-
-    if 'fncache' not in repo.requirements:
-        improvements.append(upgradeimprovement(
-            name='fncache',
-            type=deficiency,
-            description=_('long and reserved filenames may not work correctly; '
-                          'repository performance is sub-optimal'),
-            upgrademessage=_('repository will be more resilient to storing '
-                             'certain paths and performance of certain '
-                             'operations should be improved'),
-            fromdefault=True,
-            fromconfig='fncache' in newreporeqs))
-
-    if 'dotencode' not in repo.requirements:
-        improvements.append(upgradeimprovement(
-            name='dotencode',
-            type=deficiency,
-            description=_('storage of filenames beginning with a period or '
-                          'space may not work correctly'),
-            upgrademessage=_('repository will be better able to store files '
-                             'beginning with a space or period'),
-            fromdefault=True,
-            fromconfig='dotencode' in newreporeqs))
-
-    if 'generaldelta' not in repo.requirements:
-        improvements.append(upgradeimprovement(
-            name='generaldelta',
-            type=deficiency,
-            description=_('deltas within internal storage are unable to '
-                          'choose optimal revisions; repository is larger and '
-                          'slower than it could be; interaction with other '
-                          'repositories may require extra network and CPU '
-                          'resources, making "hg push" and "hg pull" slower'),
-            upgrademessage=_('repository storage will be able to create '
-                             'optimal deltas; new repository data will be '
-                             'smaller and read times should decrease; '
-                             'interacting with other repositories using this '
-                             'storage model should require less network and '
-                             'CPU resources, making "hg push" and "hg pull" '
-                             'faster'),
-            fromdefault=True,
-            fromconfig='generaldelta' in newreporeqs))
-
-    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
-    # changelogs with deltas.
-    cl = repo.changelog
-    for rev in cl:
-        chainbase = cl.chainbase(rev)
-        if chainbase != rev:
-            improvements.append(upgradeimprovement(
-                name='removecldeltachain',
-                type=deficiency,
-                description=_('changelog storage is using deltas instead of '
-                              'raw entries; changelog reading and any '
-                              'operation relying on changelog data are slower '
-                              'than they could be'),
-                upgrademessage=_('changelog storage will be reformated to '
-                                 'store raw entries; changelog reading will be '
-                                 'faster; changelog size may be reduced'),
-                fromdefault=True,
-                fromconfig=True))
-            break
-
-    # Now for the optimizations.
-
-    # These are unconditionally added. There is logic later that figures out
-    # which ones to apply.
-
-    improvements.append(upgradeimprovement(
-        name='redeltaparent',
-        type=optimisation,
-        description=_('deltas within internal storage will be recalculated to '
-                      'choose an optimal base revision where this was not '
-                      'already done; the size of the repository may shrink and '
-                      'various operations may become faster; the first time '
-                      'this optimization is performed could slow down upgrade '
-                      'execution considerably; subsequent invocations should '
-                      'not run noticeably slower'),
-        upgrademessage=_('deltas within internal storage will choose a new '
-                         'base revision if needed')))
-
-    improvements.append(upgradeimprovement(
-        name='redeltamultibase',
-        type=optimisation,
-        description=_('deltas within internal storage will be recalculated '
-                      'against multiple base revision and the smallest '
-                      'difference will be used; the size of the repository may '
-                      'shrink significantly when there are many merges; this '
-                      'optimization will slow down execution in proportion to '
-                      'the number of merges in the repository and the amount '
-                      'of files in the repository; this slow down should not '
-                      'be significant unless there are tens of thousands of '
-                      'files and thousands of merges'),
-        upgrademessage=_('deltas within internal storage will choose an '
-                         'optimal delta by computing deltas against multiple '
-                         'parents; may slow down execution time '
-                         'significantly')))
-
-    improvements.append(upgradeimprovement(
-        name='redeltaall',
-        type=optimisation,
-        description=_('deltas within internal storage will always be '
-                      'recalculated without reusing prior deltas; this will '
-                      'likely make execution run several times slower; this '
-                      'optimization is typically not needed'),
-        upgrademessage=_('deltas within internal storage will be fully '
-                         'recomputed; this will likely drastically slow down '
-                         'execution time')))
-
-    return improvements
-
-def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
-                            optimize):
-    """Determine upgrade actions that will be performed.
-
-    Given a list of improvements as returned by ``upgradefindimprovements``,
-    determine the list of upgrade actions that will be performed.
-
-    The role of this function is to filter improvements if needed, apply
-    recommended optimizations from the improvements list that make sense,
-    etc.
-
-    Returns a list of action names.
-    """
-    newactions = []
-
-    knownreqs = upgradesupporteddestrequirements(repo)
-
-    for i in improvements:
-        name = i.name
-
-        # If the action is a requirement that doesn't show up in the
-        # destination requirements, prune the action.
-        if name in knownreqs and name not in destreqs:
-            continue
-
-        if i.type == deficiency:
-            newactions.append(name)
-
-    newactions.extend(o for o in sorted(optimize) if o not in newactions)
-
-    # FUTURE consider adding some optimizations here for certain transitions.
-    # e.g. adding generaldelta could schedule parent redeltas.
-
-    return newactions
-
-def _revlogfrompath(repo, path):
-    """Obtain a revlog from a repo path.
-
-    An instance of the appropriate class is returned.
-    """
-    if path == '00changelog.i':
-        return changelog.changelog(repo.svfs)
-    elif path.endswith('00manifest.i'):
-        mandir = path[:-len('00manifest.i')]
-        return manifest.manifestrevlog(repo.svfs, dir=mandir)
-    else:
-        # Filelogs don't do anything special with settings. So we can use a
-        # vanilla revlog.
-        return revlog.revlog(repo.svfs, path)
-
-def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
-    """Copy revlogs between 2 repos."""
-    revcount = 0
-    srcsize = 0
-    srcrawsize = 0
-    dstsize = 0
-    fcount = 0
-    frevcount = 0
-    fsrcsize = 0
-    frawsize = 0
-    fdstsize = 0
-    mcount = 0
-    mrevcount = 0
-    msrcsize = 0
-    mrawsize = 0
-    mdstsize = 0
-    crevcount = 0
-    csrcsize = 0
-    crawsize = 0
-    cdstsize = 0
-
-    # Perform a pass to collect metadata. This validates we can open all
-    # source files and allows a unified progress bar to be displayed.
-    for unencoded, encoded, size in srcrepo.store.walk():
-        if unencoded.endswith('.d'):
-            continue
-
-        rl = _revlogfrompath(srcrepo, unencoded)
-        revcount += len(rl)
-
-        datasize = 0
-        rawsize = 0
-        idx = rl.index
-        for rev in rl:
-            e = idx[rev]
-            datasize += e[1]
-            rawsize += e[2]
-
-        srcsize += datasize
-        srcrawsize += rawsize
-
-        # This is for the separate progress bars.
-        if isinstance(rl, changelog.changelog):
-            crevcount += len(rl)
-            csrcsize += datasize
-            crawsize += rawsize
-        elif isinstance(rl, manifest.manifestrevlog):
-            mcount += 1
-            mrevcount += len(rl)
-            msrcsize += datasize
-            mrawsize += rawsize
-        elif isinstance(rl, revlog.revlog):
-            fcount += 1
-            frevcount += len(rl)
-            fsrcsize += datasize
-            frawsize += rawsize
-
-    if not revcount:
-        return
-
-    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
-               '%d in changelog)\n') %
-             (revcount, frevcount, mrevcount, crevcount))
-    ui.write(_('migrating %s in store; %s tracked data\n') % (
-             (util.bytecount(srcsize), util.bytecount(srcrawsize))))
-
-    # Used to keep track of progress.
-    progress = []
-    def oncopiedrevision(rl, rev, node):
-        progress[1] += 1
-        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
-
-    # Do the actual copying.
-    # FUTURE this operation can be farmed off to worker processes.
-    seen = set()
-    for unencoded, encoded, size in srcrepo.store.walk():
-        if unencoded.endswith('.d'):
-            continue
-
-        oldrl = _revlogfrompath(srcrepo, unencoded)
-        newrl = _revlogfrompath(dstrepo, unencoded)
-
-        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
-            ui.write(_('finished migrating %d manifest revisions across %d '
-                       'manifests; change in size: %s\n') %
-                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
-
-            ui.write(_('migrating changelog containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (crevcount, util.bytecount(csrcsize),
-                      util.bytecount(crawsize)))
-            seen.add('c')
-            progress[:] = [_('changelog revisions'), 0, crevcount]
-        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
-            ui.write(_('finished migrating %d filelog revisions across %d '
-                       'filelogs; change in size: %s\n') %
-                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
-
-            ui.write(_('migrating %d manifests containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (mcount, mrevcount, util.bytecount(msrcsize),
-                      util.bytecount(mrawsize)))
-            seen.add('m')
-            progress[:] = [_('manifest revisions'), 0, mrevcount]
-        elif 'f' not in seen:
-            ui.write(_('migrating %d filelogs containing %d revisions '
-                       '(%s in store; %s tracked data)\n') %
-                     (fcount, frevcount, util.bytecount(fsrcsize),
-                      util.bytecount(frawsize)))
-            seen.add('f')
-            progress[:] = [_('file revisions'), 0, frevcount]
-
-        ui.progress(progress[0], progress[1], total=progress[2])
-
-        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
-        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
-                    deltareuse=deltareuse,
-                    aggressivemergedeltas=aggressivemergedeltas)
-
-        datasize = 0
-        idx = newrl.index
-        for rev in newrl:
-            datasize += idx[rev][1]
-
-        dstsize += datasize
-
-        if isinstance(newrl, changelog.changelog):
-            cdstsize += datasize
-        elif isinstance(newrl, manifest.manifestrevlog):
-            mdstsize += datasize
-        else:
-            fdstsize += datasize
-
-    ui.progress(progress[0], None)
-
-    ui.write(_('finished migrating %d changelog revisions; change in size: '
-               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
-
-    ui.write(_('finished migrating %d total revisions; total change in store '
-               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
-
-def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
-    """Determine whether to copy a store file during upgrade.
-
-    This function is called when migrating store files from ``srcrepo`` to
-    ``dstrepo`` as part of upgrading a repository.
-
-    Args:
-      srcrepo: repo we are copying from
-      dstrepo: repo we are copying to
-      requirements: set of requirements for ``dstrepo``
-      path: store file being examined
-      mode: the ``ST_MODE`` file type of ``path``
-      st: ``stat`` data structure for ``path``
-
-    Function should return ``True`` if the file is to be copied.
-    """
-    # Skip revlogs.
-    if path.endswith(('.i', '.d')):
-        return False
-    # Skip transaction related files.
-    if path.startswith('undo'):
-        return False
-    # Only copy regular files.
-    if mode != stat.S_IFREG:
-        return False
-    # Skip other files that should not be copied.
-    if path in ('lock', 'fncache'):
-        return False
-
-    return True
-
-def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
-    """Hook point for extensions to perform additional actions during upgrade.
-
-    This function is called after revlogs and store files have been copied but
-    before the new store is swapped into the original location.
-    """
-
-def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
-    """Do the low-level work of upgrading a repository.
-
-    The upgrade is effectively performed as a copy between a source
-    repository and a temporary destination repository.
-
-    The source repository is unmodified for as long as possible so the
-    upgrade can abort at any time without causing loss of service for
-    readers and without corrupting the source repository.
-    """
-    assert srcrepo.currentwlock()
-    assert dstrepo.currentwlock()
-
-    ui.write(_('(it is safe to interrupt this process any time before '
-               'data migration completes)\n'))
-
-    if 'redeltaall' in actions:
-        deltareuse = revlog.revlog.DELTAREUSENEVER
-    elif 'redeltaparent' in actions:
-        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
-    elif 'redeltamultibase' in actions:
-        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
-    else:
-        deltareuse = revlog.revlog.DELTAREUSEALWAYS
-
-    with dstrepo.transaction('upgrade') as tr:
-        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
-                     'redeltamultibase' in actions)
-
-    # Now copy other files in the store directory.
-    # The sorted() makes execution deterministic.
-    for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
-        if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
-                                       p, kind, st):
-            continue
-
-        srcrepo.ui.write(_('copying %s\n') % p)
-        src = srcrepo.store.rawvfs.join(p)
-        dst = dstrepo.store.rawvfs.join(p)
-        util.copyfile(src, dst, copystat=True)
-
-    _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
-
-    ui.write(_('data fully migrated to temporary repository\n'))
-
-    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
-    backupvfs = scmutil.vfs(backuppath)
-
-    # Make a backup of requires file first, as it is the first to be modified.
-    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))
-
-    # We install an arbitrary requirement that clients must not support
-    # as a mechanism to lock out new clients during the data swap. This is
-    # better than allowing a client to continue while the repository is in
-    # an inconsistent state.
-    ui.write(_('marking source repository as being upgraded; clients will be '
-               'unable to read from repository\n'))
-    scmutil.writerequires(srcrepo.vfs,
-                          srcrepo.requirements | set(['upgradeinprogress']))
-
-    ui.write(_('starting in-place swap of repository data\n'))
-    ui.write(_('replaced files will be backed up at %s\n') %
-             backuppath)
-
-    # Now swap in the new store directory. Doing it as a rename should make
-    # the operation nearly instantaneous and atomic (at least in well-behaved
-    # environments).
-    ui.write(_('replacing store...\n'))
-    tstart = time.time()
-    util.rename(srcrepo.spath, backupvfs.join('store'))
-    util.rename(dstrepo.spath, srcrepo.spath)
-    elapsed = time.time() - tstart
-    ui.write(_('store replacement complete; repository was inconsistent for '
-               '%0.1fs\n') % elapsed)
-
-    # We first write the requirements file. Any new requirements will lock
-    # out legacy clients.
-    ui.write(_('finalizing requirements file and making repository readable '
-               'again\n'))
-    scmutil.writerequires(srcrepo.vfs, requirements)
-
-    # The lock file from the old store won't be removed because nothing has a
-    # reference to its new location. So clean it up manually. Alternatively, we
-    # could update srcrepo.svfs and other variables to point to the new
-    # location. This is simpler.
-    backupvfs.unlink('store/lock')
-
-    return backuppath
-
-def upgraderepo(ui, repo, run=False, optimize=None):
-    """Upgrade a repository in place."""
-    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
-    from . import localrepo
-
-    optimize = set(optimize or [])
-    repo = repo.unfiltered()
-
-    # Ensure the repository can be upgraded.
-    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
-    if missingreqs:
-        raise error.Abort(_('cannot upgrade repository; requirement '
-                            'missing: %s') % _(', ').join(sorted(missingreqs)))
-
-    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
-    if blockedreqs:
-        raise error.Abort(_('cannot upgrade repository; unsupported source '
-                            'requirement: %s') %
-                          _(', ').join(sorted(blockedreqs)))
-
-    # FUTURE there is potentially a need to control the wanted requirements via
-    # command arguments or via an extension hook point.
-    newreqs = localrepo.newreporequirements(repo)
-
-    noremovereqs = (repo.requirements - newreqs -
-                   upgradesupportremovedrequirements(repo))
-    if noremovereqs:
-        raise error.Abort(_('cannot upgrade repository; requirement would be '
-                            'removed: %s') % _(', ').join(sorted(noremovereqs)))
-
-    noaddreqs = (newreqs - repo.requirements -
-                 upgradeallowednewrequirements(repo))
-    if noaddreqs:
-        raise error.Abort(_('cannot upgrade repository; do not support adding '
-                            'requirement: %s') %
-                          _(', ').join(sorted(noaddreqs)))
-
-    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
-    if unsupportedreqs:
-        raise error.Abort(_('cannot upgrade repository; do not support '
-                            'destination requirement: %s') %
-                          _(', ').join(sorted(unsupportedreqs)))
-
-    # Find and validate all improvements that can be made.
-    improvements = upgradefindimprovements(repo)
-    for i in improvements:
-        if i.type not in (deficiency, optimisation):
-            raise error.Abort(_('unexpected improvement type %s for %s') % (
-                i.type, i.name))
-
-    # Validate arguments.
-    unknownoptimize = optimize - set(i.name for i in improvements
-                                     if i.type == optimisation)
-    if unknownoptimize:
-        raise error.Abort(_('unknown optimization action requested: %s') %
-                          ', '.join(sorted(unknownoptimize)),
-                          hint=_('run without arguments to see valid '
-                                 'optimizations'))
-
-    actions = upgradedetermineactions(repo, improvements, repo.requirements,
-                                      newreqs, optimize)
-
-    def printrequirements():
-        ui.write(_('requirements\n'))
-        ui.write(_('   preserved: %s\n') %
-                 _(', ').join(sorted(newreqs & repo.requirements)))
-
-        if repo.requirements - newreqs:
-            ui.write(_('   removed: %s\n') %
-                     _(', ').join(sorted(repo.requirements - newreqs)))
-
-        if newreqs - repo.requirements:
-            ui.write(_('   added: %s\n') %
-                     _(', ').join(sorted(newreqs - repo.requirements)))
-
-        ui.write('\n')
-
-    def printupgradeactions():
-        for action in actions:
-            for i in improvements:
-                if i.name == action:
-                    ui.write('%s\n   %s\n\n' %
-                             (i.name, i.upgrademessage))
-
-    if not run:
-        fromdefault = []
-        fromconfig = []
-        optimizations = []
-
-        for i in improvements:
-            assert i.type in (deficiency, optimisation)
-            if i.type == deficiency:
-                if i.fromdefault:
-                    fromdefault.append(i)
-                if i.fromconfig:
-                    fromconfig.append(i)
-            else:
-                optimizations.append(i)
-
-        if fromdefault or fromconfig:
-            fromconfignames = set(x.name for x in fromconfig)
-            onlydefault = [i for i in fromdefault
-                           if i.name not in fromconfignames]
-
-            if fromconfig:
-                ui.write(_('repository lacks features recommended by '
-                           'current config options:\n\n'))
-                for i in fromconfig:
-                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
-
-            if onlydefault:
-                ui.write(_('repository lacks features used by the default '
-                           'config options:\n\n'))
-                for i in onlydefault:
-                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
-
-            ui.write('\n')
-        else:
-            ui.write(_('(no feature deficiencies found in existing '
-                       'repository)\n'))
-
-        ui.write(_('performing an upgrade with "--run" will make the following '
-                   'changes:\n\n'))
-
-        printrequirements()
-        printupgradeactions()
-
-        unusedoptimize = [i for i in improvements
-                          if i.name not in actions and i.type == optimisation]
-        if unusedoptimize:
-            ui.write(_('additional optimizations are available by specifying '
-                     '"--optimize <name>":\n\n'))
-            for i in unusedoptimize:
-                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
-        return
-
-    # Else we're in the run=true case.
-    ui.write(_('upgrade will perform the following actions:\n\n'))
-    printrequirements()
-    printupgradeactions()
-
-    ui.write(_('beginning upgrade...\n'))
-    with repo.wlock():
-        with repo.lock():
-            ui.write(_('repository locked and read-only\n'))
-            # Our strategy for upgrading the repository is to create a new,
-            # temporary repository, write data to it, then do a swap of the
-            # data. There are less heavyweight ways to do this, but it is easier
-            # to create a new repo object than to instantiate all the components
-            # (like the store) separately.
-            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
-            backuppath = None
-            try:
-                ui.write(_('creating temporary repository to stage migrated '
-                           'data: %s\n') % tmppath)
-                dstrepo = localrepo.localrepository(repo.baseui,
-                                                    path=tmppath,
-                                                    create=True)
-
-                with dstrepo.wlock():
-                    with dstrepo.lock():
-                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
-                                                  actions)
-
-            finally:
-                ui.write(_('removing temporary repository %s\n') % tmppath)
-                repo.vfs.rmtree(tmppath, forcibly=True)
-
-                if backuppath:
-                    ui.warn(_('copy of old repository backed up at %s\n') %
-                            backuppath)
-                    ui.warn(_('the old repository will not be deleted; remove '
-                              'it to free up disk space once the upgraded '
-                              'repository is verified\n'))
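
The upgrade machinery removed above is built around a copy-and-swap strategy: stage everything in a temporary repository, then swap stores with two renames so the window of inconsistency stays tiny. A condensed sketch of that sequence (placeholder helpers, not the real hg API):

    import os
    import tempfile

    def upgrade_in_place(repo_path, copy_all_data):
        """Sketch of the copy-and-swap upgrade; copy_all_data stands in
        for the revlog and store-file migration performed above."""
        store = os.path.join(repo_path, 'store')
        # 1. Stage migrated data in a temporary directory inside the repo.
        tmp = tempfile.mkdtemp(prefix='upgrade.', dir=repo_path)
        os.mkdir(os.path.join(tmp, 'store'))
        copy_all_data(store, os.path.join(tmp, 'store'))
        # 2. Keep a backup so interrupting the swap loses nothing.
        backup = tempfile.mkdtemp(prefix='upgradebackup.', dir=repo_path)
        # 3. The swap is two renames, so the repository is inconsistent
        #    only for the instant between them.
        os.rename(store, os.path.join(backup, 'store'))
        os.rename(os.path.join(tmp, 'store'), store)
        return backup
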
--- a/mercurial/repoview.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/repoview.py	Tue Apr 18 12:24:34 2017 -0400
@@ -104,7 +104,7 @@
     """
     h = hashlib.sha1()
     h.update(''.join(repo.heads()))
-    h.update(str(hash(frozenset(hideable))))
+    h.update('%d' % hash(frozenset(hideable)))
     return h.digest()
 
 def _writehiddencache(cachefile, cachehash, hidden):
@@ -139,15 +139,13 @@
         if wlock:
             wlock.release()
 
-def tryreadcache(repo, hideable):
-    """read a cache if the cache exists and is valid, otherwise returns None."""
+def _readhiddencache(repo, cachefilename, newhash):
     hidden = fh = None
     try:
         if repo.vfs.exists(cachefile):
             fh = repo.vfs.open(cachefile, 'rb')
             version, = struct.unpack(">H", fh.read(2))
             oldhash = fh.read(20)
-            newhash = cachehash(repo, hideable)
             if (cacheversion, oldhash) == (version, newhash):
                 # cache is valid, so we can start reading the hidden revs
                 data = fh.read()
@@ -165,6 +163,11 @@
         if fh:
             fh.close()
 
+def tryreadcache(repo, hideable):
+    """read a cache if the cache exists and is valid, otherwise returns None."""
+    newhash = cachehash(repo, hideable)
+    return _readhiddencache(repo, cachefile, newhash)
+
 def computehidden(repo):
     """compute the set of hidden revision to filter
 
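
As the unpack calls above show, the hidden cache starts with a 2-byte big-endian version and a 20-byte SHA-1 (the digest computed by cachehash). A standalone reader might look like this sketch; treating the payload as packed big-endian 32-bit revision numbers is an assumption based on the struct usage elsewhere in this module, not something visible in this hunk:

    import struct

    cacheversion = 2  # placeholder; the real constant is defined earlier
                      # in repoview.py

    def read_hidden_cache(path, expected_hash):
        """Sketch: return the set of hidden revs, or None if missing/stale."""
        try:
            with open(path, 'rb') as fh:
                version, = struct.unpack('>H', fh.read(2))
                oldhash = fh.read(20)
                if (version, oldhash) != (cacheversion, expected_hash):
                    return None  # cache is stale
                data = fh.read()
        except IOError:
            return None
        # assumption: payload is big-endian 32-bit revision numbers
        return set(struct.unpack('>%di' % (len(data) // 4), data))
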
@@ -297,10 +300,10 @@
     """
 
     def __init__(self, repo, filtername):
-        object.__setattr__(self, '_unfilteredrepo', repo)
-        object.__setattr__(self, 'filtername', filtername)
-        object.__setattr__(self, '_clcachekey', None)
-        object.__setattr__(self, '_clcache', None)
+        object.__setattr__(self, r'_unfilteredrepo', repo)
+        object.__setattr__(self, r'filtername', filtername)
+        object.__setattr__(self, r'_clcachekey', None)
+        object.__setattr__(self, r'_clcache', None)
 
     # not a propertycache on purpose we shall implement a proper cache later
     @property
@@ -328,8 +331,8 @@
         if cl is None:
             cl = copy.copy(unfichangelog)
             cl.filteredrevs = revs
-            object.__setattr__(self, '_clcache', cl)
-            object.__setattr__(self, '_clcachekey', newkey)
+            object.__setattr__(self, r'_clcache', cl)
+            object.__setattr__(self, r'_clcachekey', newkey)
         return cl
 
     def unfiltered(self):
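
The r'' prefixes added in this file are not cosmetic. Attribute names passed to object.__setattr__ must be native str on Python 3, and (assuming the usual Mercurial porting convention) the Python 3 source transformer rewrites bare string literals to bytes while leaving r'' literals alone. The underlying constraint is easy to demonstrate:

    class Box(object):
        pass

    b = Box()
    object.__setattr__(b, 'name', 'ok')       # str attribute name: fine
    try:
        object.__setattr__(b, b'name', 'no')  # TypeError on Python 3
    except TypeError as e:
        print(e)  # attribute name must be string, not 'bytes'
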
--- a/mercurial/revlog.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/revlog.py	Tue Apr 18 12:24:34 2017 -0400
@@ -33,6 +33,7 @@
     error,
     mdiff,
     parsers,
+    pycompat,
     templatefilters,
     util,
 )
@@ -87,11 +88,17 @@
     - Only one flag processor can be registered on a specific flag.
     - flagprocessors must be 3-tuples of functions (read, write, raw) with the
       following signatures:
-          - (read)  f(self, text) -> newtext, bool
-          - (write) f(self, text) -> newtext, bool
-          - (raw)   f(self, text) -> bool
+          - (read)  f(self, rawtext) -> text, bool
+          - (write) f(self, text) -> rawtext, bool
+          - (raw)   f(self, rawtext) -> bool
+      "text" is presented to the user. "rawtext" is stored in revlog data, not
+      directly visible to the user.
       The boolean returned by these transforms is used to determine whether
-      'newtext' can be used for hash integrity checking.
+      the returned text can be used for hash integrity checking. For example,
+      if "write" returns False, then "text" is used to generate hash. If
+      "write" returns True, that basically means "rawtext" returned by "write"
+      should be used to generate hash. Usually, "write" and "read" return
+      different booleans. And "raw" returns a same boolean as "write".
 
       Note: The 'raw' transform is used for changegroup generation and in some
       debug commands. In this case the transform only indicates whether the
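
A concrete (hypothetical) illustration of the three signatures: a processor that base64-encodes rawtext on disk. The flag bit is made up, and a real one would have to be reserved in REVIDX_KNOWN_FLAGS; registration would go through addflagprocessor as described above:

    import base64

    REVIDX_BASE64 = 1 << 4  # hypothetical flag bit, not a real revlog flag

    def b64read(self, rawtext):
        # stored form -> user-visible form
        return base64.b64decode(rawtext), True

    def b64write(self, text):
        # user-visible form -> stored form; False means the hash is
        # computed over "text", per the docstring above
        return base64.b64encode(text), False

    def b64raw(self, rawtext):
        # same boolean as "write": rawtext alone cannot be hash-checked
        return False

    # revlog.addflagprocessor(REVIDX_BASE64, (b64read, b64write, b64raw))
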
@@ -117,7 +124,7 @@
 def offset_type(offset, type):
     if (type & ~REVIDX_KNOWN_FLAGS) != 0:
         raise ValueError('unknown revlog index flags')
-    return long(long(offset) << 16 | type)
+    return int(int(offset) << 16 | type)
 
 _nullhash = hashlib.sha1(nullid)
 
@@ -437,9 +444,18 @@
         if l >= 0:
             return l
 
-        t = self.revision(self.node(rev))
+        t = self.revision(rev, raw=True)
         return len(t)
-    size = rawsize
+
+    def size(self, rev):
+        """length of non-raw text (processed by a "read" flag processor)"""
+        # fast path: if no "read" flag processor could change the content,
+        # size is rawsize. note: ELLIPSIS is known to not change the content.
+        flags = self.flags(rev)
+        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
+            return self.rawsize(rev)
+
+        return len(self.revision(rev, raw=False))
 
     def chainbase(self, rev):
         base = self._chainbasecache.get(rev)
@@ -943,7 +959,7 @@
             ancs = self.index.commonancestorsheads(a, b)
         except (AttributeError, OverflowError): # C implementation failed
             ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
-        return map(self.node, ancs)
+        return pycompat.maplist(self.node, ancs)
 
     def isancestor(self, a, b):
         """return True if node a is an ancestor of node b
@@ -1232,12 +1248,16 @@
             return rev - 1
 
     def revdiff(self, rev1, rev2):
-        """return or calculate a delta between two revisions"""
+        """return or calculate a delta between two revisions
+
+        The delta calculated is in binary form and is intended to be written to
+        revlog data directly. So this function needs raw revision data.
+        """
         if rev1 != nullrev and self.deltaparent(rev2) == rev1:
-            return str(self._chunk(rev2))
+            return bytes(self._chunk(rev2))
 
-        return mdiff.textdiff(self.revision(rev1),
-                              self.revision(rev2))
+        return mdiff.textdiff(self.revision(rev1, raw=True),
+                              self.revision(rev2, raw=True))
 
     def revision(self, nodeorrev, _df=None, raw=False):
         """return an uncompressed revision of a given node or revision
@@ -1256,38 +1276,57 @@
             rev = None
 
         cachedrev = None
+        flags = None
+        rawtext = None
         if node == nullid:
             return ""
         if self._cache:
             if self._cache[0] == node:
-                return self._cache[2]
+                # _cache only stores rawtext
+                if raw:
+                    return self._cache[2]
+                # duplicated, but good for perf
+                if rev is None:
+                    rev = self.rev(node)
+                if flags is None:
+                    flags = self.flags(rev)
+                # no extra flags set, no flag processor runs, text = rawtext
+                if flags == REVIDX_DEFAULT_FLAGS:
+                    return self._cache[2]
+                # rawtext is reusable. need to run flag processor
+                rawtext = self._cache[2]
+
             cachedrev = self._cache[1]
 
         # look up what we need to read
-        text = None
-        if rev is None:
-            rev = self.rev(node)
+        if rawtext is None:
+            if rev is None:
+                rev = self.rev(node)
 
-        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
-        if stopped:
-            text = self._cache[2]
+            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
+            if stopped:
+                rawtext = self._cache[2]
+
+            # drop cache to save memory
+            self._cache = None
 
-        # drop cache to save memory
-        self._cache = None
+            bins = self._chunks(chain, df=_df)
+            if rawtext is None:
+                rawtext = bytes(bins[0])
+                bins = bins[1:]
 
-        bins = self._chunks(chain, df=_df)
-        if text is None:
-            text = str(bins[0])
-            bins = bins[1:]
+            rawtext = mdiff.patches(rawtext, bins)
+            self._cache = (node, rev, rawtext)
 
-        text = mdiff.patches(text, bins)
+        if flags is None:
+            if rev is None:
+                rev = self.rev(node)
+            flags = self.flags(rev)
 
-        text, validatehash = self._processflags(text, self.flags(rev), 'read',
-                                                raw=raw)
+        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
         if validatehash:
             self.checkhash(text, node, rev=rev)
 
-        self._cache = (node, rev, text)
         return text
 
     def hash(self, text, p1, p2):
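
After this rework the two representations stay cleanly separated: the cache and the delta chain only ever hold rawtext, and text is re-derived by the "read" flag processors on the way out. The control flow reduces to roughly this sketch, where patch_delta_chain is a placeholder for the _deltachain/_chunks/mdiff.patches dance above:

    def revision_sketch(rl, node, raw=False):
        rev = rl.rev(node)
        # 1. obtain rawtext, from the (node, rev, rawtext) cache if possible
        if rl._cache and rl._cache[0] == node:
            rawtext = rl._cache[2]
        else:
            rawtext = patch_delta_chain(rl, rev)  # placeholder
            rl._cache = (node, rev, rawtext)
        if raw:
            return rawtext
        # 2. derive user-visible text via the registered "read" processors
        text, validatehash = rl._processflags(rawtext, rl.flags(rev),
                                              'read', raw=False)
        if validatehash:
            rl.checkhash(text, node, rev=rev)
        return text
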
@@ -1444,32 +1483,31 @@
         if flags:
             node = node or self.hash(text, p1, p2)
 
-        newtext, validatehash = self._processflags(text, flags, 'write')
+        rawtext, validatehash = self._processflags(text, flags, 'write')
 
         # If the flag processor modifies the revision data, ignore any provided
         # cachedelta.
-        if newtext != text:
+        if rawtext != text:
             cachedelta = None
-        text = newtext
 
-        if len(text) > _maxentrysize:
+        if len(rawtext) > _maxentrysize:
             raise RevlogError(
                 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
-                % (self.indexfile, len(text)))
+                % (self.indexfile, len(rawtext)))
 
-        node = node or self.hash(text, p1, p2)
+        node = node or self.hash(rawtext, p1, p2)
         if node in self.nodemap:
             return node
 
         if validatehash:
-            self.checkhash(text, node, p1=p1, p2=p2)
+            self.checkhash(rawtext, node, p1=p1, p2=p2)
 
         dfh = None
         if not self._inline:
             dfh = self.opener(self.datafile, "a+")
         ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
         try:
-            return self._addrevision(node, text, transaction, link, p1, p2,
+            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                      flags, cachedelta, ifh, dfh)
         finally:
             if dfh:
@@ -1487,7 +1525,7 @@
             # The revlog compressor added the header in the returned data.
             return '', compressed
 
-        if data[0] == '\0':
+        if data[0:1] == '\0':
             return '', data
         return 'u', data
 
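The data[0] to data[0:1] changes here and in the next hunk are Python 3 bytes hygiene: indexing bytes yields an int on Python 3 but a one-byte string on Python 2, whereas slicing yields bytes on both, so only the sliced form compares correctly against a one-byte literal:

    data = b'\x00abc'
    data[0]               # Python 2: '\x00'   Python 3: 0 (an int)
    data[0:1]             # Python 2: '\x00'   Python 3: b'\x00'
    data[0:1] == b'\x00'  # True on both
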
@@ -1521,7 +1559,7 @@
         #
         # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
         # compressed chunks. And this matters for changelog and manifest reads.
-        t = data[0]
+        t = data[0:1]
 
         if t == 'x':
             try:
@@ -1565,19 +1603,19 @@
 
         return True
 
-    def _addrevision(self, node, text, transaction, link, p1, p2, flags,
-                     cachedelta, ifh, dfh, alwayscache=False, raw=False):
+    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
+                     cachedelta, ifh, dfh, alwayscache=False):
         """internal function to add revisions to the log
 
         see addrevision for argument descriptions.
+
+        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
+
         invariants:
-        - text is optional (can be None); if not set, cachedelta must be set.
+        - rawtext is optional (can be None); if not set, cachedelta must be set.
           if both are set, they must correspond to each other.
-        - raw is optional; if set to True, it indicates the revision data is to
-          be treated by _processflags() as raw. It is usually set by changegroup
-          generation and debug commands.
         """
-        btext = [text]
+        btext = [rawtext]
         def buildtext():
             if btext[0] is not None:
                 return btext[0]
@@ -1595,11 +1633,11 @@
                     fh = ifh
                 else:
                     fh = dfh
-                basetext = self.revision(self.node(baserev), _df=fh, raw=raw)
+                basetext = self.revision(baserev, _df=fh, raw=True)
                 btext[0] = mdiff.patch(basetext, delta)
 
             try:
-                res = self._processflags(btext[0], flags, 'read', raw=raw)
+                res = self._processflags(btext[0], flags, 'read', raw=True)
                 btext[0], validatehash = res
                 if validatehash:
                     self.checkhash(btext[0], node, p1=p1, p2=p2)
@@ -1627,7 +1665,7 @@
                         fh = ifh
                     else:
                         fh = dfh
-                    ptext = self.revision(self.node(rev), _df=fh)
+                    ptext = self.revision(rev, _df=fh, raw=True)
                     delta = mdiff.textdiff(ptext, t)
             header, data = self.compress(delta)
             deltalen = len(header) + len(data)
@@ -1651,11 +1689,11 @@
 
         # full versions are inserted when the needed deltas
         # become comparable to the uncompressed text
-        if text is None:
+        if rawtext is None:
             textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                         cachedelta[1])
         else:
-            textlen = len(text)
+            textlen = len(rawtext)
 
         # should we try to build a delta?
         if prev != nullrev and self.storedeltachains:
@@ -1696,8 +1734,8 @@
         if delta is not None:
             dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
         else:
-            text = buildtext()
-            data = self.compress(text)
+            rawtext = buildtext()
+            data = self.compress(rawtext)
             l = len(data[1]) + len(data[0])
             base = chainbase = curr
 
@@ -1709,11 +1747,11 @@
         entry = self._io.packentry(e, self.node, self.version, curr)
         self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
 
-        if alwayscache and text is None:
-            text = buildtext()
+        if alwayscache and rawtext is None:
+            rawtext = buildtext()
 
-        if type(text) == str: # only accept immutable objects
-            self._cache = (node, curr, text)
+        if type(rawtext) == str: # only accept immutable objects
+            self._cache = (node, curr, rawtext)
         self._chainbasecache[curr] = chainbase
         return node
 
@@ -1835,8 +1873,7 @@
                 chain = self._addrevision(node, None, transaction, link,
                                           p1, p2, flags, (baserev, delta),
                                           ifh, dfh,
-                                          alwayscache=bool(addrevisioncb),
-                                          raw=True)
+                                          alwayscache=bool(addrevisioncb))
 
                 if addrevisioncb:
                     addrevisioncb(self, chain)
@@ -2072,14 +2109,14 @@
                 # (Possibly) reuse the delta from the revlog if allowed and
                 # the revlog chunk is a delta.
                 cachedelta = None
-                text = None
+                rawtext = None
                 if populatecachedelta:
                     dp = self.deltaparent(rev)
                     if dp != nullrev:
                         cachedelta = (dp, str(self._chunk(rev)))
 
                 if not cachedelta:
-                    text = self.revision(rev)
+                    rawtext = self.revision(rev, raw=True)
 
                 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                         checkambig=False)
@@ -2087,7 +2124,7 @@
                 if not destrevlog._inline:
                     dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                 try:
-                    destrevlog._addrevision(node, text, tr, linkrev, p1, p2,
+                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
                                             flags, cachedelta, ifh, dfh)
                 finally:
                     if dfh:
--- a/mercurial/revset.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/revset.py	Tue Apr 18 12:24:34 2017 -0400
@@ -9,7 +9,6 @@
 
 import heapq
 import re
-import string
 
 from .i18n import _
 from . import (
@@ -20,15 +19,35 @@
     match as matchmod,
     node,
     obsolete as obsmod,
-    parser,
     pathutil,
     phases,
-    pycompat,
     registrar,
     repoview,
+    revsetlang,
+    smartset,
     util,
 )
 
+# helpers for processing parsed tree
+getsymbol = revsetlang.getsymbol
+getstring = revsetlang.getstring
+getinteger = revsetlang.getinteger
+getboolean = revsetlang.getboolean
+getlist = revsetlang.getlist
+getrange = revsetlang.getrange
+getargs = revsetlang.getargs
+getargsdict = revsetlang.getargsdict
+
+# constants used as an argument of match() and matchany()
+anyorder = revsetlang.anyorder
+defineorder = revsetlang.defineorder
+followorder = revsetlang.followorder
+
+baseset = smartset.baseset
+generatorset = smartset.generatorset
+spanset = smartset.spanset
+fullreposet = smartset.fullreposet
+
 def _revancestors(repo, revs, followfirst):
     """Like revlog.ancestors(), but supports followfirst."""
     if followfirst:
@@ -146,226 +165,12 @@
     revs.sort()
     return revs
 
-elements = {
-    # token-type: binding-strength, primary, prefix, infix, suffix
-    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
-    "##": (20, None, None, ("_concat", 20), None),
-    "~": (18, None, None, ("ancestor", 18), None),
-    "^": (18, None, None, ("parent", 18), "parentpost"),
-    "-": (5, None, ("negate", 19), ("minus", 5), None),
-    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
-    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
-    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
-    "not": (10, None, ("not", 10), None, None),
-    "!": (10, None, ("not", 10), None, None),
-    "and": (5, None, None, ("and", 5), None),
-    "&": (5, None, None, ("and", 5), None),
-    "%": (5, None, None, ("only", 5), "onlypost"),
-    "or": (4, None, None, ("or", 4), None),
-    "|": (4, None, None, ("or", 4), None),
-    "+": (4, None, None, ("or", 4), None),
-    "=": (3, None, None, ("keyvalue", 3), None),
-    ",": (2, None, None, ("list", 2), None),
-    ")": (0, None, None, None, None),
-    "symbol": (0, "symbol", None, None, None),
-    "string": (0, "string", None, None, None),
-    "end": (0, None, None, None, None),
-}
-
-keywords = set(['and', 'or', 'not'])
-
-# default set of valid characters for the initial letter of symbols
-_syminitletters = set(
-    string.ascii_letters +
-    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
-
-# default set of valid characters for non-initial letters of symbols
-_symletters = _syminitletters | set(pycompat.sysstr('-/'))
-
-def tokenize(program, lookup=None, syminitletters=None, symletters=None):
-    '''
-    Parse a revset statement into a stream of tokens
-
-    ``syminitletters`` is the set of valid characters for the initial
-    letter of symbols.
-
-    By default, character ``c`` is recognized as valid for initial
-    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
-
-    ``symletters`` is the set of valid characters for non-initial
-    letters of symbols.
-
-    By default, character ``c`` is recognized as valid for non-initial
-    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
-
-    Check that @ is a valid unquoted token character (issue3686):
-    >>> list(tokenize("@::"))
-    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
-
-    '''
-    if syminitletters is None:
-        syminitletters = _syminitletters
-    if symletters is None:
-        symletters = _symletters
-
-    if program and lookup:
-        # attempt to parse old-style ranges first to deal with
-        # things like old-tag which contain query metacharacters
-        parts = program.split(':', 1)
-        if all(lookup(sym) for sym in parts if sym):
-            if parts[0]:
-                yield ('symbol', parts[0], 0)
-            if len(parts) > 1:
-                s = len(parts[0])
-                yield (':', None, s)
-                if parts[1]:
-                    yield ('symbol', parts[1], s + 1)
-            yield ('end', None, len(program))
-            return
-
-    pos, l = 0, len(program)
-    while pos < l:
-        c = program[pos]
-        if c.isspace(): # skip inter-token whitespace
-            pass
-        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
-            yield ('::', None, pos)
-            pos += 1 # skip ahead
-        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
-            yield ('..', None, pos)
-            pos += 1 # skip ahead
-        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
-            yield ('##', None, pos)
-            pos += 1 # skip ahead
-        elif c in "():=,-|&+!~^%": # handle simple operators
-            yield (c, None, pos)
-        elif (c in '"\'' or c == 'r' and
-              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
-            if c == 'r':
-                pos += 1
-                c = program[pos]
-                decode = lambda x: x
-            else:
-                decode = parser.unescapestr
-            pos += 1
-            s = pos
-            while pos < l: # find closing quote
-                d = program[pos]
-                if d == '\\': # skip over escaped characters
-                    pos += 2
-                    continue
-                if d == c:
-                    yield ('string', decode(program[s:pos]), s)
-                    break
-                pos += 1
-            else:
-                raise error.ParseError(_("unterminated string"), s)
-        # gather up a symbol/keyword
-        elif c in syminitletters:
-            s = pos
-            pos += 1
-            while pos < l: # find end of symbol
-                d = program[pos]
-                if d not in symletters:
-                    break
-                if d == '.' and program[pos - 1] == '.': # special case for ..
-                    pos -= 1
-                    break
-                pos += 1
-            sym = program[s:pos]
-            if sym in keywords: # operator keywords
-                yield (sym, None, s)
-            elif '-' in sym:
-                # some jerk gave us foo-bar-baz, try to check if it's a symbol
-                if lookup and lookup(sym):
-                    # looks like a real symbol
-                    yield ('symbol', sym, s)
-                else:
-                    # looks like an expression
-                    parts = sym.split('-')
-                    for p in parts[:-1]:
-                        if p: # possible consecutive -
-                            yield ('symbol', p, s)
-                        s += len(p)
-                        yield ('-', None, pos)
-                        s += 1
-                    if parts[-1]: # possible trailing -
-                        yield ('symbol', parts[-1], s)
-            else:
-                yield ('symbol', sym, s)
-            pos -= 1
-        else:
-            raise error.ParseError(_("syntax error in revset '%s'") %
-                                   program, pos)
-        pos += 1
-    yield ('end', None, pos)
-
 # helpers
 
-_notset = object()
-
-def getsymbol(x):
-    if x and x[0] == 'symbol':
-        return x[1]
-    raise error.ParseError(_('not a symbol'))
-
-def getstring(x, err):
-    if x and (x[0] == 'string' or x[0] == 'symbol'):
-        return x[1]
-    raise error.ParseError(err)
-
-def getinteger(x, err, default=_notset):
-    if not x and default is not _notset:
-        return default
-    try:
-        return int(getstring(x, err))
-    except ValueError:
-        raise error.ParseError(err)
-
-def getlist(x):
-    if not x:
-        return []
-    if x[0] == 'list':
-        return list(x[1:])
-    return [x]
-
-def getrange(x, err):
-    if not x:
-        raise error.ParseError(err)
-    op = x[0]
-    if op == 'range':
-        return x[1], x[2]
-    elif op == 'rangepre':
-        return None, x[1]
-    elif op == 'rangepost':
-        return x[1], None
-    elif op == 'rangeall':
-        return None, None
-    raise error.ParseError(err)
-
-def getargs(x, min, max, err):
-    l = getlist(x)
-    if len(l) < min or (max >= 0 and len(l) > max):
-        raise error.ParseError(err)
-    return l
-
-def getargsdict(x, funcname, keys):
-    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
-                                keyvaluenode='keyvalue', keynode='symbol')
-
 def getset(repo, subset, x):
     if not x:
         raise error.ParseError(_("missing argument"))
-    s = methods[x[0]](repo, subset, *x[1:])
-    if util.safehasattr(s, 'isascending'):
-        return s
-    # else case should not happen, because all non-func are internal,
-    # ignoring for now.
-    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
-        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
-                           % x[1][1],
-                           '3.9')
-    return baseset(s)
+    return methods[x[0]](repo, subset, *x[1:])
 
 def _getrevsource(repo, r):
     extra = repo[r].extra()
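
The parser internals deleted here (the elements table, tokenize, the getstring/getargs family) were not dropped: they moved to the new revsetlang module, and the import block above re-exports the getter helpers from it. Assuming tokenize now lives there with unchanged behavior, the doctest from the removed copy still describes it:

    from mercurial import revsetlang

    list(revsetlang.tokenize("@::"))
    # [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
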
@@ -501,7 +306,7 @@
 @predicate('_destupdate')
 def _destupdate(repo, subset, x):
     # experimental revset for update destination
-    args = getargsdict(x, 'limit', 'clean check')
+    args = getargsdict(x, 'limit', 'clean')
     return subset & baseset([destutil.destupdate(repo, **args)[0]])
 
 @predicate('_destmerge')
@@ -1097,17 +902,22 @@
     # of every revisions or files revisions.
     return _follow(repo, subset, x, '_followfirst', followfirst=True)
 
-@predicate('followlines(file, fromline:toline[, startrev=.])', safe=True)
+@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
+           safe=True)
 def followlines(repo, subset, x):
     """Changesets modifying `file` in line range ('fromline', 'toline').
 
     Line range corresponds to 'file' content at 'startrev' and should hence be
     consistent with file size. If startrev is not specified, working directory's
     parent is used.
+
+    By default, ancestors of 'startrev' are returned. If 'descend' is True,
+    descendants of 'startrev' are returned, though renames are (currently)
+    not followed in this direction.
     """
     from . import context  # avoid circular import issues
 
-    args = getargsdict(x, 'followlines', 'file *lines startrev')
+    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
     if len(args['lines']) != 1:
         raise error.ParseError(_("followlines requires a line range"))
 
@@ -1132,15 +942,24 @@
     lr = getrange(args['lines'][0], _("followlines expects a line range"))
     fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                         for a in lr]
-    if toline - fromline < 0:
-        raise error.ParseError(_("line range must be positive"))
-    if fromline < 1:
-        raise error.ParseError(_("fromline must be strictly positive"))
-    fromline -= 1
+    fromline, toline = util.processlinerange(fromline, toline)
 
     fctx = repo[rev].filectx(fname)
-    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
-    return subset & generatorset(revs, iterasc=False)
+    descend = False
+    if 'descend' in args:
+        descend = getboolean(args['descend'],
+                             _("'descend' argument must be a boolean"))
+    if descend:
+        rs = generatorset(
+            (c.rev() for c, _linerange
+             in context.blockdescendants(fctx, fromline, toline)),
+            iterasc=True)
+    else:
+        rs = generatorset(
+            (c.rev() for c, _linerange
+             in context.blockancestors(fctx, fromline, toline)),
+            iterasc=False)
+    return subset & rs
 
 @predicate('all()', safe=True)
 def getall(repo, subset, x):
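
A usage sketch for the new descend argument, via repo.revs() string formatting; the repo object is assumed to be already loaded, and the file name and line numbers are made up:

    fname, start = 'mercurial/revset.py', repo['.'].rev()
    # ancestors of startrev touching lines 10..20 (the default direction)
    older = repo.revs("followlines(%s, 10:20, startrev=%d)", fname, start)
    # descendants instead, per the new argument
    newer = repo.revs("followlines(%s, 10:20, startrev=%d, descend=True)",
                      fname, start)
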
@@ -1638,19 +1457,10 @@
     ps -= set([node.nullrev])
     return subset & ps
 
-def _phase(repo, subset, target):
-    """helper to select all rev in phase <target>"""
-    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
-    if repo._phasecache._phasesets:
-        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
-        s = baseset(s)
-        s.sort() # set are non ordered, so we enforce ascending
-        return subset & s
-    else:
-        phase = repo._phasecache.phase
-        condition = lambda r: phase(repo, r) == target
-        return subset.filter(condition, condrepr=('<phase %r>', target),
-                             cache=False)
+def _phase(repo, subset, *targets):
+    """helper to select all rev in <targets> phases"""
+    s = repo._phasecache.getrevset(repo, targets)
+    return subset & s
 
 @predicate('draft()', safe=True)
 def draft(repo, subset, x):
@@ -1711,20 +1521,7 @@
 @predicate('_notpublic', safe=True)
 def _notpublic(repo, subset, x):
     getargs(x, 0, 0, "_notpublic takes no arguments")
-    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
-    if repo._phasecache._phasesets:
-        s = set()
-        for u in repo._phasecache._phasesets[1:]:
-            s.update(u)
-        s = baseset(s - repo.changelog.filteredrevs)
-        s.sort()
-        return subset & s
-    else:
-        phase = repo._phasecache.phase
-        target = phases.public
-        condition = lambda r: phase(repo, r) != target
-        return subset.filter(condition, condrepr=('<phase %r>', target),
-                             cache=False)
+    return _phase(repo, subset, phases.draft, phases.secret)
 
 @predicate('public()', safe=True)
 def public(repo, subset, x):
@@ -2428,350 +2225,6 @@
     "parentpost": parentpost,
 }
 
-# Constants for ordering requirement, used in _analyze():
-#
-# If 'define', any nested functions and operations can change the ordering of
-# the entries in the set. If 'follow', any nested functions and operations
-# should take the ordering specified by the first operand to the '&' operator.
-#
-# For instance,
-#
-#   X & (Y | Z)
-#   ^   ^^^^^^^
-#   |   follow
-#   define
-#
-# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
-# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
-#
-# 'any' means the order doesn't matter. For instance,
-#
-#   X & !Y
-#        ^
-#        any
-#
-# 'y()' can either enforce its ordering requirement or take the ordering
-# specified by 'x()' because 'not()' doesn't care about the order.
-#
-# Transition of ordering requirement:
-#
-# 1. starts with 'define'
-# 2. shifts to 'follow' by 'x & y'
-# 3. changes back to 'define' on function call 'f(x)' or function-like
-#    operation 'x (f) y' because 'f' may have its own ordering requirement
-#    for 'x' and 'y' (e.g. 'first(x)')
-#
-anyorder = 'any'        # don't care about the order
-defineorder = 'define'  # should define the order
-followorder = 'follow'  # must follow the current order
-
-# transition table for 'x & y', from the current expression 'x' to 'y'
-_tofolloworder = {
-    anyorder: anyorder,
-    defineorder: followorder,
-    followorder: followorder,
-}
-
-def _matchonly(revs, bases):
-    """
-    >>> f = lambda *args: _matchonly(*map(parse, args))
-    >>> f('ancestors(A)', 'not ancestors(B)')
-    ('list', ('symbol', 'A'), ('symbol', 'B'))
-    """
-    if (revs is not None
-        and revs[0] == 'func'
-        and getsymbol(revs[1]) == 'ancestors'
-        and bases is not None
-        and bases[0] == 'not'
-        and bases[1][0] == 'func'
-        and getsymbol(bases[1][1]) == 'ancestors'):
-        return ('list', revs[2], bases[1][2])
-
-def _fixops(x):
-    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
-    handled well by our simple top-down parser"""
-    if not isinstance(x, tuple):
-        return x
-
-    op = x[0]
-    if op == 'parent':
-        # x^:y means (x^) : y, not x ^ (:y)
-        # x^:  means (x^) :,   not x ^ (:)
-        post = ('parentpost', x[1])
-        if x[2][0] == 'dagrangepre':
-            return _fixops(('dagrange', post, x[2][1]))
-        elif x[2][0] == 'rangepre':
-            return _fixops(('range', post, x[2][1]))
-        elif x[2][0] == 'rangeall':
-            return _fixops(('rangepost', post))
-    elif op == 'or':
-        # make number of arguments deterministic:
-        # x + y + z -> (or x y z) -> (or (list x y z))
-        return (op, _fixops(('list',) + x[1:]))
-
-    return (op,) + tuple(_fixops(y) for y in x[1:])
-
-def _analyze(x, order):
-    if x is None:
-        return x
-
-    op = x[0]
-    if op == 'minus':
-        return _analyze(('and', x[1], ('not', x[2])), order)
-    elif op == 'only':
-        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
-        return _analyze(t, order)
-    elif op == 'onlypost':
-        return _analyze(('func', ('symbol', 'only'), x[1]), order)
-    elif op == 'dagrangepre':
-        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
-    elif op == 'dagrangepost':
-        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
-    elif op == 'negate':
-        s = getstring(x[1], _("can't negate that"))
-        return _analyze(('string', '-' + s), order)
-    elif op in ('string', 'symbol'):
-        return x
-    elif op == 'and':
-        ta = _analyze(x[1], order)
-        tb = _analyze(x[2], _tofolloworder[order])
-        return (op, ta, tb, order)
-    elif op == 'or':
-        return (op, _analyze(x[1], order), order)
-    elif op == 'not':
-        return (op, _analyze(x[1], anyorder), order)
-    elif op == 'rangeall':
-        return (op, None, order)
-    elif op in ('rangepre', 'rangepost', 'parentpost'):
-        return (op, _analyze(x[1], defineorder), order)
-    elif op == 'group':
-        return _analyze(x[1], order)
-    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
-        ta = _analyze(x[1], defineorder)
-        tb = _analyze(x[2], defineorder)
-        return (op, ta, tb, order)
-    elif op == 'list':
-        return (op,) + tuple(_analyze(y, order) for y in x[1:])
-    elif op == 'keyvalue':
-        return (op, x[1], _analyze(x[2], order))
-    elif op == 'func':
-        f = getsymbol(x[1])
-        d = defineorder
-        if f == 'present':
-            # 'present(set)' is known to return the argument set with no
-            # modification, so forward the current order to its argument
-            d = order
-        return (op, x[1], _analyze(x[2], d), order)
-    raise ValueError('invalid operator %r' % op)
-
-def analyze(x, order=defineorder):
-    """Transform raw parsed tree to evaluatable tree which can be fed to
-    optimize() or getset()
-
-    All pseudo operations should be mapped to real operations or functions
-    defined in methods or symbols table respectively.
-
-    'order' specifies how the current expression 'x' is ordered (see the
-    constants defined above.)
-    """
-    return _analyze(x, order)
-
-def _optimize(x, small):
-    if x is None:
-        return 0, x
-
-    smallbonus = 1
-    if small:
-        smallbonus = .5
-
-    op = x[0]
-    if op in ('string', 'symbol'):
-        return smallbonus, x # single revisions are small
-    elif op == 'and':
-        wa, ta = _optimize(x[1], True)
-        wb, tb = _optimize(x[2], True)
-        order = x[3]
-        w = min(wa, wb)
-
-        # (::x and not ::y)/(not ::y and ::x) have a fast path
-        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
-        if tm:
-            return w, ('func', ('symbol', 'only'), tm, order)
-
-        if tb is not None and tb[0] == 'not':
-            return wa, ('difference', ta, tb[1], order)
-
-        if wa > wb:
-            return w, (op, tb, ta, order)
-        return w, (op, ta, tb, order)
-    elif op == 'or':
-        # fast path for machine-generated expression, that is likely to have
-        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
-        order = x[2]
-        ws, ts, ss = [], [], []
-        def flushss():
-            if not ss:
-                return
-            if len(ss) == 1:
-                w, t = ss[0]
-            else:
-                s = '\0'.join(t[1] for w, t in ss)
-                y = ('func', ('symbol', '_list'), ('string', s), order)
-                w, t = _optimize(y, False)
-            ws.append(w)
-            ts.append(t)
-            del ss[:]
-        for y in getlist(x[1]):
-            w, t = _optimize(y, False)
-            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
-                ss.append((w, t))
-                continue
-            flushss()
-            ws.append(w)
-            ts.append(t)
-        flushss()
-        if len(ts) == 1:
-            return ws[0], ts[0] # 'or' operation is fully optimized out
-        # we can't reorder trees by weight because it would change the order.
-        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
-        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
-        return max(ws), (op, ('list',) + tuple(ts), order)
-    elif op == 'not':
-        # Optimize not public() to _notpublic() because we have a fast version
-        if x[1][:3] == ('func', ('symbol', 'public'), None):
-            order = x[1][3]
-            newsym = ('func', ('symbol', '_notpublic'), None, order)
-            o = _optimize(newsym, not small)
-            return o[0], o[1]
-        else:
-            o = _optimize(x[1], not small)
-            order = x[2]
-            return o[0], (op, o[1], order)
-    elif op == 'rangeall':
-        return smallbonus, x
-    elif op in ('rangepre', 'rangepost', 'parentpost'):
-        o = _optimize(x[1], small)
-        order = x[2]
-        return o[0], (op, o[1], order)
-    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
-        wa, ta = _optimize(x[1], small)
-        wb, tb = _optimize(x[2], small)
-        order = x[3]
-        return wa + wb, (op, ta, tb, order)
-    elif op == 'list':
-        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
-        return sum(ws), (op,) + ts
-    elif op == 'keyvalue':
-        w, t = _optimize(x[2], small)
-        return w, (op, x[1], t)
-    elif op == 'func':
-        f = getsymbol(x[1])
-        wa, ta = _optimize(x[2], small)
-        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
-                 'keyword', 'outgoing', 'user', 'destination'):
-            w = 10 # slow
-        elif f in ('modifies', 'adds', 'removes'):
-            w = 30 # slower
-        elif f == "contains":
-            w = 100 # very slow
-        elif f == "ancestor":
-            w = 1 * smallbonus
-        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
-            w = 0
-        elif f == "sort":
-            w = 10 # assume most sorts look at changelog
-        else:
-            w = 1
-        order = x[3]
-        return w + wa, (op, x[1], ta, order)
-    raise ValueError('invalid operator %r' % op)
-
-def optimize(tree):
-    """Optimize evaluatable tree
-
-    All pseudo operations should be transformed beforehand.
-    """
-    _weight, newtree = _optimize(tree, small=True)
-    return newtree
-
-# the set of valid characters for the initial letter of symbols in
-# alias declarations and definitions
-_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
-
-def _parsewith(spec, lookup=None, syminitletters=None):
-    """Generate a parse tree of given spec with given tokenizing options
-
-    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
-    ('func', ('symbol', 'foo'), ('symbol', '$1'))
-    >>> _parsewith('$1')
-    Traceback (most recent call last):
-      ...
-    ParseError: ("syntax error in revset '$1'", 0)
-    >>> _parsewith('foo bar')
-    Traceback (most recent call last):
-      ...
-    ParseError: ('invalid token', 4)
-    """
-    p = parser.parser(elements)
-    tree, pos = p.parse(tokenize(spec, lookup=lookup,
-                                 syminitletters=syminitletters))
-    if pos != len(spec):
-        raise error.ParseError(_('invalid token'), pos)
-    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
-
-class _aliasrules(parser.basealiasrules):
-    """Parsing and expansion rule set of revset aliases"""
-    _section = _('revset alias')
-
-    @staticmethod
-    def _parse(spec):
-        """Parse alias declaration/definition ``spec``
-
-        This allows symbol names to use also ``$`` as an initial letter
-        (for backward compatibility), and callers of this function should
-        examine whether ``$`` is used also for unexpected symbols or not.
-        """
-        return _parsewith(spec, syminitletters=_aliassyminitletters)
-
-    @staticmethod
-    def _trygetfunc(tree):
-        if tree[0] == 'func' and tree[1][0] == 'symbol':
-            return tree[1][1], getlist(tree[2])
-
-def expandaliases(ui, tree):
-    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
-    tree = _aliasrules.expand(aliases, tree)
-    # warn about problematic (but not referred) aliases
-    for name, alias in sorted(aliases.iteritems()):
-        if alias.error and not alias.warned:
-            ui.warn(_('warning: %s\n') % (alias.error))
-            alias.warned = True
-    return tree
-
-def foldconcat(tree):
-    """Fold elements to be concatenated by `##`
-    """
-    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
-        return tree
-    if tree[0] == '_concat':
-        pending = [tree]
-        l = []
-        while pending:
-            e = pending.pop()
-            if e[0] == '_concat':
-                pending.extend(reversed(e[1:]))
-            elif e[0] in ('string', 'symbol'):
-                l.append(e[1])
-            else:
-                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
-                raise error.ParseError(msg)
-        return ('string', ''.join(l))
-    else:
-        return tuple(foldconcat(t) for t in tree)
-
-def parse(spec, lookup=None):
-    return _parsewith(spec, lookup=lookup)
-
 def posttreebuilthook(tree, repo):
     # hook for extensions to execute code on the optimized tree
     pass
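
posttreebuilthook() stays a deliberate no-op so extensions can observe the
final optimized tree. A hypothetical extension could hook it through
extensions.wrapfunction (a sketch only; the names below are illustrative,
not part of this change):

    from mercurial import extensions, revset

    def _logtree(orig, tree, repo):
        # repo may be None when no repository is involved
        if repo is not None:
            repo.ui.debug('revset tree: %r\n' % (tree,))
        return orig(tree, repo)

    def extsetup(ui):
        extensions.wrapfunction(revset, 'posttreebuilthook', _logtree)
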
@@ -2801,15 +2254,16 @@
     if repo:
         lookup = repo.__contains__
     if len(specs) == 1:
-        tree = parse(specs[0], lookup)
+        tree = revsetlang.parse(specs[0], lookup)
     else:
-        tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))
+        tree = ('or',
+                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
 
     if ui:
-        tree = expandaliases(ui, tree)
-    tree = foldconcat(tree)
-    tree = analyze(tree, order)
-    tree = optimize(tree)
+        tree = revsetlang.expandaliases(ui, tree)
+    tree = revsetlang.foldconcat(tree)
+    tree = revsetlang.analyze(tree, order)
+    tree = revsetlang.optimize(tree)
     posttreebuilthook(tree, repo)
     return makematcher(tree)
 
@@ -2818,1089 +2272,9 @@
     def mfunc(repo, subset=None):
         if subset is None:
             subset = fullreposet(repo)
-        if util.safehasattr(subset, 'isascending'):
-            result = getset(repo, subset, tree)
-        else:
-            result = getset(repo, baseset(subset), tree)
-        return result
+        return getset(repo, subset, tree)
     return mfunc
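
A usage sketch for matchany()/makematcher() (here 'ui' and 'repo' stand in
for live objects and the spec strings are arbitrary). With the fallback
removed just above, an explicit subset must already be a smartset; plain
iterables are no longer wrapped automatically:

    m = matchany(ui, ['draft()', 'head()'], repo=repo)
    revs = m(repo)        # subset defaults to fullreposet(repo)
    some = m(repo, revs)  # any explicit subset must be a smartset
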
 
-def formatspec(expr, *args):
-    '''
-    This is a convenience function for using revsets internally, and
-    escapes arguments appropriately. Aliases are intentionally ignored
-    so that intended expression behavior isn't accidentally subverted.
-
-    Supported arguments:
-
-    %r = revset expression, parenthesized
-    %d = int(arg), no quoting
-    %s = string(arg), escaped and single-quoted
-    %b = arg.branch(), escaped and single-quoted
-    %n = hex(arg), single-quoted
-    %% = a literal '%'
-
-    Prefixing the type with 'l' specifies a parenthesized list of that type.
-
-    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
-    '(10 or 11):: and ((this()) or (that()))'
-    >>> formatspec('%d:: and not %d::', 10, 20)
-    '10:: and not 20::'
-    >>> formatspec('%ld or %ld', [], [1])
-    "_list('') or 1"
-    >>> formatspec('keyword(%s)', 'foo\\xe9')
-    "keyword('foo\\\\xe9')"
-    >>> b = lambda: 'default'
-    >>> b.branch = b
-    >>> formatspec('branch(%b)', b)
-    "branch('default')"
-    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
-    "root(_list('a\\x00b\\x00c\\x00d'))"
-    '''
-
-    def quote(s):
-        return repr(str(s))
-
-    def argtype(c, arg):
-        if c == 'd':
-            return str(int(arg))
-        elif c == 's':
-            return quote(arg)
-        elif c == 'r':
-            parse(arg) # make sure syntax errors are confined
-            return '(%s)' % arg
-        elif c == 'n':
-            return quote(node.hex(arg))
-        elif c == 'b':
-            return quote(arg.branch())
-
-    def listexp(s, t):
-        l = len(s)
-        if l == 0:
-            return "_list('')"
-        elif l == 1:
-            return argtype(t, s[0])
-        elif t == 'd':
-            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
-        elif t == 's':
-            return "_list('%s')" % "\0".join(s)
-        elif t == 'n':
-            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
-        elif t == 'b':
-            return "_list('%s')" % "\0".join(a.branch() for a in s)
-
-        m = l // 2
-        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
-
-    ret = ''
-    pos = 0
-    arg = 0
-    while pos < len(expr):
-        c = expr[pos]
-        if c == '%':
-            pos += 1
-            d = expr[pos]
-            if d == '%':
-                ret += d
-            elif d in 'dsnbr':
-                ret += argtype(d, args[arg])
-                arg += 1
-            elif d == 'l':
-                # a list of some type
-                pos += 1
-                d = expr[pos]
-                ret += listexp(list(args[arg]), d)
-                arg += 1
-            else:
-                raise error.Abort(_('unexpected revspec format character %s')
-                                  % d)
-        else:
-            ret += c
-        pos += 1
-
-    return ret
-
-def prettyformat(tree):
-    return parser.prettyformat(tree, ('string', 'symbol'))
-
-def depth(tree):
-    if isinstance(tree, tuple):
-        return max(map(depth, tree)) + 1
-    else:
-        return 0
-
-def funcsused(tree):
-    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
-        return set()
-    else:
-        funcs = set()
-        for s in tree[1:]:
-            funcs |= funcsused(s)
-        if tree[0] == 'func':
-            funcs.add(tree[1][1])
-        return funcs
-
-def _formatsetrepr(r):
-    """Format an optional printable representation of a set
-
-    ========  =================================
-    type(r)   example
-    ========  =================================
-    tuple     ('<not %r>', other)
-    str       '<branch closed>'
-    callable  lambda: '<branch %r>' % sorted(b)
-    object    other
-    ========  =================================
-    """
-    if r is None:
-        return ''
-    elif isinstance(r, tuple):
-        return r[0] % r[1:]
-    elif isinstance(r, str):
-        return r
-    elif callable(r):
-        return r()
-    else:
-        return repr(r)
-
-class abstractsmartset(object):
-
-    def __nonzero__(self):
-        """True if the smartset is not empty"""
-        raise NotImplementedError()
-
-    def __contains__(self, rev):
-        """provide fast membership testing"""
-        raise NotImplementedError()
-
-    def __iter__(self):
-        """iterate the set in the order it is supposed to be iterated"""
-        raise NotImplementedError()
-
-    # Attributes containing a function to perform a fast iteration in a given
-    # direction. A smartset can have none, one, or both defined.
-    #
-    # Default value is None instead of a function returning None to avoid
-    # initializing an iterator just for testing if a fast method exists.
-    fastasc = None
-    fastdesc = None
-
-    def isascending(self):
-        """True if the set will iterate in ascending order"""
-        raise NotImplementedError()
-
-    def isdescending(self):
-        """True if the set will iterate in descending order"""
-        raise NotImplementedError()
-
-    def istopo(self):
-        """True if the set will iterate in topographical order"""
-        raise NotImplementedError()
-
-    def min(self):
-        """return the minimum element in the set"""
-        if self.fastasc is None:
-            v = min(self)
-        else:
-            for v in self.fastasc():
-                break
-            else:
-                raise ValueError('arg is an empty sequence')
-        self.min = lambda: v
-        return v
-
-    def max(self):
-        """return the maximum element in the set"""
-        if self.fastdesc is None:
-            return max(self)
-        else:
-            for v in self.fastdesc():
-                break
-            else:
-                raise ValueError('arg is an empty sequence')
-        self.max = lambda: v
-        return v
-
-    def first(self):
-        """return the first element in the set (user iteration perspective)
-
-        Return None if the set is empty"""
-        raise NotImplementedError()
-
-    def last(self):
-        """return the last element in the set (user iteration perspective)
-
-        Return None if the set is empty"""
-        raise NotImplementedError()
-
-    def __len__(self):
-        """return the length of the smartsets
-
-        This can be expensive on smartset that could be lazy otherwise."""
-        raise NotImplementedError()
-
-    def reverse(self):
-        """reverse the expected iteration order"""
-        raise NotImplementedError()
-
-    def sort(self, reverse=True):
-        """get the set to iterate in an ascending or descending order"""
-        raise NotImplementedError()
-
-    def __and__(self, other):
-        """Returns a new object with the intersection of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        if isinstance(other, fullreposet):
-            return self
-        return self.filter(other.__contains__, condrepr=other, cache=False)
-
-    def __add__(self, other):
-        """Returns a new object with the union of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        return addset(self, other)
-
-    def __sub__(self, other):
-        """Returns a new object with the substraction of the two collections.
-
-        This is part of the mandatory API for smartset."""
-        c = other.__contains__
-        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
-                           cache=False)
-
-    def filter(self, condition, condrepr=None, cache=True):
-        """Returns this smartset filtered by condition as a new smartset.
-
-        `condition` is a callable which takes a revision number and returns a
-        boolean. Optional `condrepr` provides a printable representation of
-        the given `condition`.
-
-        This is part of the mandatory API for smartset."""
-        # builtin cannot be cached. but do not needs to
-        if cache and util.safehasattr(condition, 'func_code'):
-            condition = util.cachefunc(condition)
-        return filteredset(self, condition, condrepr)
-
-class baseset(abstractsmartset):
-    """Basic data structure that represents a revset and contains the basic
-    operation that it should be able to perform.
-
-    Every method in this class should be implemented by any smartset class.
-    """
-    def __init__(self, data=(), datarepr=None, istopo=False):
-        """
-        datarepr: a tuple of (format, obj, ...), a function or an object that
-                  provides a printable representation of the given data.
-        """
-        self._ascending = None
-        self._istopo = istopo
-        if not isinstance(data, list):
-            if isinstance(data, set):
-                self._set = data
-                # set has no order we pick one for stability purpose
-                self._ascending = True
-            data = list(data)
-        self._list = data
-        self._datarepr = datarepr
-
-    @util.propertycache
-    def _set(self):
-        return set(self._list)
-
-    @util.propertycache
-    def _asclist(self):
-        asclist = self._list[:]
-        asclist.sort()
-        return asclist
-
-    def __iter__(self):
-        if self._ascending is None:
-            return iter(self._list)
-        elif self._ascending:
-            return iter(self._asclist)
-        else:
-            return reversed(self._asclist)
-
-    def fastasc(self):
-        return iter(self._asclist)
-
-    def fastdesc(self):
-        return reversed(self._asclist)
-
-    @util.propertycache
-    def __contains__(self):
-        return self._set.__contains__
-
-    def __nonzero__(self):
-        return bool(self._list)
-
-    def sort(self, reverse=False):
-        self._ascending = not bool(reverse)
-        self._istopo = False
-
-    def reverse(self):
-        if self._ascending is None:
-            self._list.reverse()
-        else:
-            self._ascending = not self._ascending
-        self._istopo = False
-
-    def __len__(self):
-        return len(self._list)
-
-    def isascending(self):
-        """Returns True if the collection is ascending order, False if not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._ascending is not None and self._ascending
-
-    def isdescending(self):
-        """Returns True if the collection is descending order, False if not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._ascending is not None and not self._ascending
-
-    def istopo(self):
-        """Is the collection is in topographical order or not.
-
-        This is part of the mandatory API for smartset."""
-        if len(self) <= 1:
-            return True
-        return self._istopo
-
-    def first(self):
-        if self:
-            if self._ascending is None:
-                return self._list[0]
-            elif self._ascending:
-                return self._asclist[0]
-            else:
-                return self._asclist[-1]
-        return None
-
-    def last(self):
-        if self:
-            if self._ascending is None:
-                return self._list[-1]
-            elif self._ascending:
-                return self._asclist[-1]
-            else:
-                return self._asclist[0]
-        return None
-
-    def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
-        s = _formatsetrepr(self._datarepr)
-        if not s:
-            l = self._list
-            # if _list has been built from a set, it might have a different
-            # order from one python implementation to another.
-            # We fallback to the sorted version for a stable output.
-            if self._ascending is not None:
-                l = self._asclist
-            s = repr(l)
-        return '<%s%s %s>' % (type(self).__name__, d, s)
-
-class filteredset(abstractsmartset):
-    """Duck type for baseset class which iterates lazily over the revisions in
-    the subset and contains a function which tests for membership in the
-    revset
-    """
-    def __init__(self, subset, condition=lambda x: True, condrepr=None):
-        """
-        condition: a function that decide whether a revision in the subset
-                   belongs to the revset or not.
-        condrepr: a tuple of (format, obj, ...), a function or an object that
-                  provides a printable representation of the given condition.
-        """
-        self._subset = subset
-        self._condition = condition
-        self._condrepr = condrepr
-
-    def __contains__(self, x):
-        return x in self._subset and self._condition(x)
-
-    def __iter__(self):
-        return self._iterfilter(self._subset)
-
-    def _iterfilter(self, it):
-        cond = self._condition
-        for x in it:
-            if cond(x):
-                yield x
-
-    @property
-    def fastasc(self):
-        it = self._subset.fastasc
-        if it is None:
-            return None
-        return lambda: self._iterfilter(it())
-
-    @property
-    def fastdesc(self):
-        it = self._subset.fastdesc
-        if it is None:
-            return None
-        return lambda: self._iterfilter(it())
-
-    def __nonzero__(self):
-        fast = None
-        candidates = [self.fastasc if self.isascending() else None,
-                      self.fastdesc if self.isdescending() else None,
-                      self.fastasc,
-                      self.fastdesc]
-        for candidate in candidates:
-            if candidate is not None:
-                fast = candidate
-                break
-
-        if fast is not None:
-            it = fast()
-        else:
-            it = self
-
-        for r in it:
-            return True
-        return False
-
-    def __len__(self):
-        # Basic implementation to be changed in future patches.
-        # until this gets improved, we use generator expression
-        # here, since list comprehensions are free to call __len__ again
-        # causing infinite recursion
-        l = baseset(r for r in self)
-        return len(l)
-
-    def sort(self, reverse=False):
-        self._subset.sort(reverse=reverse)
-
-    def reverse(self):
-        self._subset.reverse()
-
-    def isascending(self):
-        return self._subset.isascending()
-
-    def isdescending(self):
-        return self._subset.isdescending()
-
-    def istopo(self):
-        return self._subset.istopo()
-
-    def first(self):
-        for x in self:
-            return x
-        return None
-
-    def last(self):
-        it = None
-        if self.isascending():
-            it = self.fastdesc
-        elif self.isdescending():
-            it = self.fastasc
-        if it is not None:
-            for x in it():
-                return x
-            return None #empty case
-        else:
-            x = None
-            for x in self:
-                pass
-            return x
-
-    def __repr__(self):
-        xs = [repr(self._subset)]
-        s = _formatsetrepr(self._condrepr)
-        if s:
-            xs.append(s)
-        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
-
-def _iterordered(ascending, iter1, iter2):
-    """produce an ordered iteration from two iterators with the same order
-
-    The ascending is used to indicated the iteration direction.
-    """
-    choice = max
-    if ascending:
-        choice = min
-
-    val1 = None
-    val2 = None
-    try:
-        # Consume both iterators in an ordered way until one is empty
-        while True:
-            if val1 is None:
-                val1 = next(iter1)
-            if val2 is None:
-                val2 = next(iter2)
-            n = choice(val1, val2)
-            yield n
-            if val1 == n:
-                val1 = None
-            if val2 == n:
-                val2 = None
-    except StopIteration:
-        # Flush any remaining values and consume the other one
-        it = iter2
-        if val1 is not None:
-            yield val1
-            it = iter1
-        elif val2 is not None:
-            # might have been equality and both are empty
-            yield val2
-        for val in it:
-            yield val
-
-class addset(abstractsmartset):
-    """Represent the addition of two sets
-
-    Wrapper structure for lazily adding two structures without losing much
-    performance on the __contains__ method
-
-    If the ascending attribute is set, that means the two structures are
-    ordered in either an ascending or descending way. Therefore, we can add
-    them maintaining the order by iterating over both at the same time
-
-    >>> xs = baseset([0, 3, 2])
-    >>> ys = baseset([5, 2, 4])
-
-    >>> rs = addset(xs, ys)
-    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
-    (True, True, False, True, 0, 4)
-    >>> rs = addset(xs, baseset([]))
-    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
-    (True, True, False, 0, 2)
-    >>> rs = addset(baseset([]), baseset([]))
-    >>> bool(rs), 0 in rs, rs.first(), rs.last()
-    (False, False, None, None)
-
-    iterate unsorted:
-    >>> rs = addset(xs, ys)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs)  # without _genlist
-    [0, 3, 2, 5, 4]
-    >>> assert not rs._genlist
-    >>> len(rs)
-    5
-    >>> [x for x in rs]  # with _genlist
-    [0, 3, 2, 5, 4]
-    >>> assert rs._genlist
-
-    iterate ascending:
-    >>> rs = addset(xs, ys, ascending=True)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs), list(x for x in rs.fastasc())  # without _asclist
-    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
-    >>> assert not rs._asclist
-    >>> len(rs)
-    5
-    >>> [x for x in rs], [x for x in rs.fastasc()]
-    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
-    >>> assert rs._asclist
-
-    iterate descending:
-    >>> rs = addset(xs, ys, ascending=False)
-    >>> # (use generator because pypy could call len())
-    >>> list(x for x in rs), list(x for x in rs.fastdesc())  # without _asclist
-    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
-    >>> assert not rs._asclist
-    >>> len(rs)
-    5
-    >>> [x for x in rs], [x for x in rs.fastdesc()]
-    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
-    >>> assert rs._asclist
-
-    iterate ascending without fastasc:
-    >>> rs = addset(xs, generatorset(ys), ascending=True)
-    >>> assert rs.fastasc is None
-    >>> [x for x in rs]
-    [0, 2, 3, 4, 5]
-
-    iterate descending without fastdesc:
-    >>> rs = addset(generatorset(xs), ys, ascending=False)
-    >>> assert rs.fastdesc is None
-    >>> [x for x in rs]
-    [5, 4, 3, 2, 0]
-    """
-    def __init__(self, revs1, revs2, ascending=None):
-        self._r1 = revs1
-        self._r2 = revs2
-        self._iter = None
-        self._ascending = ascending
-        self._genlist = None
-        self._asclist = None
-
-    def __len__(self):
-        return len(self._list)
-
-    def __nonzero__(self):
-        return bool(self._r1) or bool(self._r2)
-
-    @util.propertycache
-    def _list(self):
-        if not self._genlist:
-            self._genlist = baseset(iter(self))
-        return self._genlist
-
-    def __iter__(self):
-        """Iterate over both collections without repeating elements
-
-        If the ascending attribute is not set, iterate over the first one and
-        then over the second one checking for membership on the first one so we
-        dont yield any duplicates.
-
-        If the ascending attribute is set, iterate over both collections at the
-        same time, yielding only one value at a time in the given order.
-        """
-        if self._ascending is None:
-            if self._genlist:
-                return iter(self._genlist)
-            def arbitraryordergen():
-                for r in self._r1:
-                    yield r
-                inr1 = self._r1.__contains__
-                for r in self._r2:
-                    if not inr1(r):
-                        yield r
-            return arbitraryordergen()
-        # try to use our own fast iterator if it exists
-        self._trysetasclist()
-        if self._ascending:
-            attr = 'fastasc'
-        else:
-            attr = 'fastdesc'
-        it = getattr(self, attr)
-        if it is not None:
-            return it()
-        # maybe half of the component supports fast
-        # get iterator for _r1
-        iter1 = getattr(self._r1, attr)
-        if iter1 is None:
-            # let's avoid side effect (not sure it matters)
-            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
-        else:
-            iter1 = iter1()
-        # get iterator for _r2
-        iter2 = getattr(self._r2, attr)
-        if iter2 is None:
-            # let's avoid side effect (not sure it matters)
-            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
-        else:
-            iter2 = iter2()
-        return _iterordered(self._ascending, iter1, iter2)
-
-    def _trysetasclist(self):
-        """populate the _asclist attribute if possible and necessary"""
-        if self._genlist is not None and self._asclist is None:
-            self._asclist = sorted(self._genlist)
-
-    @property
-    def fastasc(self):
-        self._trysetasclist()
-        if self._asclist is not None:
-            return self._asclist.__iter__
-        iter1 = self._r1.fastasc
-        iter2 = self._r2.fastasc
-        if None in (iter1, iter2):
-            return None
-        return lambda: _iterordered(True, iter1(), iter2())
-
-    @property
-    def fastdesc(self):
-        self._trysetasclist()
-        if self._asclist is not None:
-            return self._asclist.__reversed__
-        iter1 = self._r1.fastdesc
-        iter2 = self._r2.fastdesc
-        if None in (iter1, iter2):
-            return None
-        return lambda: _iterordered(False, iter1(), iter2())
-
-    def __contains__(self, x):
-        return x in self._r1 or x in self._r2
-
-    def sort(self, reverse=False):
-        """Sort the added set
-
-        For this we use the cached list with all the generated values and if we
-        know they are ascending or descending we can sort them in a smart way.
-        """
-        self._ascending = not reverse
-
-    def isascending(self):
-        return self._ascending is not None and self._ascending
-
-    def isdescending(self):
-        return self._ascending is not None and not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topographical order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def reverse(self):
-        if self._ascending is None:
-            self._list.reverse()
-        else:
-            self._ascending = not self._ascending
-
-    def first(self):
-        for x in self:
-            return x
-        return None
-
-    def last(self):
-        self.reverse()
-        val = self.first()
-        self.reverse()
-        return val
-
-    def __repr__(self):
-        d = {None: '', False: '-', True: '+'}[self._ascending]
-        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
-
-class generatorset(abstractsmartset):
-    """Wrap a generator for lazy iteration
-
-    Wrapper structure for generators that provides lazy membership and can
-    be iterated more than once.
-    When asked for membership it generates values until either it finds the
-    requested one or has gone through all the elements in the generator
-    """
-    def __init__(self, gen, iterasc=None):
-        """
-        gen: a generator producing the values for the generatorset.
-        """
-        self._gen = gen
-        self._asclist = None
-        self._cache = {}
-        self._genlist = []
-        self._finished = False
-        self._ascending = True
-        if iterasc is not None:
-            if iterasc:
-                self.fastasc = self._iterator
-                self.__contains__ = self._asccontains
-            else:
-                self.fastdesc = self._iterator
-                self.__contains__ = self._desccontains
-
-    def __nonzero__(self):
-        # Do not use 'for r in self' because it will enforce the iteration
-        # order (default ascending), possibly unrolling a whole descending
-        # iterator.
-        if self._genlist:
-            return True
-        for r in self._consumegen():
-            return True
-        return False
-
-    def __contains__(self, x):
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-
-        self._cache[x] = False
-        return False
-
-    def _asccontains(self, x):
-        """version of contains optimised for ascending generator"""
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-            if l > x:
-                break
-
-        self._cache[x] = False
-        return False
-
-    def _desccontains(self, x):
-        """version of contains optimised for descending generator"""
-        if x in self._cache:
-            return self._cache[x]
-
-        # Use new values only, as existing values would be cached.
-        for l in self._consumegen():
-            if l == x:
-                return True
-            if l < x:
-                break
-
-        self._cache[x] = False
-        return False
-
-    def __iter__(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        if it is not None:
-            return it()
-        # we need to consume the iterator
-        for x in self._consumegen():
-            pass
-        # recall the same code
-        return iter(self)
-
-    def _iterator(self):
-        if self._finished:
-            return iter(self._genlist)
-
-        # We have to use this complex iteration strategy to allow multiple
-        # iterations at the same time. We need to be able to catch revision
-        # removed from _consumegen and added to genlist in another instance.
-        #
-        # Getting rid of it would provide an about 15% speed up on this
-        # iteration.
-        genlist = self._genlist
-        nextrev = self._consumegen().next
-        _len = len # cache global lookup
-        def gen():
-            i = 0
-            while True:
-                if i < _len(genlist):
-                    yield genlist[i]
-                else:
-                    yield nextrev()
-                i += 1
-        return gen()
-
-    def _consumegen(self):
-        cache = self._cache
-        genlist = self._genlist.append
-        for item in self._gen:
-            cache[item] = True
-            genlist(item)
-            yield item
-        if not self._finished:
-            self._finished = True
-            asc = self._genlist[:]
-            asc.sort()
-            self._asclist = asc
-            self.fastasc = asc.__iter__
-            self.fastdesc = asc.__reversed__
-
-    def __len__(self):
-        for x in self._consumegen():
-            pass
-        return len(self._genlist)
-
-    def sort(self, reverse=False):
-        self._ascending = not reverse
-
-    def reverse(self):
-        self._ascending = not self._ascending
-
-    def isascending(self):
-        return self._ascending
-
-    def isdescending(self):
-        return not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topographical order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def first(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        if it is None:
-            # we need to consume all and try again
-            for x in self._consumegen():
-                pass
-            return self.first()
-        return next(it(), None)
-
-    def last(self):
-        if self._ascending:
-            it = self.fastdesc
-        else:
-            it = self.fastasc
-        if it is None:
-            # we need to consume all and try again
-            for x in self._consumegen():
-                pass
-            return self.first()
-        return next(it(), None)
-
-    def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s>' % (type(self).__name__, d)
-
-class spanset(abstractsmartset):
-    """Duck type for baseset class which represents a range of revisions and
-    can work lazily and without having all the range in memory
-
-    Note that spanset(x, y) behave almost like xrange(x, y) except for two
-    notable points:
-    - when x < y it will be automatically descending,
-    - revision filtered with this repoview will be skipped.
-
-    """
-    def __init__(self, repo, start=0, end=None):
-        """
-        start: first revision included the set
-               (default to 0)
-        end:   first revision excluded (last+1)
-               (default to len(repo)
-
-        Spanset will be descending if `end` < `start`.
-        """
-        if end is None:
-            end = len(repo)
-        self._ascending = start <= end
-        if not self._ascending:
-            start, end = end + 1, start +1
-        self._start = start
-        self._end = end
-        self._hiddenrevs = repo.changelog.filteredrevs
-
-    def sort(self, reverse=False):
-        self._ascending = not reverse
-
-    def reverse(self):
-        self._ascending = not self._ascending
-
-    def istopo(self):
-        # not worth the trouble asserting if the two sets combined are still
-        # in topographical order. Use the sort() predicate to explicitly sort
-        # again instead.
-        return False
-
-    def _iterfilter(self, iterrange):
-        s = self._hiddenrevs
-        for r in iterrange:
-            if r not in s:
-                yield r
-
-    def __iter__(self):
-        if self._ascending:
-            return self.fastasc()
-        else:
-            return self.fastdesc()
-
-    def fastasc(self):
-        iterrange = xrange(self._start, self._end)
-        if self._hiddenrevs:
-            return self._iterfilter(iterrange)
-        return iter(iterrange)
-
-    def fastdesc(self):
-        iterrange = xrange(self._end - 1, self._start - 1, -1)
-        if self._hiddenrevs:
-            return self._iterfilter(iterrange)
-        return iter(iterrange)
-
-    def __contains__(self, rev):
-        hidden = self._hiddenrevs
-        return ((self._start <= rev < self._end)
-                and not (hidden and rev in hidden))
-
-    def __nonzero__(self):
-        for r in self:
-            return True
-        return False
-
-    def __len__(self):
-        if not self._hiddenrevs:
-            return abs(self._end - self._start)
-        else:
-            count = 0
-            start = self._start
-            end = self._end
-            for rev in self._hiddenrevs:
-                if (end < rev <= start) or (start <= rev < end):
-                    count += 1
-            return abs(self._end - self._start) - count
-
-    def isascending(self):
-        return self._ascending
-
-    def isdescending(self):
-        return not self._ascending
-
-    def first(self):
-        if self._ascending:
-            it = self.fastasc
-        else:
-            it = self.fastdesc
-        for x in it():
-            return x
-        return None
-
-    def last(self):
-        if self._ascending:
-            it = self.fastdesc
-        else:
-            it = self.fastasc
-        for x in it():
-            return x
-        return None
-
-    def __repr__(self):
-        d = {False: '-', True: '+'}[self._ascending]
-        return '<%s%s %d:%d>' % (type(self).__name__, d,
-                                 self._start, self._end - 1)
-
-class fullreposet(spanset):
-    """a set containing all revisions in the repo
-
-    This class exists to host special optimization and magic to handle virtual
-    revisions such as "null".
-    """
-
-    def __init__(self, repo):
-        super(fullreposet, self).__init__(repo)
-
-    def __and__(self, other):
-        """As self contains the whole repo, all of the other set should also be
-        in self. Therefore `self & other = other`.
-
-        This boldly assumes the other contains valid revs only.
-        """
-        # other not a smartset, make is so
-        if not util.safehasattr(other, 'isascending'):
-            # filter out hidden revision
-            # (this boldly assumes all smartset are pure)
-            #
-            # `other` was used with "&", let's assume this is a set like
-            # object.
-            other = baseset(other - self._hiddenrevs)
-
-        other.sort(reverse=self.isdescending())
-        return other
-
-def prettyformatset(revs):
-    lines = []
-    rs = repr(revs)
-    p = 0
-    while p < len(rs):
-        q = rs.find('<', p + 1)
-        if q < 0:
-            q = len(rs)
-        l = rs.count('<', 0, p) - rs.count('>', 0, p)
-        assert l >= 0
-        lines.append((l, rs[p:q].rstrip()))
-        p = q
-    return '\n'.join('  ' * l + s for l, s in lines)
-
 def loadpredicate(ui, extname, registrarobj):
     """Load revset predicates from specified registrarobj
     """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revsetlang.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,708 @@
+# revsetlang.py - parser, tokenizer and utility for revision set language
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import string
+
+from .i18n import _
+from . import (
+    error,
+    node,
+    parser,
+    pycompat,
+    util,
+)
+
+elements = {
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    "##": (20, None, None, ("_concat", 20), None),
+    "~": (18, None, None, ("ancestor", 18), None),
+    "^": (18, None, None, ("parent", 18), "parentpost"),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "%": (5, None, None, ("only", 5), "onlypost"),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    "=": (3, None, None, ("keyvalue", 3), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
+}
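
The binding strengths above drive the Pratt-style parser: an operator only
captures operands produced by operators of higher binding strength. As a
rough illustration (tree shapes inferred from parse() and _fixops() below,
so details may vary), ':' at 15 binds tighter than 'or' at 4:

    >>> parse('1 or 2:3')
    ('or', ('list', ('symbol', '1'), ('range', ('symbol', '2'), ('symbol', '3'))))
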
+
+keywords = set(['and', 'or', 'not'])
+
+_quoteletters = set(['"', "'"])
+_simpleopletters = set(pycompat.iterbytestr("():=,-|&+!~^%"))
+
+# default set of valid characters for the initial letter of symbols
+_syminitletters = set(pycompat.iterbytestr(
+    string.ascii_letters.encode('ascii') +
+    string.digits.encode('ascii') +
+    '._@')) | set(map(pycompat.bytechr, xrange(128, 256)))
+
+# default set of valid characters for non-initial letters of symbols
+_symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
+
+def tokenize(program, lookup=None, syminitletters=None, symletters=None):
+    '''
+    Parse a revset statement into a stream of tokens
+
+    ``syminitletters`` is the set of valid characters for the initial
+    letter of symbols.
+
+    By default, character ``c`` is recognized as a valid initial letter
+    of a symbol if ``c.isalnum() or c in '._@' or ord(c) > 127``.
+
+    ``symletters`` is the set of valid characters for non-initial
+    letters of symbols.
+
+    By default, character ``c`` is recognized as a valid non-initial
+    letter of a symbol if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
+
+    Check that @ is a valid unquoted token character (issue3686):
+    >>> list(tokenize("@::"))
+    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
+
+    '''
+    program = pycompat.bytestr(program)
+    if syminitletters is None:
+        syminitletters = _syminitletters
+    if symletters is None:
+        symletters = _symletters
+
+    if program and lookup:
+        # attempt to parse old-style ranges first to deal with
+        # things like 'old-tag' whose names contain query metacharacters
+        parts = program.split(':', 1)
+        if all(lookup(sym) for sym in parts if sym):
+            if parts[0]:
+                yield ('symbol', parts[0], 0)
+            if len(parts) > 1:
+                s = len(parts[0])
+                yield (':', None, s)
+                if parts[1]:
+                    yield ('symbol', parts[1], s + 1)
+            yield ('end', None, len(program))
+            return
+
+    pos, l = 0, len(program)
+    while pos < l:
+        c = program[pos]
+        if c.isspace(): # skip inter-token whitespace
+            pass
+        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
+            yield ('::', None, pos)
+            pos += 1 # skip ahead
+        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
+            yield ('..', None, pos)
+            pos += 1 # skip ahead
+        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
+            yield ('##', None, pos)
+            pos += 1 # skip ahead
+        elif c in _simpleopletters: # handle simple operators
+            yield (c, None, pos)
+        elif (c in _quoteletters or c == 'r' and
+              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+            if c == 'r':
+                pos += 1
+                c = program[pos]
+                decode = lambda x: x
+            else:
+                decode = parser.unescapestr
+            pos += 1
+            s = pos
+            while pos < l: # find closing quote
+                d = program[pos]
+                if d == '\\': # skip over escaped characters
+                    pos += 2
+                    continue
+                if d == c:
+                    yield ('string', decode(program[s:pos]), s)
+                    break
+                pos += 1
+            else:
+                raise error.ParseError(_("unterminated string"), s)
+        # gather up a symbol/keyword
+        elif c in syminitletters:
+            s = pos
+            pos += 1
+            while pos < l: # find end of symbol
+                d = program[pos]
+                if d not in symletters:
+                    break
+                if d == '.' and program[pos - 1] == '.': # special case for ..
+                    pos -= 1
+                    break
+                pos += 1
+            sym = program[s:pos]
+            if sym in keywords: # operator keywords
+                yield (sym, None, s)
+            elif '-' in sym:
+                # some jerk gave us foo-bar-baz; check whether it's a symbol
+                if lookup and lookup(sym):
+                    # looks like a real symbol
+                    yield ('symbol', sym, s)
+                else:
+                    # looks like an expression
+                    parts = sym.split('-')
+                    for p in parts[:-1]:
+                        if p: # possible consecutive -
+                            yield ('symbol', p, s)
+                        s += len(p)
+                        yield ('-', None, pos)
+                        s += 1
+                    if parts[-1]: # possible trailing -
+                        yield ('symbol', parts[-1], s)
+            else:
+                yield ('symbol', sym, s)
+            pos -= 1
+        else:
+            raise error.ParseError(_("syntax error in revset '%s'") %
+                                   program, pos)
+        pos += 1
+    yield ('end', None, pos)
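
Besides the doctest above, the hyphen handling deserves an illustration:
without a lookup callback a dashed name is split into a subtraction
expression, while a lookup that recognizes the name keeps it as one symbol.
A hedged sketch (token positions omitted; the lookup lambda is only a
stand-in for repo.__contains__):

    >>> [t[:2] for t in tokenize('foo-bar')]
    [('symbol', 'foo'), ('-', None), ('symbol', 'bar'), ('end', None)]
    >>> [t[:2] for t in tokenize('foo-bar', lookup=lambda name: True)]
    [('symbol', 'foo-bar'), ('end', None)]
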
+
+# helpers
+
+_notset = object()
+
+def getsymbol(x):
+    if x and x[0] == 'symbol':
+        return x[1]
+    raise error.ParseError(_('not a symbol'))
+
+def getstring(x, err):
+    if x and (x[0] == 'string' or x[0] == 'symbol'):
+        return x[1]
+    raise error.ParseError(err)
+
+def getinteger(x, err, default=_notset):
+    if not x and default is not _notset:
+        return default
+    try:
+        return int(getstring(x, err))
+    except ValueError:
+        raise error.ParseError(err)
+
+def getboolean(x, err):
+    value = util.parsebool(getsymbol(x))
+    if value is not None:
+        return value
+    raise error.ParseError(err)
+
+def getlist(x):
+    if not x:
+        return []
+    if x[0] == 'list':
+        return list(x[1:])
+    return [x]
+
+def getrange(x, err):
+    if not x:
+        raise error.ParseError(err)
+    op = x[0]
+    if op == 'range':
+        return x[1], x[2]
+    elif op == 'rangepre':
+        return None, x[1]
+    elif op == 'rangepost':
+        return x[1], None
+    elif op == 'rangeall':
+        return None, None
+    raise error.ParseError(err)
+
+def getargs(x, min, max, err):
+    l = getlist(x)
+    if len(l) < min or (max >= 0 and len(l) > max):
+        raise error.ParseError(err)
+    return l
+
+def getargsdict(x, funcname, keys):
+    return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
+                                keyvaluenode='keyvalue', keynode='symbol')
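
Taken together, these getters unpack the argument trees carried by 'func'
nodes. A minimal sketch of typical predicate-side usage (the error strings
here are illustrative, not the ones real predicates use):

    tree = parse('limit(all(), 2)')
    getsymbol(tree[1])              # -> 'limit'
    args = getargs(tree[2], 1, 2, _("limit expects one or two arguments"))
    getinteger(args[1], _("limit expects a number"))  # -> 2
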
+
+# Constants for ordering requirement, used in _analyze():
+#
+# If 'define', any nested functions and operations can change the ordering of
+# the entries in the set. If 'follow', any nested functions and operations
+# should take the ordering specified by the first operand to the '&' operator.
+#
+# For instance,
+#
+#   X & (Y | Z)
+#   ^   ^^^^^^^
+#   |   follow
+#   define
+#
+# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
+# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
+#
+# 'any' means the order doesn't matter. For instance,
+#
+#   X & !Y
+#        ^
+#        any
+#
+# 'y()' can either enforce its ordering requirement or take the ordering
+# specified by 'x()' because 'not()' doesn't care about the order.
+#
+# Transition of ordering requirement:
+#
+# 1. starts with 'define'
+# 2. shifts to 'follow' by 'x & y'
+# 3. changes back to 'define' on function call 'f(x)' or function-like
+#    operation 'x (f) y' because 'f' may have its own ordering requirement
+#    for 'x' and 'y' (e.g. 'first(x)')
+#
+anyorder = 'any'        # don't care about the order
+defineorder = 'define'  # should define the order
+followorder = 'follow'  # must follow the current order
+
+# transition table for 'x & y', from the current expression 'x' to 'y'
+_tofolloworder = {
+    anyorder: anyorder,
+    defineorder: followorder,
+    followorder: followorder,
+}
+
+def _matchonly(revs, bases):
+    """
+    >>> f = lambda *args: _matchonly(*map(parse, args))
+    >>> f('ancestors(A)', 'not ancestors(B)')
+    ('list', ('symbol', 'A'), ('symbol', 'B'))
+    """
+    if (revs is not None
+        and revs[0] == 'func'
+        and getsymbol(revs[1]) == 'ancestors'
+        and bases is not None
+        and bases[0] == 'not'
+        and bases[1][0] == 'func'
+        and getsymbol(bases[1][1]) == 'ancestors'):
+        return ('list', revs[2], bases[1][2])
+
+def _fixops(x):
+    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
+    handled well by our simple top-down parser"""
+    if not isinstance(x, tuple):
+        return x
+
+    op = x[0]
+    if op == 'parent':
+        # x^:y means (x^) : y, not x ^ (:y)
+        # x^:  means (x^) :,   not x ^ (:)
+        post = ('parentpost', x[1])
+        if x[2][0] == 'dagrangepre':
+            return _fixops(('dagrange', post, x[2][1]))
+        elif x[2][0] == 'rangepre':
+            return _fixops(('range', post, x[2][1]))
+        elif x[2][0] == 'rangeall':
+            return _fixops(('rangepost', post))
+    elif op == 'or':
+        # make number of arguments deterministic:
+        # x + y + z -> (or x y z) -> (or (list x y z))
+        return (op, _fixops(('list',) + x[1:]))
+
+    return (op,) + tuple(_fixops(y) for y in x[1:])
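
For instance, the top-down parser reads 'x^:y' as 'x ^ (:y)'; _fixops()
rebinds it so the '^' applies first. Roughly (tuple shapes inferred from
the code above):

    >>> parse('2^:5')
    ('range', ('parentpost', ('symbol', '2')), ('symbol', '5'))
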
+
+def _analyze(x, order):
+    if x is None:
+        return x
+
+    op = x[0]
+    if op == 'minus':
+        return _analyze(('and', x[1], ('not', x[2])), order)
+    elif op == 'only':
+        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
+        return _analyze(t, order)
+    elif op == 'onlypost':
+        return _analyze(('func', ('symbol', 'only'), x[1]), order)
+    elif op == 'dagrangepre':
+        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
+    elif op == 'dagrangepost':
+        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
+    elif op == 'negate':
+        s = getstring(x[1], _("can't negate that"))
+        return _analyze(('string', '-' + s), order)
+    elif op in ('string', 'symbol'):
+        return x
+    elif op == 'and':
+        ta = _analyze(x[1], order)
+        tb = _analyze(x[2], _tofolloworder[order])
+        return (op, ta, tb, order)
+    elif op == 'or':
+        return (op, _analyze(x[1], order), order)
+    elif op == 'not':
+        return (op, _analyze(x[1], anyorder), order)
+    elif op == 'rangeall':
+        return (op, None, order)
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
+        return (op, _analyze(x[1], defineorder), order)
+    elif op == 'group':
+        return _analyze(x[1], order)
+    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
+        ta = _analyze(x[1], defineorder)
+        tb = _analyze(x[2], defineorder)
+        return (op, ta, tb, order)
+    elif op == 'list':
+        return (op,) + tuple(_analyze(y, order) for y in x[1:])
+    elif op == 'keyvalue':
+        return (op, x[1], _analyze(x[2], order))
+    elif op == 'func':
+        f = getsymbol(x[1])
+        d = defineorder
+        if f == 'present':
+            # 'present(set)' is known to return the argument set with no
+            # modification, so forward the current order to its argument
+            d = order
+        return (op, x[1], _analyze(x[2], d), order)
+    raise ValueError('invalid operator %r' % op)
+
+def analyze(x, order=defineorder):
+    """Transform raw parsed tree to evaluatable tree which can be fed to
+    optimize() or getset()
+
+    All pseudo operations should be mapped to real operations or functions
+    defined in methods or symbols table respectively.
+
+    'order' specifies how the current expression 'x' is ordered (see the
+    constants defined above.)
+    """
+    return _analyze(x, order)
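
A short, hedged example of the annotation this pass adds: every
order-sensitive node gains a trailing ordering flag, and the right-hand
side of '&' is demoted to 'follow':

    tree = analyze(parse('a & (b | c)'))
    # roughly: ('and', ('symbol', 'a'),
    #           ('or', ('list', ('symbol', 'b'), ('symbol', 'c')), 'follow'),
    #           'define')
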
+
+def _optimize(x, small):
+    if x is None:
+        return 0, x
+
+    smallbonus = 1
+    if small:
+        smallbonus = .5
+
+    op = x[0]
+    if op in ('string', 'symbol'):
+        return smallbonus, x # single revisions are small
+    elif op == 'and':
+        wa, ta = _optimize(x[1], True)
+        wb, tb = _optimize(x[2], True)
+        order = x[3]
+        w = min(wa, wb)
+
+        # (::x and not ::y)/(not ::y and ::x) have a fast path
+        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
+        if tm:
+            return w, ('func', ('symbol', 'only'), tm, order)
+
+        if tb is not None and tb[0] == 'not':
+            return wa, ('difference', ta, tb[1], order)
+
+        if wa > wb:
+            return w, (op, tb, ta, order)
+        return w, (op, ta, tb, order)
+    elif op == 'or':
+        # fast path for machine-generated expressions, which are likely to
+        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
+        order = x[2]
+        ws, ts, ss = [], [], []
+        def flushss():
+            if not ss:
+                return
+            if len(ss) == 1:
+                w, t = ss[0]
+            else:
+                s = '\0'.join(t[1] for w, t in ss)
+                y = ('func', ('symbol', '_list'), ('string', s), order)
+                w, t = _optimize(y, False)
+            ws.append(w)
+            ts.append(t)
+            del ss[:]
+        for y in getlist(x[1]):
+            w, t = _optimize(y, False)
+            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
+                ss.append((w, t))
+                continue
+            flushss()
+            ws.append(w)
+            ts.append(t)
+        flushss()
+        if len(ts) == 1:
+            return ws[0], ts[0] # 'or' operation is fully optimized out
+        if order != defineorder:
+            # reorder by weight only when f(a + b) == f(b + a)
+            ts = [wt[1] for wt in sorted(zip(ws, ts), key=lambda wt: wt[0])]
+        return max(ws), (op, ('list',) + tuple(ts), order)
+    elif op == 'not':
+        # Optimize not public() to _notpublic() because we have a fast version
+        if x[1][:3] == ('func', ('symbol', 'public'), None):
+            order = x[1][3]
+            newsym = ('func', ('symbol', '_notpublic'), None, order)
+            o = _optimize(newsym, not small)
+            return o[0], o[1]
+        else:
+            o = _optimize(x[1], not small)
+            order = x[2]
+            return o[0], (op, o[1], order)
+    elif op == 'rangeall':
+        return smallbonus, x
+    elif op in ('rangepre', 'rangepost', 'parentpost'):
+        o = _optimize(x[1], small)
+        order = x[2]
+        return o[0], (op, o[1], order)
+    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
+        wa, ta = _optimize(x[1], small)
+        wb, tb = _optimize(x[2], small)
+        order = x[3]
+        return wa + wb, (op, ta, tb, order)
+    elif op == 'list':
+        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
+        return sum(ws), (op,) + ts
+    elif op == 'keyvalue':
+        w, t = _optimize(x[2], small)
+        return w, (op, x[1], t)
+    elif op == 'func':
+        f = getsymbol(x[1])
+        wa, ta = _optimize(x[2], small)
+        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
+                 'keyword', 'outgoing', 'user', 'destination'):
+            w = 10 # slow
+        elif f in ('modifies', 'adds', 'removes'):
+            w = 30 # slower
+        elif f == "contains":
+            w = 100 # very slow
+        elif f == "ancestor":
+            w = 1 * smallbonus
+        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
+            w = 0
+        elif f == "sort":
+            w = 10 # assume most sorts look at changelog
+        else:
+            w = 1
+        order = x[3]
+        return w + wa, (op, x[1], ta, order)
+    raise ValueError('invalid operator %r' % op)
+
+def optimize(tree):
+    """Optimize evaluatable tree
+
+    All pseudo operations should be transformed beforehand.
+    """
+    _weight, newtree = _optimize(tree, small=True)
+    return newtree
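
As a sketch of the rewrites above (the revision names are placeholders),
the 'ancestors(x) and not ancestors(y)' pattern collapses into the fast
only() predicate via _matchonly():

    tree = optimize(analyze(parse('::tip and not ::stable')))
    # roughly: ('func', ('symbol', 'only'),
    #           ('list', ('symbol', 'tip'), ('symbol', 'stable')), 'define')
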
+
+# the set of valid characters for the initial letter of symbols in
+# alias declarations and definitions
+_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
+
+def _parsewith(spec, lookup=None, syminitletters=None):
+    """Generate a parse tree of given spec with given tokenizing options
+
+    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
+    ('func', ('symbol', 'foo'), ('symbol', '$1'))
+    >>> _parsewith('$1')
+    Traceback (most recent call last):
+      ...
+    ParseError: ("syntax error in revset '$1'", 0)
+    >>> _parsewith('foo bar')
+    Traceback (most recent call last):
+      ...
+    ParseError: ('invalid token', 4)
+    """
+    p = parser.parser(elements)
+    tree, pos = p.parse(tokenize(spec, lookup=lookup,
+                                 syminitletters=syminitletters))
+    if pos != len(spec):
+        raise error.ParseError(_('invalid token'), pos)
+    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
+
+class _aliasrules(parser.basealiasrules):
+    """Parsing and expansion rule set of revset aliases"""
+    _section = _('revset alias')
+
+    @staticmethod
+    def _parse(spec):
+        """Parse alias declaration/definition ``spec``
+
+        This allows symbol names to also use ``$`` as an initial letter
+        (for backward compatibility), so callers of this function should
+        examine whether ``$`` is also used for unexpected symbols.
+        """
+        return _parsewith(spec, syminitletters=_aliassyminitletters)
+
+    @staticmethod
+    def _trygetfunc(tree):
+        if tree[0] == 'func' and tree[1][0] == 'symbol':
+            return tree[1][1], getlist(tree[2])
+
+def expandaliases(ui, tree):
+    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
+    tree = _aliasrules.expand(aliases, tree)
+    # warn about problematic (but not referred) aliases
+    for name, alias in sorted(aliases.iteritems()):
+        if alias.error and not alias.warned:
+            ui.warn(_('warning: %s\n') % (alias.error))
+            alias.warned = True
+    return tree
+
+def foldconcat(tree):
+    """Fold elements to be concatenated by `##`
+    """
+    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+        return tree
+    if tree[0] == '_concat':
+        pending = [tree]
+        l = []
+        while pending:
+            e = pending.pop()
+            if e[0] == '_concat':
+                pending.extend(reversed(e[1:]))
+            elif e[0] in ('string', 'symbol'):
+                l.append(e[1])
+            else:
+                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
+                raise error.ParseError(msg)
+        return ('string', ''.join(l))
+    else:
+        return tuple(foldconcat(t) for t in tree)
+
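+# For example, the parsed form of "a" ## "b" ## "c",
+#   ('_concat', ('_concat', ('string', 'a'), ('string', 'b')),
+#    ('string', 'c')),
+# folds to ('string', 'abc').
+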
+def parse(spec, lookup=None):
+    return _parsewith(spec, lookup=lookup)
+
+def _quote(s):
+    r"""Quote a value in order to make it safe for the revset engine.
+
+    >>> _quote('asdf')
+    "'asdf'"
+    >>> _quote("asdf'\"")
+    '\'asdf\\\'"\''
+    >>> _quote('asdf\'')
+    "'asdf\\''"
+    >>> _quote(1)
+    "'1'"
+    """
+    return "'%s'" % util.escapestr(pycompat.bytestr(s))
+
+def formatspec(expr, *args):
+    '''
+    This is a convenience function for using revsets internally, and
+    escapes arguments appropriately. Aliases are intentionally ignored
+    so that intended expression behavior isn't accidentally subverted.
+
+    Supported arguments:
+
+    %r = revset expression, parenthesized
+    %d = int(arg), no quoting
+    %s = string(arg), escaped and single-quoted
+    %b = arg.branch(), escaped and single-quoted
+    %n = hex(arg), single-quoted
+    %% = a literal '%'
+
+    Prefixing the type with 'l' specifies a parenthesized list of that type.
+
+    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
+    '(10 or 11):: and ((this()) or (that()))'
+    >>> formatspec('%d:: and not %d::', 10, 20)
+    '10:: and not 20::'
+    >>> formatspec('%ld or %ld', [], [1])
+    "_list('') or 1"
+    >>> formatspec('keyword(%s)', 'foo\\xe9')
+    "keyword('foo\\\\xe9')"
+    >>> b = lambda: 'default'
+    >>> b.branch = b
+    >>> formatspec('branch(%b)', b)
+    "branch('default')"
+    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
+    "root(_list('a\\x00b\\x00c\\x00d'))"
+    '''
+
+    def argtype(c, arg):
+        if c == 'd':
+            return '%d' % int(arg)
+        elif c == 's':
+            return _quote(arg)
+        elif c == 'r':
+            parse(arg) # make sure syntax errors are confined
+            return '(%s)' % arg
+        elif c == 'n':
+            return _quote(node.hex(arg))
+        elif c == 'b':
+            return _quote(arg.branch())
+
+    def listexp(s, t):
+        l = len(s)
+        if l == 0:
+            return "_list('')"
+        elif l == 1:
+            return argtype(t, s[0])
+        elif t == 'd':
+            return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s)
+        elif t == 's':
+            return "_list('%s')" % "\0".join(s)
+        elif t == 'n':
+            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
+        elif t == 'b':
+            return "_list('%s')" % "\0".join(a.branch() for a in s)
+
+        m = l // 2
+        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
+
+    expr = pycompat.bytestr(expr)
+    ret = ''
+    pos = 0
+    arg = 0
+    while pos < len(expr):
+        c = expr[pos]
+        if c == '%':
+            pos += 1
+            d = expr[pos]
+            if d == '%':
+                ret += d
+            elif d in 'dsnbr':
+                ret += argtype(d, args[arg])
+                arg += 1
+            elif d == 'l':
+                # a list of some type
+                pos += 1
+                d = expr[pos]
+                ret += listexp(list(args[arg]), d)
+                arg += 1
+            else:
+                raise error.Abort(_('unexpected revspec format character %s')
+                                  % d)
+        else:
+            ret += c
+        pos += 1
+
+    return ret
+
+def prettyformat(tree):
+    return parser.prettyformat(tree, ('string', 'symbol'))
+
+def depth(tree):
+    if isinstance(tree, tuple):
+        return max(map(depth, tree)) + 1
+    else:
+        return 0
+
+def funcsused(tree):
+    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+        return set()
+    else:
+        funcs = set()
+        for s in tree[1:]:
+            funcs |= funcsused(s)
+        if tree[0] == 'func':
+            funcs.add(tree[1][1])
+        return funcs
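A short usage sketch for the ``formatspec()`` helper above (assuming the
module is importable as ``mercurial.revsetlang``, as the scmutil hunks
below do):

    from mercurial import revsetlang

    # %d is formatted unquoted; %s is escaped and single-quoted
    revsetlang.formatspec('rev(%d) or keyword(%s)', 10, 'bug')
    # -> "rev(10) or keyword('bug')"

    # 'l'-prefixed types expand a list; several ints become one
    # _intlist() call with NUL-separated values
    revsetlang.formatspec('ancestors(%ld)', [10, 20, 30])
    # -> "ancestors(_intlist('10\x0020\x0030'))"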
--- a/mercurial/scmposix.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/scmposix.py	Tue Apr 18 12:24:34 2017 -0400
@@ -40,8 +40,15 @@
 def userrcpath():
     if pycompat.sysplatform == 'plan9':
         return [encoding.environ['home'] + '/lib/hgrc']
+    elif pycompat.sysplatform == 'darwin':
+        return [os.path.expanduser('~/.hgrc')]
     else:
-        return [os.path.expanduser('~/.hgrc')]
+        confighome = encoding.environ.get('XDG_CONFIG_HOME')
+        if confighome is None or not os.path.isabs(confighome):
+            confighome = os.path.expanduser('~/.config')
+
+        return [os.path.expanduser('~/.hgrc'),
+                os.path.join(confighome, 'hg', 'hgrc')]
 
 def termsize(ui):
     try:
@@ -59,7 +66,7 @@
             if not os.isatty(fd):
                 continue
             arri = fcntl.ioctl(fd, TIOCGWINSZ, '\0' * 8)
-            height, width = array.array('h', arri)[:2]
+            height, width = array.array(r'h', arri)[:2]
             if width > 0 and height > 0:
                 return width, height
         except ValueError:
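The new ``userrcpath()`` branch above adds XDG base-directory support on
non-Darwin POSIX platforms. A standalone sketch of the resulting lookup
(the helper name ``candidatehgrcpaths`` is illustrative, not part of
Mercurial):

    import os

    def candidatehgrcpaths():
        # a relative XDG_CONFIG_HOME is invalid per the XDG spec, so
        # fall back to ~/.config
        confighome = os.environ.get('XDG_CONFIG_HOME')
        if confighome is None or not os.path.isabs(confighome):
            confighome = os.path.expanduser('~/.config')
        return [os.path.expanduser('~/.hgrc'),
                os.path.join(confighome, 'hg', 'hgrc')]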
--- a/mercurial/scmutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/scmutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,17 +7,12 @@
 
 from __future__ import absolute_import
 
-import contextlib
 import errno
 import glob
 import hashlib
 import os
 import re
-import shutil
 import socket
-import stat
-import tempfile
-import threading
 
 from .i18n import _
 from .node import wdirrev
@@ -25,13 +20,13 @@
     encoding,
     error,
     match as matchmod,
-    osutil,
     pathutil,
     phases,
     pycompat,
-    revset,
+    revsetlang,
     similar,
     util,
+    vfs as vfsmod,
 )
 
 if pycompat.osname == 'nt':
@@ -39,8 +34,6 @@
 else:
     from . import scmposix as scmplatform
 
-systemrcpath = scmplatform.systemrcpath
-userrcpath = scmplatform.userrcpath
 termsize = scmplatform.termsize
 
 class status(tuple):
@@ -149,7 +142,11 @@
     and return an exit code accordingly. does not handle all exceptions.
     """
     try:
-        return func()
+        try:
+            return func()
+        except: # re-raises
+            ui.traceback()
+            raise
     # Global exception handling, alphabetically
     # Mercurial-specific first, followed by built-in and library exceptions
     except error.LockHeld as inst:
@@ -332,459 +329,30 @@
     if revs:
         s = hashlib.sha1()
         for rev in revs:
-            s.update('%s;' % rev)
+            s.update('%d;' % rev)
         key = s.digest()
     return key
 
-class abstractvfs(object):
-    """Abstract base class; cannot be instantiated"""
-
-    def __init__(self, *args, **kwargs):
-        '''Prevent instantiation; don't call this from subclasses.'''
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
-
-    def tryread(self, path):
-        '''gracefully return an empty string for missing files'''
-        try:
-            return self.read(path)
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-        return ""
-
-    def tryreadlines(self, path, mode='rb'):
-        '''gracefully return an empty array for missing files'''
-        try:
-            return self.readlines(path, mode=mode)
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-        return []
-
-    @util.propertycache
-    def open(self):
-        '''Open ``path`` file, which is relative to vfs root.
-
-        Newly created directories are marked as "not to be indexed by
-        the content indexing service", if ``notindexed`` is specified
-        for "write" mode access.
-        '''
-        return self.__call__
-
-    def read(self, path):
-        with self(path, 'rb') as fp:
-            return fp.read()
-
-    def readlines(self, path, mode='rb'):
-        with self(path, mode=mode) as fp:
-            return fp.readlines()
-
-    def write(self, path, data, backgroundclose=False):
-        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
-            return fp.write(data)
-
-    def writelines(self, path, data, mode='wb', notindexed=False):
-        with self(path, mode=mode, notindexed=notindexed) as fp:
-            return fp.writelines(data)
-
-    def append(self, path, data):
-        with self(path, 'ab') as fp:
-            return fp.write(data)
-
-    def basename(self, path):
-        """return base element of a path (as os.path.basename would do)
-
-        This exists to allow handling of strange encoding if needed."""
-        return os.path.basename(path)
-
-    def chmod(self, path, mode):
-        return os.chmod(self.join(path), mode)
-
-    def dirname(self, path):
-        """return dirname element of a path (as os.path.dirname would do)
-
-        This exists to allow handling of strange encoding if needed."""
-        return os.path.dirname(path)
-
-    def exists(self, path=None):
-        return os.path.exists(self.join(path))
-
-    def fstat(self, fp):
-        return util.fstat(fp)
-
-    def isdir(self, path=None):
-        return os.path.isdir(self.join(path))
-
-    def isfile(self, path=None):
-        return os.path.isfile(self.join(path))
-
-    def islink(self, path=None):
-        return os.path.islink(self.join(path))
-
-    def isfileorlink(self, path=None):
-        '''return whether path is a regular file or a symlink
-
-        Unlike isfile, this doesn't follow symlinks.'''
-        try:
-            st = self.lstat(path)
-        except OSError:
-            return False
-        mode = st.st_mode
-        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
-
-    def reljoin(self, *paths):
-        """join various elements of a path together (as os.path.join would do)
-
-        The vfs base is not injected so that path stay relative. This exists
-        to allow handling of strange encoding if needed."""
-        return os.path.join(*paths)
-
-    def split(self, path):
-        """split top-most element of a path (as os.path.split would do)
-
-        This exists to allow handling of strange encoding if needed."""
-        return os.path.split(path)
-
-    def lexists(self, path=None):
-        return os.path.lexists(self.join(path))
-
-    def lstat(self, path=None):
-        return os.lstat(self.join(path))
-
-    def listdir(self, path=None):
-        return os.listdir(self.join(path))
-
-    def makedir(self, path=None, notindexed=True):
-        return util.makedir(self.join(path), notindexed)
-
-    def makedirs(self, path=None, mode=None):
-        return util.makedirs(self.join(path), mode)
-
-    def makelock(self, info, path):
-        return util.makelock(info, self.join(path))
-
-    def mkdir(self, path=None):
-        return os.mkdir(self.join(path))
-
-    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
-        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
-                                    dir=self.join(dir), text=text)
-        dname, fname = util.split(name)
-        if dir:
-            return fd, os.path.join(dir, fname)
-        else:
-            return fd, fname
-
-    def readdir(self, path=None, stat=None, skip=None):
-        return osutil.listdir(self.join(path), stat, skip)
-
-    def readlock(self, path):
-        return util.readlock(self.join(path))
-
-    def rename(self, src, dst, checkambig=False):
-        """Rename from src to dst
-
-        checkambig argument is used with util.filestat, and is useful
-        only if destination file is guarded by any lock
-        (e.g. repo.lock or repo.wlock).
-        """
-        dstpath = self.join(dst)
-        oldstat = checkambig and util.filestat(dstpath)
-        if oldstat and oldstat.stat:
-            ret = util.rename(self.join(src), dstpath)
-            newstat = util.filestat(dstpath)
-            if newstat.isambig(oldstat):
-                # stat of renamed file is ambiguous to original one
-                newstat.avoidambig(dstpath, oldstat)
-            return ret
-        return util.rename(self.join(src), dstpath)
-
-    def readlink(self, path):
-        return os.readlink(self.join(path))
-
-    def removedirs(self, path=None):
-        """Remove a leaf directory and all empty intermediate ones
-        """
-        return util.removedirs(self.join(path))
-
-    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
-        """Remove a directory tree recursively
-
-        If ``forcibly``, this tries to remove READ-ONLY files, too.
-        """
-        if forcibly:
-            def onerror(function, path, excinfo):
-                if function is not os.remove:
-                    raise
-                # read-only files cannot be unlinked under Windows
-                s = os.stat(path)
-                if (s.st_mode & stat.S_IWRITE) != 0:
-                    raise
-                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
-                os.remove(path)
-        else:
-            onerror = None
-        return shutil.rmtree(self.join(path),
-                             ignore_errors=ignore_errors, onerror=onerror)
-
-    def setflags(self, path, l, x):
-        return util.setflags(self.join(path), l, x)
-
-    def stat(self, path=None):
-        return os.stat(self.join(path))
-
-    def unlink(self, path=None):
-        return util.unlink(self.join(path))
-
-    def unlinkpath(self, path=None, ignoremissing=False):
-        return util.unlinkpath(self.join(path), ignoremissing)
-
-    def utime(self, path=None, t=None):
-        return os.utime(self.join(path), t)
-
-    def walk(self, path=None, onerror=None):
-        """Yield (dirpath, dirs, files) tuple for each directories under path
-
-        ``dirpath`` is relative one from the root of this vfs. This
-        uses ``os.sep`` as path separator, even you specify POSIX
-        style ``path``.
-
-        "The root of this vfs" is represented as empty ``dirpath``.
-        """
-        root = os.path.normpath(self.join(None))
-        # when dirpath == root, dirpath[prefixlen:] becomes empty
-        # because len(dirpath) < prefixlen.
-        prefixlen = len(pathutil.normasprefix(root))
-        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
-            yield (dirpath[prefixlen:], dirs, files)
+def _deprecated(old, new, func):
+    msg = ('class at mercurial.scmutil.%s moved to mercurial.vfs.%s'
+           % (old, new))
+    def wrapper(*args, **kwargs):
+        util.nouideprecwarn(msg, '4.2')
+        return func(*args, **kwargs)
+    return wrapper
 
-    @contextlib.contextmanager
-    def backgroundclosing(self, ui, expectedcount=-1):
-        """Allow files to be closed asynchronously.
-
-        When this context manager is active, ``backgroundclose`` can be passed
-        to ``__call__``/``open`` to result in the file possibly being closed
-        asynchronously, on a background thread.
-        """
-        # This is an arbitrary restriction and could be changed if we ever
-        # have a use case.
-        vfs = getattr(self, 'vfs', self)
-        if getattr(vfs, '_backgroundfilecloser', None):
-            raise error.Abort(
-                _('can only have 1 active background file closer'))
-
-        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
-            try:
-                vfs._backgroundfilecloser = bfc
-                yield bfc
-            finally:
-                vfs._backgroundfilecloser = None
-
-class vfs(abstractvfs):
-    '''Operate files relative to a base directory
-
-    This class is used to hide the details of COW semantics and
-    remote file access from higher level code.
-    '''
-    def __init__(self, base, audit=True, expandpath=False, realpath=False):
-        if expandpath:
-            base = util.expandpath(base)
-        if realpath:
-            base = os.path.realpath(base)
-        self.base = base
-        self.mustaudit = audit
-        self.createmode = None
-        self._trustnlink = None
-
-    @property
-    def mustaudit(self):
-        return self._audit
-
-    @mustaudit.setter
-    def mustaudit(self, onoff):
-        self._audit = onoff
-        if onoff:
-            self.audit = pathutil.pathauditor(self.base)
-        else:
-            self.audit = util.always
-
-    @util.propertycache
-    def _cansymlink(self):
-        return util.checklink(self.base)
-
-    @util.propertycache
-    def _chmod(self):
-        return util.checkexec(self.base)
-
-    def _fixfilemode(self, name):
-        if self.createmode is None or not self._chmod:
-            return
-        os.chmod(name, self.createmode & 0o666)
-
-    def __call__(self, path, mode="r", text=False, atomictemp=False,
-                 notindexed=False, backgroundclose=False, checkambig=False):
-        '''Open ``path`` file, which is relative to vfs root.
-
-        Newly created directories are marked as "not to be indexed by
-        the content indexing service", if ``notindexed`` is specified
-        for "write" mode access.
-
-        If ``backgroundclose`` is passed, the file may be closed asynchronously.
-        It can only be used if the ``self.backgroundclosing()`` context manager
-        is active. This should only be specified if the following criteria hold:
-
-        1. There is a potential for writing thousands of files. Unless you
-           are writing thousands of files, the performance benefits of
-           asynchronously closing files is not realized.
-        2. Files are opened exactly once for the ``backgroundclosing``
-           active duration and are therefore free of race conditions between
-           closing a file on a background thread and reopening it. (If the
-           file were opened multiple times, there could be unflushed data
-           because the original file handle hasn't been flushed/closed yet.)
-
-        ``checkambig`` argument is passed to atomictemplfile (valid
-        only for writing), and is useful only if target file is
-        guarded by any lock (e.g. repo.lock or repo.wlock).
-        '''
-        if self._audit:
-            r = util.checkosfilename(path)
-            if r:
-                raise error.Abort("%s: %r" % (r, path))
-        self.audit(path)
-        f = self.join(path)
-
-        if not text and "b" not in mode:
-            mode += "b" # for that other OS
-
-        nlink = -1
-        if mode not in ('r', 'rb'):
-            dirname, basename = util.split(f)
-            # If basename is empty, then the path is malformed because it points
-            # to a directory. Let the posixfile() call below raise IOError.
-            if basename:
-                if atomictemp:
-                    util.makedirs(dirname, self.createmode, notindexed)
-                    return util.atomictempfile(f, mode, self.createmode,
-                                               checkambig=checkambig)
-                try:
-                    if 'w' in mode:
-                        util.unlink(f)
-                        nlink = 0
-                    else:
-                        # nlinks() may behave differently for files on Windows
-                        # shares if the file is open.
-                        with util.posixfile(f):
-                            nlink = util.nlinks(f)
-                            if nlink < 1:
-                                nlink = 2 # force mktempcopy (issue1922)
-                except (OSError, IOError) as e:
-                    if e.errno != errno.ENOENT:
-                        raise
-                    nlink = 0
-                    util.makedirs(dirname, self.createmode, notindexed)
-                if nlink > 0:
-                    if self._trustnlink is None:
-                        self._trustnlink = nlink > 1 or util.checknlink(f)
-                    if nlink > 1 or not self._trustnlink:
-                        util.rename(util.mktempcopy(f), f)
-        fp = util.posixfile(f, mode)
-        if nlink == 0:
-            self._fixfilemode(f)
-
-        if checkambig:
-            if mode in ('r', 'rb'):
-                raise error.Abort(_('implementation error: mode %s is not'
-                                    ' valid for checkambig=True') % mode)
-            fp = checkambigatclosing(fp)
-
-        if backgroundclose:
-            if not self._backgroundfilecloser:
-                raise error.Abort(_('backgroundclose can only be used when a '
-                                  'backgroundclosing context manager is active')
-                                  )
-
-            fp = delayclosedfile(fp, self._backgroundfilecloser)
-
-        return fp
-
-    def symlink(self, src, dst):
-        self.audit(dst)
-        linkname = self.join(dst)
-        try:
-            os.unlink(linkname)
-        except OSError:
-            pass
-
-        util.makedirs(os.path.dirname(linkname), self.createmode)
-
-        if self._cansymlink:
-            try:
-                os.symlink(src, linkname)
-            except OSError as err:
-                raise OSError(err.errno, _('could not symlink to %r: %s') %
-                              (src, err.strerror), linkname)
-        else:
-            self.write(dst, src)
-
-    def join(self, path, *insidef):
-        if path:
-            return os.path.join(self.base, path, *insidef)
-        else:
-            return self.base
-
-opener = vfs
-
-class auditvfs(object):
-    def __init__(self, vfs):
-        self.vfs = vfs
-
-    @property
-    def mustaudit(self):
-        return self.vfs.mustaudit
-
-    @mustaudit.setter
-    def mustaudit(self, onoff):
-        self.vfs.mustaudit = onoff
-
-    @property
-    def options(self):
-        return self.vfs.options
-
-    @options.setter
-    def options(self, value):
-        self.vfs.options = value
-
-class filtervfs(abstractvfs, auditvfs):
-    '''Wrapper vfs for filtering filenames with a function.'''
-
-    def __init__(self, vfs, filter):
-        auditvfs.__init__(self, vfs)
-        self._filter = filter
-
-    def __call__(self, path, *args, **kwargs):
-        return self.vfs(self._filter(path), *args, **kwargs)
-
-    def join(self, path, *insidef):
-        if path:
-            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
-        else:
-            return self.vfs.join(path)
-
-filteropener = filtervfs
-
-class readonlyvfs(abstractvfs, auditvfs):
-    '''Wrapper vfs preventing any writing.'''
-
-    def __init__(self, vfs):
-        auditvfs.__init__(self, vfs)
-
-    def __call__(self, path, mode='r', *args, **kw):
-        if mode not in ('r', 'rb'):
-            raise error.Abort(_('this vfs is read only'))
-        return self.vfs(path, mode, *args, **kw)
-
-    def join(self, path, *insidef):
-        return self.vfs.join(path, *insidef)
+# compatibility layer since all 'vfs' code moved to 'mercurial.vfs'
+#
+# It is hard to attach a deprecation warning to these since we do not have
+# access to a 'ui' object.
+opener = _deprecated('opener', 'vfs', vfsmod.vfs)
+vfs = _deprecated('vfs', 'vfs', vfsmod.vfs)
+filteropener = _deprecated('filteropener', 'filtervfs', vfsmod.filtervfs)
+filtervfs = _deprecated('filtervfs', 'filtervfs', vfsmod.filtervfs)
+abstractvfs = _deprecated('abstractvfs', 'abstractvfs', vfsmod.abstractvfs)
+readonlyvfs = _deprecated('readonlyvfs', 'readonlyvfs', vfsmod.readonlyvfs)
+auditvfs = _deprecated('auditvfs', 'auditvfs', vfsmod.auditvfs)
+checkambigatclosing = vfsmod.checkambigatclosing
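+# Hypothetical example: scmutil.opener('/repo/.hg') still returns a
+# working vfs instance, but (when deprecation warnings are enabled) each
+# call first warns that the class moved to mercurial.vfs.vfs.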
 
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     '''yield every hg repository under path, always recursively.
@@ -834,45 +402,6 @@
                         newdirs.append(d)
             dirs[:] = newdirs
 
-def osrcpath():
-    '''return default os-specific hgrc search path'''
-    path = []
-    defaultpath = os.path.join(util.datapath, 'default.d')
-    if os.path.isdir(defaultpath):
-        for f, kind in osutil.listdir(defaultpath):
-            if f.endswith('.rc'):
-                path.append(os.path.join(defaultpath, f))
-    path.extend(systemrcpath())
-    path.extend(userrcpath())
-    path = [os.path.normpath(f) for f in path]
-    return path
-
-_rcpath = None
-
-def rcpath():
-    '''return hgrc search path. if env var HGRCPATH is set, use it.
-    for each item in path, if directory, use files ending in .rc,
-    else use item.
-    make HGRCPATH empty to only look in .hg/hgrc of current repo.
-    if no HGRCPATH, use default os-specific path.'''
-    global _rcpath
-    if _rcpath is None:
-        if 'HGRCPATH' in encoding.environ:
-            _rcpath = []
-            for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
-                if not p:
-                    continue
-                p = util.expandpath(p)
-                if os.path.isdir(p):
-                    for f, kind in osutil.listdir(p):
-                        if f.endswith('.rc'):
-                            _rcpath.append(os.path.join(p, f))
-                else:
-                    _rcpath.append(p)
-        else:
-            _rcpath = osrcpath()
-    return _rcpath
-
 def intrev(rev):
     """Return integer for a given revision that can be used in comparison or
     arithmetic operation"""
@@ -890,7 +419,7 @@
     return repo[l.last()]
 
 def _pairspec(revspec):
-    tree = revset.parse(revspec)
+    tree = revsetlang.parse(revspec)
     return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
 
 def revpair(repo, revs):
@@ -936,7 +465,7 @@
     revision numbers.
 
     It is assumed the revsets are already formatted. If you have arguments
-    that need to be expanded in the revset, call ``revset.formatspec()``
+    that need to be expanded in the revset, call ``revsetlang.formatspec()``
     and pass the result as an element of ``specs``.
 
     Specifying a single revset is allowed.
@@ -947,10 +476,9 @@
     allspecs = []
     for spec in specs:
         if isinstance(spec, int):
-            spec = revset.formatspec('rev(%d)', spec)
+            spec = revsetlang.formatspec('rev(%d)', spec)
         allspecs.append(spec)
-    m = revset.matchany(repo.ui, allspecs, repo)
-    return m(repo)
+    return repo.anyrevs(allspecs, user=True)
 
 def meaningfulparents(repo, ctx):
     """Return list of meaningful (or all if debug) parentrevs for rev.
@@ -1325,11 +853,11 @@
         function to call the appropriate join function on 'obj' (an instance
         of the class that its member function was decorated).
         """
-        return obj.join(fname)
+        raise NotImplementedError
 
     def __call__(self, func):
         self.func = func
-        self.name = func.__name__
+        self.name = func.__name__.encode('ascii')
         return self
 
     def __get__(self, obj, type=None):
@@ -1410,164 +938,39 @@
     # experimental config: format.generaldelta
     return ui.configbool('format', 'generaldelta', False)
 
-class closewrapbase(object):
-    """Base class of wrapper, which hooks closing
-
-    Do not instantiate outside of the vfs layer.
-    """
-    def __init__(self, fh):
-        object.__setattr__(self, '_origfh', fh)
-
-    def __getattr__(self, attr):
-        return getattr(self._origfh, attr)
-
-    def __setattr__(self, attr, value):
-        return setattr(self._origfh, attr, value)
-
-    def __delattr__(self, attr):
-        return delattr(self._origfh, attr)
+class simplekeyvaluefile(object):
+    """A simple file with key=value lines
 
-    def __enter__(self):
-        return self._origfh.__enter__()
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
-
-    def close(self):
-        raise NotImplementedError('attempted instantiating ' + str(type(self)))
-
-class delayclosedfile(closewrapbase):
-    """Proxy for a file object whose close is delayed.
-
-    Do not instantiate outside of the vfs layer.
-    """
-    def __init__(self, fh, closer):
-        super(delayclosedfile, self).__init__(fh)
-        object.__setattr__(self, '_closer', closer)
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self._closer.close(self._origfh)
+    Keys must be alphanumeric and start with a letter; values must not
+    contain '\n' characters"""
 
-    def close(self):
-        self._closer.close(self._origfh)
-
-class backgroundfilecloser(object):
-    """Coordinates background closing of file handles on multiple threads."""
-    def __init__(self, ui, expectedcount=-1):
-        self._running = False
-        self._entered = False
-        self._threads = []
-        self._threadexception = None
-
-        # Only Windows/NTFS has slow file closing. So only enable by default
-        # on that platform. But allow to be enabled elsewhere for testing.
-        defaultenabled = pycompat.osname == 'nt'
-        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
-
-        if not enabled:
-            return
+    def __init__(self, vfs, path, keys=None):
+        self.vfs = vfs
+        self.path = path
 
-        # There is overhead to starting and stopping the background threads.
-        # Don't do background processing unless the file count is large enough
-        # to justify it.
-        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
-                                    2048)
-        # FUTURE dynamically start background threads after minfilecount closes.
-        # (We don't currently have any callers that don't know their file count)
-        if expectedcount > 0 and expectedcount < minfilecount:
-            return
-
-        # Windows defaults to a limit of 512 open files. A buffer of 128
-        # should give us enough headway.
-        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
-        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
-
-        ui.debug('starting %d threads for background file closing\n' %
-                 threadcount)
-
-        self._queue = util.queue(maxsize=maxqueue)
-        self._running = True
+    def read(self):
+        lines = self.vfs.readlines(self.path)
+        try:
+            d = dict(line[:-1].split('=', 1) for line in lines if line)
+        except ValueError as e:
+            raise error.CorruptedState(str(e))
+        return d
 
-        for i in range(threadcount):
-            t = threading.Thread(target=self._worker, name='backgroundcloser')
-            self._threads.append(t)
-            t.start()
-
-    def __enter__(self):
-        self._entered = True
-        return self
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self._running = False
-
-        # Wait for threads to finish closing so open files don't linger for
-        # longer than lifetime of context manager.
-        for t in self._threads:
-            t.join()
-
-    def _worker(self):
-        """Main routine for worker thread."""
-        while True:
-            try:
-                fh = self._queue.get(block=True, timeout=0.100)
-                # Need to catch or the thread will terminate and
-                # we could orphan file descriptors.
-                try:
-                    fh.close()
-                except Exception as e:
-                    # Stash so can re-raise from main thread later.
-                    self._threadexception = e
-            except util.empty:
-                if not self._running:
-                    break
-
-    def close(self, fh):
-        """Schedule a file for closing."""
-        if not self._entered:
-            raise error.Abort(_('can only call close() when context manager '
-                              'active'))
-
-        # If a background thread encountered an exception, raise now so we fail
-        # fast. Otherwise we may potentially go on for minutes until the error
-        # is acted on.
-        if self._threadexception:
-            e = self._threadexception
-            self._threadexception = None
-            raise e
-
-        # If we're not actively running, close synchronously.
-        if not self._running:
-            fh.close()
-            return
-
-        self._queue.put(fh, block=True, timeout=None)
-
-class checkambigatclosing(closewrapbase):
-    """Proxy for a file object, to avoid ambiguity of file stat
-
-    See also util.filestat for detail about "ambiguity of file stat".
-
-    This proxy is useful only if the target file is guarded by any
-    lock (e.g. repo.lock or repo.wlock)
-
-    Do not instantiate outside of the vfs layer.
-    """
-    def __init__(self, fh):
-        super(checkambigatclosing, self).__init__(fh)
-        object.__setattr__(self, '_oldstat', util.filestat(fh.name))
-
-    def _checkambig(self):
-        oldstat = self._oldstat
-        if oldstat.stat:
-            newstat = util.filestat(self._origfh.name)
-            if newstat.isambig(oldstat):
-                # stat of changed file is ambiguous to original one
-                newstat.avoidambig(self._origfh.name, oldstat)
-
-    def __exit__(self, exc_type, exc_value, exc_tb):
-        self._origfh.__exit__(exc_type, exc_value, exc_tb)
-        self._checkambig()
-
-    def close(self):
-        self._origfh.close()
-        self._checkambig()
+    def write(self, data):
+        """Write key=>value mapping to a file
+        data is a dict. Keys must be alphanumerical and start with a letter.
+        Values must not contain newline characters."""
+        lines = []
+        for k, v in data.items():
+            if not k[0].isalpha():
+                e = "keys must start with a letter in a key-value file"
+                raise error.ProgrammingError(e)
+            if not k.isalnum():
+                e = "invalid key name in a simple key-value file"
+                raise error.ProgrammingError(e)
+            if '\n' in v:
+                e = "invalid value in a simple key-value file"
+                raise error.ProgrammingError(e)
+            lines.append("%s=%s\n" % (k, v))
+        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
+            fp.write(''.join(lines))
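A usage sketch for the new ``simplekeyvaluefile`` class (the file name
'state' and the dict contents are illustrative only):

    from mercurial import scmutil
    from mercurial import vfs as vfsmod

    opener = vfsmod.vfs('/path/to/repo/.hg')
    kvfile = scmutil.simplekeyvaluefile(opener, 'state')
    kvfile.write({'version': '1', 'resumed': 'false'})
    assert kvfile.read() == {'version': '1', 'resumed': 'false'}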
--- a/mercurial/server.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/server.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import errno
 import os
 import sys
 import tempfile
@@ -16,6 +15,7 @@
 
 from . import (
     chgserver,
+    cmdutil,
     commandserver,
     error,
     hgweb,
@@ -60,11 +60,7 @@
                 raise error.Abort(_('child process failed to start'))
             writepid(pid)
         finally:
-            try:
-                os.unlink(lockpath)
-            except OSError as e:
-                if e.errno != errno.ENOENT:
-                    raise
+            util.tryunlink(lockpath)
         if parentfn:
             return parentfn(pid)
         else:
@@ -135,11 +131,22 @@
         baseui = ui
     webconf = opts.get('web_conf') or opts.get('webdir_conf')
     if webconf:
+        if opts.get('subrepos'):
+            raise error.Abort(_('--web-conf cannot be used with --subrepos'))
+
         # load server settings (e.g. web.port) to "copied" ui, which allows
         # hgwebdir to reload webconf cleanly
         servui = ui.copy()
         servui.readconfig(webconf, sections=['web'])
         alluis.add(servui)
+    elif opts.get('subrepos'):
+        servui = ui
+
+        # If repo is None, hgweb.createapp() already raises a proper abort
+        # message as long as webconf is None.
+        if repo:
+            webconf = dict()
+            cmdutil.addwebdirpath(repo, "", webconf)
     else:
         servui = ui
 
--- a/mercurial/similar.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/similar.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,8 +7,6 @@
 
 from __future__ import absolute_import
 
-import hashlib
-
 from .i18n import _
 from . import (
     bdiff,
@@ -23,21 +21,29 @@
     '''
     numfiles = len(added) + len(removed)
 
-    # Get hashes of removed files.
+    # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
+    # We use hash() so that fctx.data() can be discarded from memory.
     hashes = {}
     for i, fctx in enumerate(removed):
         repo.ui.progress(_('searching for exact renames'), i, total=numfiles,
                          unit=_('files'))
-        h = hashlib.sha1(fctx.data()).digest()
-        hashes[h] = fctx
+        h = hash(fctx.data())
+        if h not in hashes:
+            hashes[h] = [fctx]
+        else:
+            hashes[h].append(fctx)
 
     # For each added file, see if it corresponds to a removed file.
     for i, fctx in enumerate(added):
         repo.ui.progress(_('searching for exact renames'), i + len(removed),
                 total=numfiles, unit=_('files'))
-        h = hashlib.sha1(fctx.data()).digest()
-        if h in hashes:
-            yield (hashes[h], fctx)
+        adata = fctx.data()
+        h = hash(adata)
+        for rfctx in hashes.get(h, []):
+            # compare between actual file contents for exact identity
+            if adata == rfctx.data():
+                yield (rfctx, fctx)
+                break
 
     # Done
     repo.ui.progress(_('searching for exact renames'), None)
@@ -81,7 +87,7 @@
             if data is None:
                 data = _ctxdata(r)
             myscore = _score(a, data)
-            if myscore >= bestscore:
+            if myscore > bestscore:
                 copies[a] = (r, myscore)
     repo.ui.progress(_('searching'), None)
 
@@ -89,27 +95,29 @@
         source, bscore = v
         yield source, dest, bscore
 
+def _dropempty(fctxs):
+    return [x for x in fctxs if x.size() > 0]
+
 def findrenames(repo, added, removed, threshold):
     '''find renamed files -- yields (before, after, score) tuples'''
-    parentctx = repo['.']
-    workingctx = repo[None]
+    wctx = repo[None]
+    pctx = wctx.p1()
 
     # Zero length files will be frequently unrelated to each other, and
     # tracking the deletion/addition of such a file will probably cause more
     # harm than good. We strip them out here to avoid matching them later on.
-    addedfiles = set([workingctx[fp] for fp in added
-            if workingctx[fp].size() > 0])
-    removedfiles = set([parentctx[fp] for fp in removed
-            if fp in parentctx and parentctx[fp].size() > 0])
+    addedfiles = _dropempty(wctx[fp] for fp in sorted(added))
+    removedfiles = _dropempty(pctx[fp] for fp in sorted(removed) if fp in pctx)
 
     # Find exact matches.
-    for (a, b) in _findexactmatches(repo,
-            sorted(addedfiles), sorted(removedfiles)):
-        addedfiles.remove(b)
+    matchedfiles = set()
+    for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
+        matchedfiles.add(b)
         yield (a.path(), b.path(), 1.0)
 
     # If the user requested similar files to be matched, search for them also.
     if threshold < 1.0:
-        for (a, b, score) in _findsimilarmatches(repo,
-                sorted(addedfiles), sorted(removedfiles), threshold):
+        addedfiles = [x for x in addedfiles if x not in matchedfiles]
+        for (a, b, score) in _findsimilarmatches(repo, addedfiles,
+                                                 removedfiles, threshold):
             yield (a.path(), b.path(), score)
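The reworked ``_findexactmatches()`` above replaces SHA-1 digests with
Python's built-in hash() plus a confirming content comparison. A generic
sketch of that bucketing pattern, operating on plain byte strings instead
of filectx objects:

    def exactmatches(added, removed):
        # bucket removed contents by a cheap hash
        buckets = {}
        for data in removed:
            buckets.setdefault(hash(data), []).append(data)
        # hash() values can collide, so confirm candidates with a full
        # comparison before reporting a match
        for data in added:
            for candidate in buckets.get(hash(data), []):
                if data == candidate:
                    yield candidate, data
                    break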
--- a/mercurial/simplemerge.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/simplemerge.py	Tue Apr 18 12:24:34 2017 -0400
@@ -24,8 +24,8 @@
 from . import (
     error,
     mdiff,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 
 class CantReprocessAndShowBase(Exception):
@@ -437,7 +437,7 @@
 
     local = os.path.realpath(local)
     if not opts.get('print'):
-        opener = scmutil.opener(os.path.dirname(local))
+        opener = vfsmod.vfs(os.path.dirname(local))
         out = opener(os.path.basename(local), "w", atomictemp=True)
     else:
         out = ui.fout
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/smartset.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,1066 @@
+# smartset.py - data structure for revision set
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import (
+    util,
+)
+
+def _formatsetrepr(r):
+    """Format an optional printable representation of a set
+
+    ========  =================================
+    type(r)   example
+    ========  =================================
+    tuple     ('<not %r>', other)
+    str       '<branch closed>'
+    callable  lambda: '<branch %r>' % sorted(b)
+    object    other
+    ========  =================================
+    """
+    if r is None:
+        return ''
+    elif isinstance(r, tuple):
+        return r[0] % r[1:]
+    elif isinstance(r, str):
+        return r
+    elif callable(r):
+        return r()
+    else:
+        return repr(r)
+
+class abstractsmartset(object):
+
+    def __nonzero__(self):
+        """True if the smartset is not empty"""
+        raise NotImplementedError()
+
+    __bool__ = __nonzero__
+
+    def __contains__(self, rev):
+        """provide fast membership testing"""
+        raise NotImplementedError()
+
+    def __iter__(self):
+        """iterate the set in the order it is supposed to be iterated"""
+        raise NotImplementedError()
+
+    # Attributes containing a function to perform a fast iteration in a given
+    # direction. A smartset can have none, one, or both defined.
+    #
+    # Default value is None instead of a function returning None to avoid
+    # initializing an iterator just for testing if a fast method exists.
+    fastasc = None
+    fastdesc = None
+
+    def isascending(self):
+        """True if the set will iterate in ascending order"""
+        raise NotImplementedError()
+
+    def isdescending(self):
+        """True if the set will iterate in descending order"""
+        raise NotImplementedError()
+
+    def istopo(self):
+        """True if the set will iterate in topographical order"""
+        raise NotImplementedError()
+
+    def min(self):
+        """return the minimum element in the set"""
+        if self.fastasc is None:
+            v = min(self)
+        else:
+            for v in self.fastasc():
+                break
+            else:
+                raise ValueError('arg is an empty sequence')
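+        # cache the result: assigning to the instance shadows this
+        # method, so later calls are O(1)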
+        self.min = lambda: v
+        return v
+
+    def max(self):
+        """return the maximum element in the set"""
+        if self.fastdesc is None:
+            v = max(self)
+        else:
+            for v in self.fastdesc():
+                break
+            else:
+                raise ValueError('arg is an empty sequence')
+        self.max = lambda: v
+        return v
+
+    def first(self):
+        """return the first element in the set (user iteration perspective)
+
+        Return None if the set is empty"""
+        raise NotImplementedError()
+
+    def last(self):
+        """return the last element in the set (user iteration perspective)
+
+        Return None if the set is empty"""
+        raise NotImplementedError()
+
+    def __len__(self):
+        """return the length of the smartsets
+
+        This can be expensive on a smartset that could otherwise be lazy."""
+        raise NotImplementedError()
+
+    def reverse(self):
+        """reverse the expected iteration order"""
+        raise NotImplementedError()
+
+    def sort(self, reverse=True):
+        """get the set to iterate in an ascending or descending order"""
+        raise NotImplementedError()
+
+    def __and__(self, other):
+        """Returns a new object with the intersection of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        if isinstance(other, fullreposet):
+            return self
+        return self.filter(other.__contains__, condrepr=other, cache=False)
+
+    def __add__(self, other):
+        """Returns a new object with the union of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        return addset(self, other)
+
+    def __sub__(self, other):
+        """Returns a new object with the substraction of the two collections.
+
+        This is part of the mandatory API for smartset."""
+        c = other.__contains__
+        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
+                           cache=False)
+
+    def filter(self, condition, condrepr=None, cache=True):
+        """Returns this smartset filtered by condition as a new smartset.
+
+        `condition` is a callable which takes a revision number and returns a
+        boolean. Optional `condrepr` provides a printable representation of
+        the given `condition`.
+
+        This is part of the mandatory API for smartset."""
+        # builtin functions cannot be cached, but they do not need to be
+        if cache and util.safehasattr(condition, 'func_code'):
+            condition = util.cachefunc(condition)
+        return filteredset(self, condition, condrepr)
+
+class baseset(abstractsmartset):
+    """Basic data structure that represents a revset and contains the basic
+    operation that it should be able to perform.
+
+    Every method in this class should be implemented by any smartset class.
+
+    This class could be constructed by an (unordered) set, or an (ordered)
+    list-like object. If a set is provided, it'll be sorted lazily.
+
+    >>> x = [4, 0, 7, 6]
+    >>> y = [5, 6, 7, 3]
+
+    Construct by a set:
+    >>> xs = baseset(set(x))
+    >>> ys = baseset(set(y))
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[0, 4, 6, 7, 3, 5], [6, 7], [0, 4]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    Construct by a list-like:
+    >>> xs = baseset(x)
+    >>> ys = baseset(i for i in y)
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'filteredset', 'filteredset']
+
+    Populate "_set" fields in the lists so set optimization may be used:
+    >>> [1 in xs, 3 in ys]
+    [False, True]
+
+    Without sort(), results won't be changed:
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[4, 0, 7, 6, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'filteredset', 'filteredset']
+
+    With sort(), set optimization could be used:
+    >>> xs.sort(reverse=True)
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[7, 6, 4, 0, 5, 3], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    >>> ys.sort()
+    >>> [list(i) for i in [xs + ys, xs & ys, xs - ys]]
+    [[7, 6, 4, 0, 3, 5], [7, 6], [4, 0]]
+    >>> [type(i).__name__ for i in [xs + ys, xs & ys, xs - ys]]
+    ['addset', 'baseset', 'baseset']
+
+    istopo is preserved across set operations
+    >>> xs = baseset(set(x), istopo=True)
+    >>> rs = xs & ys
+    >>> type(rs).__name__
+    'baseset'
+    >>> rs._istopo
+    True
+    """
+    def __init__(self, data=(), datarepr=None, istopo=False):
+        """
+        datarepr: a tuple of (format, obj, ...), a function or an object that
+                  provides a printable representation of the given data.
+        """
+        self._ascending = None
+        self._istopo = istopo
+        if isinstance(data, set):
+            # converting set to list has a cost, do it lazily
+            self._set = data
+            # a set has no order, so we pick one for stability purposes
+            self._ascending = True
+        else:
+            if not isinstance(data, list):
+                data = list(data)
+            self._list = data
+        self._datarepr = datarepr
+
+    @util.propertycache
+    def _set(self):
+        return set(self._list)
+
+    @util.propertycache
+    def _asclist(self):
+        asclist = self._list[:]
+        asclist.sort()
+        return asclist
+
+    @util.propertycache
+    def _list(self):
+        # _list is only lazily constructed if we have _set
+        assert '_set' in self.__dict__
+        return list(self._set)
+
+    def __iter__(self):
+        if self._ascending is None:
+            return iter(self._list)
+        elif self._ascending:
+            return iter(self._asclist)
+        else:
+            return reversed(self._asclist)
+
+    def fastasc(self):
+        return iter(self._asclist)
+
+    def fastdesc(self):
+        return reversed(self._asclist)
+
+    @util.propertycache
+    def __contains__(self):
+        return self._set.__contains__
+
+    def __nonzero__(self):
+        return bool(len(self))
+
+    __bool__ = __nonzero__
+
+    def sort(self, reverse=False):
+        self._ascending = not bool(reverse)
+        self._istopo = False
+
+    def reverse(self):
+        if self._ascending is None:
+            self._list.reverse()
+        else:
+            self._ascending = not self._ascending
+        self._istopo = False
+
+    def __len__(self):
+        if '_list' in self.__dict__:
+            return len(self._list)
+        else:
+            return len(self._set)
+
+    def isascending(self):
+        """Returns True if the collection is ascending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._ascending is not None and self._ascending
+
+    def isdescending(self):
+        """Returns True if the collection is descending order, False if not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._ascending is not None and not self._ascending
+
+    def istopo(self):
+        """Is the collection is in topographical order or not.
+
+        This is part of the mandatory API for smartset."""
+        if len(self) <= 1:
+            return True
+        return self._istopo
+
+    def first(self):
+        if self:
+            if self._ascending is None:
+                return self._list[0]
+            elif self._ascending:
+                return self._asclist[0]
+            else:
+                return self._asclist[-1]
+        return None
+
+    def last(self):
+        if self:
+            if self._ascending is None:
+                return self._list[-1]
+            elif self._ascending:
+                return self._asclist[-1]
+            else:
+                return self._asclist[0]
+        return None
+
+    def _fastsetop(self, other, op):
+        # try to use native set operations as fast paths
+        if (type(other) is baseset and '_set' in other.__dict__ and '_set' in
+            self.__dict__ and self._ascending is not None):
+            s = baseset(data=getattr(self._set, op)(other._set),
+                        istopo=self._istopo)
+            s._ascending = self._ascending
+        else:
+            s = getattr(super(baseset, self), op)(other)
+        return s
+
+    def __and__(self, other):
+        return self._fastsetop(other, '__and__')
+
+    def __sub__(self, other):
+        return self._fastsetop(other, '__sub__')
+
+    def __repr__(self):
+        d = {None: '', False: '-', True: '+'}[self._ascending]
+        s = _formatsetrepr(self._datarepr)
+        if not s:
+            l = self._list
+            # if _list has been built from a set, it might have a different
+            # order from one python implementation to another.
+            # We fall back to the sorted version for a stable output.
+            if self._ascending is not None:
+                l = self._asclist
+            s = repr(l)
+        return '<%s%s %s>' % (type(self).__name__, d, s)
+
+class filteredset(abstractsmartset):
+    """Duck type for baseset class which iterates lazily over the revisions in
+    the subset and contains a function which tests for membership in the
+    revset
+    """
+    def __init__(self, subset, condition=lambda x: True, condrepr=None):
+        """
+        condition: a function that decides whether a revision in the subset
+                   belongs to the revset or not.
+        condrepr: a tuple of (format, obj, ...), a function or an object that
+                  provides a printable representation of the given condition.
+        """
+        self._subset = subset
+        self._condition = condition
+        self._condrepr = condrepr
+
+    def __contains__(self, x):
+        return x in self._subset and self._condition(x)
+
+    def __iter__(self):
+        return self._iterfilter(self._subset)
+
+    def _iterfilter(self, it):
+        cond = self._condition
+        for x in it:
+            if cond(x):
+                yield x
+
+    @property
+    def fastasc(self):
+        it = self._subset.fastasc
+        if it is None:
+            return None
+        return lambda: self._iterfilter(it())
+
+    @property
+    def fastdesc(self):
+        it = self._subset.fastdesc
+        if it is None:
+            return None
+        return lambda: self._iterfilter(it())
+
+    def __nonzero__(self):
+        fast = None
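+        # pick the fastest usable iterator, preferring one that matches
+        # the set's current iteration direction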
+        candidates = [self.fastasc if self.isascending() else None,
+                      self.fastdesc if self.isdescending() else None,
+                      self.fastasc,
+                      self.fastdesc]
+        for candidate in candidates:
+            if candidate is not None:
+                fast = candidate
+                break
+
+        if fast is not None:
+            it = fast()
+        else:
+            it = self
+
+        for r in it:
+            return True
+        return False
+
+    __bool__ = __nonzero__
+
+    def __len__(self):
+        # Basic implementation to be changed in future patches.
+        # Until this gets improved, we use a generator expression
+        # here, since list comprehensions are free to call __len__ again,
+        # causing infinite recursion.
+        l = baseset(r for r in self)
+        return len(l)
+
+    def sort(self, reverse=False):
+        self._subset.sort(reverse=reverse)
+
+    def reverse(self):
+        self._subset.reverse()
+
+    def isascending(self):
+        return self._subset.isascending()
+
+    def isdescending(self):
+        return self._subset.isdescending()
+
+    def istopo(self):
+        return self._subset.istopo()
+
+    def first(self):
+        for x in self:
+            return x
+        return None
+
+    def last(self):
+        it = None
+        if self.isascending():
+            it = self.fastdesc
+        elif self.isdescending():
+            it = self.fastasc
+        if it is not None:
+            for x in it():
+                return x
+            return None # empty case
+        else:
+            x = None
+            for x in self:
+                pass
+            return x
+
+    def __repr__(self):
+        xs = [repr(self._subset)]
+        s = _formatsetrepr(self._condrepr)
+        if s:
+            xs.append(s)
+        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
+
+def _iterordered(ascending, iter1, iter2):
+    """produce an ordered iteration from two iterators with the same order
+
+    The ascending parameter is used to indicate the iteration direction.
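+
+    A small sketch of the merging behaviour (values present in both iterators
+    are yielded only once):
+
+    >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
+    [1, 2, 3, 5, 6]
+    >>> list(_iterordered(False, iter([5, 3, 1]), iter([6, 3, 2])))
+    [6, 5, 3, 2, 1]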
+    """
+    choice = max
+    if ascending:
+        choice = min
+
+    val1 = None
+    val2 = None
+    try:
+        # Consume both iterators in an ordered way until one is empty
+        while True:
+            if val1 is None:
+                val1 = next(iter1)
+            if val2 is None:
+                val2 = next(iter2)
+            n = choice(val1, val2)
+            yield n
+            if val1 == n:
+                val1 = None
+            if val2 == n:
+                val2 = None
+    except StopIteration:
+        # Flush any remaining values and consume the other one
+        it = iter2
+        if val1 is not None:
+            yield val1
+            it = iter1
+        elif val2 is not None:
+            # after an equality match both values can be None, in which
+            # case there is nothing to flush
+            yield val2
+        for val in it:
+            yield val
+
+class addset(abstractsmartset):
+    """Represent the addition of two sets
+
+    Wrapper structure for lazily adding two structures without losing much
+    performance on the __contains__ method.
+
+    If the ascending attribute is set, that means the two structures are
+    ordered in either an ascending or descending way. Therefore, we can add
+    them while maintaining the order by iterating over both at the same time.
+
+    >>> xs = baseset([0, 3, 2])
+    >>> ys = baseset([5, 2, 4])
+
+    >>> rs = addset(xs, ys)
+    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
+    (True, True, False, True, 0, 4)
+    >>> rs = addset(xs, baseset([]))
+    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
+    (True, True, False, 0, 2)
+    >>> rs = addset(baseset([]), baseset([]))
+    >>> bool(rs), 0 in rs, rs.first(), rs.last()
+    (False, False, None, None)
+
+    iterate unsorted:
+    >>> rs = addset(xs, ys)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs)  # without _genlist
+    [0, 3, 2, 5, 4]
+    >>> assert not rs._genlist
+    >>> len(rs)
+    5
+    >>> [x for x in rs]  # with _genlist
+    [0, 3, 2, 5, 4]
+    >>> assert rs._genlist
+
+    iterate ascending:
+    >>> rs = addset(xs, ys, ascending=True)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs), list(x for x in rs.fastasc())  # without _asclist
+    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
+    >>> assert not rs._asclist
+    >>> len(rs)
+    5
+    >>> [x for x in rs], [x for x in rs.fastasc()]
+    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
+    >>> assert rs._asclist
+
+    iterate descending:
+    >>> rs = addset(xs, ys, ascending=False)
+    >>> # (use generator because pypy could call len())
+    >>> list(x for x in rs), list(x for x in rs.fastdesc())  # without _asclist
+    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
+    >>> assert not rs._asclist
+    >>> len(rs)
+    5
+    >>> [x for x in rs], [x for x in rs.fastdesc()]
+    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
+    >>> assert rs._asclist
+
+    iterate ascending without fastasc:
+    >>> rs = addset(xs, generatorset(ys), ascending=True)
+    >>> assert rs.fastasc is None
+    >>> [x for x in rs]
+    [0, 2, 3, 4, 5]
+
+    iterate descending without fastdesc:
+    >>> rs = addset(generatorset(xs), ys, ascending=False)
+    >>> assert rs.fastdesc is None
+    >>> [x for x in rs]
+    [5, 4, 3, 2, 0]
+    """
+    def __init__(self, revs1, revs2, ascending=None):
+        self._r1 = revs1
+        self._r2 = revs2
+        self._iter = None
+        self._ascending = ascending
+        self._genlist = None
+        self._asclist = None
+
+    def __len__(self):
+        return len(self._list)
+
+    def __nonzero__(self):
+        return bool(self._r1) or bool(self._r2)
+
+    __bool__ = __nonzero__
+
+    @util.propertycache
+    def _list(self):
+        if not self._genlist:
+            self._genlist = baseset(iter(self))
+        return self._genlist
+
+    def __iter__(self):
+        """Iterate over both collections without repeating elements
+
+        If the ascending attribute is not set, iterate over the first one and
+        then over the second one checking for membership on the first one so we
+        don't yield any duplicates.
+
+        If the ascending attribute is set, iterate over both collections at the
+        same time, yielding only one value at a time in the given order.
+        """
+        if self._ascending is None:
+            if self._genlist:
+                return iter(self._genlist)
+            def arbitraryordergen():
+                for r in self._r1:
+                    yield r
+                inr1 = self._r1.__contains__
+                for r in self._r2:
+                    if not inr1(r):
+                        yield r
+            return arbitraryordergen()
+        # try to use our own fast iterator if it exists
+        self._trysetasclist()
+        if self._ascending:
+            attr = 'fastasc'
+        else:
+            attr = 'fastdesc'
+        it = getattr(self, attr)
+        if it is not None:
+            return it()
+        # maybe half of the component supports fast
+        # get iterator for _r1
+        iter1 = getattr(self._r1, attr)
+        if iter1 is None:
+            # let's avoid side effects (not sure it matters)
+            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
+        else:
+            iter1 = iter1()
+        # get iterator for _r2
+        iter2 = getattr(self._r2, attr)
+        if iter2 is None:
+            # let's avoid side effects (not sure it matters)
+            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
+        else:
+            iter2 = iter2()
+        return _iterordered(self._ascending, iter1, iter2)
+
+    def _trysetasclist(self):
+        """populate the _asclist attribute if possible and necessary"""
+        if self._genlist is not None and self._asclist is None:
+            self._asclist = sorted(self._genlist)
+
+    @property
+    def fastasc(self):
+        self._trysetasclist()
+        if self._asclist is not None:
+            return self._asclist.__iter__
+        iter1 = self._r1.fastasc
+        iter2 = self._r2.fastasc
+        if None in (iter1, iter2):
+            return None
+        return lambda: _iterordered(True, iter1(), iter2())
+
+    @property
+    def fastdesc(self):
+        self._trysetasclist()
+        if self._asclist is not None:
+            return self._asclist.__reversed__
+        iter1 = self._r1.fastdesc
+        iter2 = self._r2.fastdesc
+        if None in (iter1, iter2):
+            return None
+        return lambda: _iterordered(False, iter1(), iter2())
+
+    def __contains__(self, x):
+        return x in self._r1 or x in self._r2
+
+    def sort(self, reverse=False):
+        """Sort the added set
+
+        For this we reuse the cached list of all the generated values, and if
+        we know whether they are ascending or descending we can sort them in a
+        smart way.
+        """
+        self._ascending = not reverse
+
+    def isascending(self):
+        return self._ascending is not None and self._ascending
+
+    def isdescending(self):
+        return self._ascending is not None and not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if the two sets combined are still
+        # in topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def reverse(self):
+        if self._ascending is None:
+            self._list.reverse()
+        else:
+            self._ascending = not self._ascending
+
+    def first(self):
+        for x in self:
+            return x
+        return None
+
+    def last(self):
+        self.reverse()
+        val = self.first()
+        self.reverse()
+        return val
+
+    def __repr__(self):
+        d = {None: '', False: '-', True: '+'}[self._ascending]
+        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
+
+class generatorset(abstractsmartset):
+    """Wrap a generator for lazy iteration
+
+    Wrapper structure for generators that provides lazy membership and can
+    be iterated more than once.
+    When asked for membership it generates values until either it finds the
+    requested one or has gone through all the elements in the generator.
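+
+    A small sketch of the lazy consumption (doctest):
+
+    >>> gs = generatorset(iter([0, 2, 4]), iterasc=True)
+    >>> 2 in gs, 3 in gs, list(gs)
+    (True, False, [0, 2, 4])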
+    """
+    def __init__(self, gen, iterasc=None):
+        """
+        gen: a generator producing the values for the generatorset.
+        iterasc: None for an unsorted generator, True when 'gen' yields
+                 ascending values, False when it yields descending ones.
+        """
+        self._gen = gen
+        self._asclist = None
+        self._cache = {}
+        self._genlist = []
+        self._finished = False
+        self._ascending = True
+        if iterasc is not None:
+            if iterasc:
+                self.fastasc = self._iterator
+                self.__contains__ = self._asccontains
+            else:
+                self.fastdesc = self._iterator
+                self.__contains__ = self._desccontains
+
+    def __nonzero__(self):
+        # Do not use 'for r in self' because it will enforce the iteration
+        # order (default ascending), possibly unrolling a whole descending
+        # iterator.
+        if self._genlist:
+            return True
+        for r in self._consumegen():
+            return True
+        return False
+
+    __bool__ = __nonzero__
+
+    def __contains__(self, x):
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+
+        self._cache[x] = False
+        return False
+
+    def _asccontains(self, x):
+        """version of contains optimised for ascending generator"""
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l > x:
+                break
+
+        self._cache[x] = False
+        return False
+
+    def _desccontains(self, x):
+        """version of contains optimised for descending generator"""
+        if x in self._cache:
+            return self._cache[x]
+
+        # Use new values only, as existing values would be cached.
+        for l in self._consumegen():
+            if l == x:
+                return True
+            if l < x:
+                break
+
+        self._cache[x] = False
+        return False
+
+    def __iter__(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        if it is not None:
+            return it()
+        # we need to consume the iterator
+        for x in self._consumegen():
+            pass
+        # recall the same code
+        return iter(self)
+
+    def _iterator(self):
+        if self._finished:
+            return iter(self._genlist)
+
+        # We have to use this complex iteration strategy to allow multiple
+        # iterations at the same time. We need to be able to catch revisions
+        # consumed from _consumegen and added to genlist by another instance.
+        #
+        # Getting rid of it would provide about a 15% speedup on this
+        # iteration.
+        genlist = self._genlist
+        nextgen = self._consumegen()
+        _len, _next = len, next # cache global lookup
+        def gen():
+            i = 0
+            while True:
+                if i < _len(genlist):
+                    yield genlist[i]
+                else:
+                    yield _next(nextgen)
+                i += 1
+        return gen()
+
+    def _consumegen(self):
+        cache = self._cache
+        genlist = self._genlist.append
+        for item in self._gen:
+            cache[item] = True
+            genlist(item)
+            yield item
+        if not self._finished:
+            self._finished = True
+            asc = self._genlist[:]
+            asc.sort()
+            self._asclist = asc
+            self.fastasc = asc.__iter__
+            self.fastdesc = asc.__reversed__
+
+    def __len__(self):
+        for x in self._consumegen():
+            pass
+        return len(self._genlist)
+
+    def sort(self, reverse=False):
+        self._ascending = not reverse
+
+    def reverse(self):
+        self._ascending = not self._ascending
+
+    def isascending(self):
+        return self._ascending
+
+    def isdescending(self):
+        return not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if this set is still in
+        # topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def first(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        if it is None:
+            # we need to consume all and try again
+            for x in self._consumegen():
+                pass
+            return self.first()
+        return next(it(), None)
+
+    def last(self):
+        if self._ascending:
+            it = self.fastdesc
+        else:
+            it = self.fastasc
+        if it is None:
+            # we need to consume all and try again
+            for x in self._consumegen():
+                pass
+            return self.last()
+        return next(it(), None)
+
+    def __repr__(self):
+        d = {False: '-', True: '+'}[self._ascending]
+        return '<%s%s>' % (type(self).__name__, d)
+
+class spanset(abstractsmartset):
+    """Duck type for baseset class which represents a range of revisions and
+    can work lazily and without having all the range in memory
+
+    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
+    notable points:
+    - when y < x the spanset will be automatically descending,
+    - revisions filtered by this repoview will be skipped.
+
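+    A sketch with a minimal repo stub (doctest; only the attributes spanset
+    reads are faked):
+
+    >>> class fakechangelog(object):
+    ...     filteredrevs = frozenset([3])
+    >>> class fakerepo(object):
+    ...     changelog = fakechangelog()
+    ...     def __len__(self):
+    ...         return 5
+    >>> list(spanset(fakerepo())), list(spanset(fakerepo(), 4, 0))
+    ([0, 1, 2, 4], [4, 2, 1])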
+    """
+    def __init__(self, repo, start=0, end=None):
+        """
+        start: first revision included in the set
+               (defaults to 0)
+        end:   first revision excluded (last + 1)
+               (defaults to len(repo))
+
+        Spanset will be descending if `end` < `start`.
+        """
+        if end is None:
+            end = len(repo)
+        self._ascending = start <= end
+        if not self._ascending:
+            start, end = end + 1, start + 1
+        self._start = start
+        self._end = end
+        self._hiddenrevs = repo.changelog.filteredrevs
+
+    def sort(self, reverse=False):
+        self._ascending = not reverse
+
+    def reverse(self):
+        self._ascending = not self._ascending
+
+    def istopo(self):
+        # not worth the trouble asserting if this set is still in
+        # topological order. Use the sort() predicate to explicitly sort
+        # again instead.
+        return False
+
+    def _iterfilter(self, iterrange):
+        s = self._hiddenrevs
+        for r in iterrange:
+            if r not in s:
+                yield r
+
+    def __iter__(self):
+        if self._ascending:
+            return self.fastasc()
+        else:
+            return self.fastdesc()
+
+    def fastasc(self):
+        iterrange = xrange(self._start, self._end)
+        if self._hiddenrevs:
+            return self._iterfilter(iterrange)
+        return iter(iterrange)
+
+    def fastdesc(self):
+        iterrange = xrange(self._end - 1, self._start - 1, -1)
+        if self._hiddenrevs:
+            return self._iterfilter(iterrange)
+        return iter(iterrange)
+
+    def __contains__(self, rev):
+        hidden = self._hiddenrevs
+        return ((self._start <= rev < self._end)
+                and not (hidden and rev in hidden))
+
+    def __nonzero__(self):
+        for r in self:
+            return True
+        return False
+
+    __bool__ = __nonzero__
+
+    def __len__(self):
+        if not self._hiddenrevs:
+            return abs(self._end - self._start)
+        else:
+            count = 0
+            start = self._start
+            end = self._end
+            for rev in self._hiddenrevs:
+                if (end < rev <= start) or (start <= rev < end):
+                    count += 1
+            return abs(self._end - self._start) - count
+
+    def isascending(self):
+        return self._ascending
+
+    def isdescending(self):
+        return not self._ascending
+
+    def first(self):
+        if self._ascending:
+            it = self.fastasc
+        else:
+            it = self.fastdesc
+        for x in it():
+            return x
+        return None
+
+    def last(self):
+        if self._ascending:
+            it = self.fastdesc
+        else:
+            it = self.fastasc
+        for x in it():
+            return x
+        return None
+
+    def __repr__(self):
+        d = {False: '-', True: '+'}[self._ascending]
+        return '<%s%s %d:%d>' % (type(self).__name__, d,
+                                 self._start, self._end - 1)
+
+class fullreposet(spanset):
+    """a set containing all revisions in the repo
+
+    This class exists to host special optimization and magic to handle virtual
+    revisions such as "null".
+    """
+
+    def __init__(self, repo):
+        super(fullreposet, self).__init__(repo)
+
+    def __and__(self, other):
+        """As self contains the whole repo, all of the other set should also be
+        in self. Therefore `self & other = other`.
+
+        This boldly assumes the other contains valid revs only.
+        """
+        # other is not a smartset, make it so
+        if not util.safehasattr(other, 'isascending'):
+            # filter out hidden revisions
+            # (this boldly assumes all smartsets are pure)
+            #
+            # `other` was used with "&", let's assume this is a set-like
+            # object.
+            other = baseset(other - self._hiddenrevs)
+
+        other.sort(reverse=self.isdescending())
+        return other
+
+def prettyformat(revs):
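+    """indent the repr of a smartset, one nested set per line
+
+    A small sketch (doctest):
+
+    >>> print(prettyformat(addset(baseset([0, 3, 2]), baseset([5, 2, 4]))))
+    <addset
+      <baseset [0, 3, 2]>,
+      <baseset [5, 2, 4]>>
+    """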
+    lines = []
+    rs = repr(revs)
+    p = 0
+    while p < len(rs):
+        q = rs.find('<', p + 1)
+        if q < 0:
+            q = len(rs)
+        l = rs.count('<', 0, p) - rs.count('>', 0, p)
+        assert l >= 0
+        lines.append((l, rs[p:q].rstrip()))
+        p = q
+    return '\n'.join('  ' * l + s for l, s in lines)
--- a/mercurial/sshpeer.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/sshpeer.py	Tue Apr 18 12:24:34 2017 -0400
@@ -62,7 +62,7 @@
     large read for data not yet emitted by the server.
 
     The main pipe is expected to be a 'bufferedinputpipe' from the util module
-    that handle all the os specific bites. This class lives in this module
+    that handle all the os specific bits. This class lives in this module
     because it focus on behavior specific to the ssh protocol."""
 
     def __init__(self, ui, main, side):
@@ -150,7 +150,7 @@
                 util.shellquote("%s init %s" %
                     (_serverquote(remotecmd), _serverquote(self.path))))
             ui.debug('running %s\n' % cmd)
-            res = ui.system(cmd)
+            res = ui.system(cmd, blockedtag='sshpeer')
             if res != 0:
                 self._abort(error.RepoError(_("could not create remote repo")))
 
--- a/mercurial/sslutil.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/sslutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -414,8 +414,10 @@
         # a hint to the user.
         # Only modern ssl module exposes SSLContext.get_ca_certs() so we can
         # only show this warning if modern ssl is available.
-        # The exception handler is here because of
-        # https://bugs.python.org/issue20916.
+        # The exception handler is here to handle bugs around cert attributes:
+        # https://bugs.python.org/issue20916#msg213479.  (See issue5313.)
+        # When the main 20916 bug occurs, 'sslcontext.get_ca_certs()' is a
+        # non-empty list, but the following conditional is otherwise True.
         try:
             if (caloaded and settings['verifymode'] == ssl.CERT_REQUIRED and
                 modernssl and not sslcontext.get_ca_certs()):
@@ -720,7 +722,8 @@
     # to load the system CA store. If we're running on Apple Python, use this
     # trick.
     if _plainapplepython():
-        dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem')
+        dummycert = os.path.join(
+            os.path.dirname(pycompat.fsencode(__file__)), 'dummycert.pem')
         if os.path.exists(dummycert):
             return dummycert
 
@@ -814,6 +817,16 @@
             if peerfingerprints[hash].lower() == fingerprint:
                 ui.debug('%s certificate matched fingerprint %s:%s\n' %
                          (host, hash, fmtfingerprint(fingerprint)))
+                if settings['legacyfingerprint']:
+                    ui.warn(_('(SHA-1 fingerprint for %s found in legacy '
+                              '[hostfingerprints] section; '
+                              'if you trust this fingerprint, set the '
+                              'following config value in [hostsecurity] and '
+                              'remove the old one from [hostfingerprints] '
+                              'to upgrade to a more secure SHA-256 '
+                              'fingerprint: '
+                              '%s.fingerprints=%s)\n') % (
+                                  host, host, nicefingerprint))
                 return
 
         # Pinned fingerprint didn't match. This is a fatal error.
--- a/mercurial/statichttprepo.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/statichttprepo.py	Tue Apr 18 12:24:34 2017 -0400
@@ -24,6 +24,7 @@
     store,
     url,
     util,
+    vfs as vfsmod,
 )
 
 urlerr = util.urlerr
@@ -86,7 +87,7 @@
     urlopener = url.opener(ui, authinfo)
     urlopener.add_handler(byterange.HTTPRangeHandler())
 
-    class statichttpvfs(scmutil.abstractvfs):
+    class statichttpvfs(vfsmod.abstractvfs):
         def __init__(self, base):
             self.base = base
 
@@ -121,9 +122,8 @@
         u = util.url(path.rstrip('/') + "/.hg")
         self.path, authinfo = u.authinfo()
 
-        opener = build_opener(ui, authinfo)
-        self.opener = opener(self.path)
-        self.vfs = self.opener
+        vfsclass = build_opener(ui, authinfo)
+        self.vfs = vfsclass(self.path)
         self._phasedefaults = []
 
         self.names = namespaces.namespaces()
@@ -148,7 +148,7 @@
                 raise error.RepoError(msg)
 
         # setup store
-        self.store = store.store(requirements, self.path, opener)
+        self.store = store.store(requirements, self.path, vfsclass)
         self.spath = self.store.path
         self.svfs = self.store.opener
         self.sjoin = self.store.join
--- a/mercurial/statprof.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/statprof.py	Tue Apr 18 12:24:34 2017 -0400
@@ -433,6 +433,7 @@
     Hotpath = 3
     FlameGraph = 4
     Json = 5
+    Chrome = 6
 
 def display(fp=None, format=3, data=None, **kwargs):
     '''Print statistics, either to stdout or the given file object.'''
@@ -457,10 +458,12 @@
         write_to_flame(data, fp, **kwargs)
     elif format == DisplayFormats.Json:
         write_to_json(data, fp)
+    elif format == DisplayFormats.Chrome:
+        write_to_chrome(data, fp, **kwargs)
     else:
         raise Exception("Invalid display format")
 
-    if format != DisplayFormats.Json:
+    if format not in (DisplayFormats.Json, DisplayFormats.Chrome):
         print('---', file=fp)
         print('Sample count: %d' % len(data.samples), file=fp)
         print('Total time: %f seconds' % data.accumulated_time, file=fp)
@@ -713,6 +716,23 @@
     os.system("perl ~/flamegraph.pl %s > %s" % (path, outputfile))
     print("Written to %s" % outputfile, file=fp)
 
+_pathcache = {}
+def simplifypath(path):
+    '''Attempt to make the path to a Python module easier to read by
+    removing whatever part of the Python search path it was found
+    on.'''
+
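+    # For instance, '/usr/lib/python2.7/site-packages/mercurial/util.py'
+    # would typically shrink to 'mercurial/util.py'; the exact result
+    # depends on the contents of sys.path.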
+    if path in _pathcache:
+        return _pathcache[path]
+    hgpath = pycompat.fsencode(encoding.__file__).rsplit(os.sep, 2)[0]
+    for p in [hgpath] + sys.path:
+        prefix = p + os.sep
+        if path.startswith(prefix):
+            path = path[len(prefix):]
+            break
+    _pathcache[path] = path
+    return path
+
 def write_to_json(data, fp):
     samples = []
 
@@ -726,6 +746,102 @@
 
     print(json.dumps(samples), file=fp)
 
+def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999):
+    samples = []
+    laststack = collections.deque()
+    lastseen = collections.deque()
+
+    # The Chrome tracing format allows us to use a compact stack
+    # representation to save space. It's fiddly but worth it.
+    # We maintain a bijection between stack and ID.
+    stack2id = {}
+    id2stack = [] # will eventually be rendered
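+    # For instance, the two-frame stack (('a.py:1', 'f'), ('b.py:2', 'g')),
+    # top of stack first, produces id2stack entries like
+    # {'category': 'b.py:2', 'name': 'b.py:2 g'} (the root, no parent) and
+    # {'category': 'a.py:1', 'name': 'a.py:1 f', 'parent': 0}.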
+
+    def stackid(stack):
+        if not stack:
+            return
+        if stack in stack2id:
+            return stack2id[stack]
+        parent = stackid(stack[1:])
+        myid = len(stack2id)
+        stack2id[stack] = myid
+        id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0]))
+        if parent is not None:
+            id2stack[-1].update(parent=parent)
+        return myid
+
+    def endswith(a, b):
+        return list(a)[-len(b):] == list(b)
+
+    # The sampling profiler can sample multiple times without
+    # advancing the clock, potentially causing the Chrome trace viewer
+    # to render single-pixel columns that we cannot zoom in on.  We
+    # work around this by pretending that zero-duration samples are a
+    # millisecond in length.
+
+    clamp = 0.001
+
+    # We provide knobs that by default attempt to filter out stack
+    # frames that are too noisy:
+    #
+    # * A few take almost all execution time. These are usually boring
+    #   setup functions, giving a stack that is deep but uninformative.
+    #
+    # * Numerous samples take almost no time, but introduce lots of
+    #   noisy, oft-deep "spines" into a rendered profile.
+
+    blacklist = set()
+    totaltime = data.samples[-1].time - data.samples[0].time
+    minthreshold = totaltime * minthreshold
+    maxthreshold = max(totaltime * maxthreshold, clamp)
+
+    def poplast():
+        oldsid = stackid(tuple(laststack))
+        oldcat, oldfunc = laststack.popleft()
+        oldtime, oldidx = lastseen.popleft()
+        duration = sample.time - oldtime
+        if minthreshold <= duration <= maxthreshold:
+            # ensure no zero-duration events
+            sampletime = max(oldtime + clamp, sample.time)
+            samples.append(dict(ph='E', name=oldfunc, cat=oldcat, sf=oldsid,
+                                ts=sampletime*1e6, pid=0))
+        else:
+            blacklist.add(oldidx)
+
+    # Much fiddling to synthesize correctly(ish) nested begin/end
+    # events given only stack snapshots.
+
+    for sample in data.samples:
+        tos = sample.stack[0]
+        name = tos.function
+        path = simplifypath(tos.path)
+        category = '%s:%d' % (path, tos.lineno)
+        stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
+                        frame.function) for frame in sample.stack))
+        qstack = collections.deque(stack)
+        if laststack == qstack:
+            continue
+        while laststack and qstack and laststack[-1] == qstack[-1]:
+            laststack.pop()
+            qstack.pop()
+        while laststack:
+            poplast()
+        for f in reversed(qstack):
+            lastseen.appendleft((sample.time, len(samples)))
+            laststack.appendleft(f)
+            path, name = f
+            sid = stackid(tuple(laststack))
+            samples.append(dict(ph='B', name=name, cat=path, ts=sample.time*1e6,
+                                sf=sid, pid=0))
+        laststack = collections.deque(stack)
+    while laststack:
+        poplast()
+    events = [s[1] for s in enumerate(samples) if s[0] not in blacklist]
+    frames = collections.OrderedDict((str(k), v)
+                                     for (k, v) in enumerate(id2stack))
+    json.dump(dict(traceEvents=events, stackFrames=frames), fp, indent=1)
+    fp.write('\n')
+
 def printusage():
     print("""
 The statprof command line allows you to inspect the last profile's results in
--- a/mercurial/store.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/store.py	Tue Apr 18 12:24:34 2017 -0400
@@ -17,8 +17,8 @@
     error,
     parsers,
     pycompat,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 
 # This avoids a collision between a file named foo and a dir named
@@ -99,12 +99,8 @@
     'the\\x07quick\\xadshot'
     '''
     e = '_'
-    if pycompat.ispy3:
-        xchr = lambda x: bytes([x])
-        asciistr = bytes(xrange(127))
-    else:
-        xchr = chr
-        asciistr = map(chr, xrange(127))
+    xchr = pycompat.bytechr
+    asciistr = list(map(xchr, range(127)))
     capitals = list(range(ord("A"), ord("Z") + 1))
 
     cmap = dict((x, x) for x in asciistr)
@@ -128,7 +124,7 @@
                     pass
             else:
                 raise KeyError
-    return (lambda s: ''.join([cmap[c] for c in s]),
+    return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]),
             lambda s: ''.join(list(decode(s))))
 
 _encodefname, _decodefname = _buildencodefun()
@@ -197,22 +193,22 @@
         if not n:
             continue
         if dotencode and n[0] in '. ':
-            n = "~%02x" % ord(n[0]) + n[1:]
+            n = "~%02x" % ord(n[0:1]) + n[1:]
             path[i] = n
         else:
             l = n.find('.')
             if l == -1:
                 l = len(n)
             if ((l == 3 and n[:3] in _winres3) or
-                (l == 4 and n[3] <= '9' and n[3] >= '1'
+                (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
                         and n[:3] in _winres4)):
                 # encode third letter ('aux' -> 'au~78')
-                ec = "~%02x" % ord(n[2])
+                ec = "~%02x" % ord(n[2:3])
                 n = n[0:2] + ec + n[3:]
                 path[i] = n
         if n[-1] in '. ':
             # encode last period or space ('foo...' -> 'foo..~2e')
-            path[i] = n[:-1] + "~%02x" % ord(n[-1])
+            path[i] = n[:-1] + "~%02x" % ord(n[-1:])
     return path
 
 _maxstorepathlen = 120
@@ -325,7 +321,7 @@
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
-        self.vfs = scmutil.filtervfs(vfs, encodedir)
+        self.vfs = vfsmod.filtervfs(vfs, encodedir)
         self.opener = self.vfs
 
     def join(self, f):
@@ -398,7 +394,7 @@
         self.createmode = _calcmode(vfs)
         vfs.createmode = self.createmode
         self.rawvfs = vfs
-        self.vfs = scmutil.filtervfs(vfs, encodefilename)
+        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
         self.opener = self.vfs
 
     def datafiles(self):
@@ -477,9 +473,9 @@
             self._load()
         return iter(self.entries)
 
-class _fncachevfs(scmutil.abstractvfs, scmutil.auditvfs):
+class _fncachevfs(vfsmod.abstractvfs, vfsmod.auditvfs):
     def __init__(self, vfs, fnc, encode):
-        scmutil.auditvfs.__init__(self, vfs)
+        vfsmod.auditvfs.__init__(self, vfs)
         self.fncache = fnc
         self.encode = encode
 
--- a/mercurial/streamclone.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/streamclone.py	Tue Apr 18 12:24:34 2017 -0400
@@ -8,7 +8,6 @@
 from __future__ import absolute_import
 
 import struct
-import time
 
 from .i18n import _
 from . import (
@@ -297,7 +296,7 @@
                        (filecount, util.bytecount(bytecount)))
         handled_bytes = 0
         repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
-        start = time.time()
+        start = util.timer()
 
         # TODO: get rid of (potential) inconsistency
         #
@@ -340,7 +339,7 @@
             # streamclone-ed file at next access
             repo.invalidate(clearfilecache=True)
 
-        elapsed = time.time() - start
+        elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
         repo.ui.progress(_('clone'), None)
--- a/mercurial/subrepo.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/subrepo.py	Tue Apr 18 12:24:34 2017 -0400
@@ -35,6 +35,7 @@
     pycompat,
     scmutil,
     util,
+    vfs as vfsmod,
 )
 
 hg = None
@@ -129,7 +130,7 @@
         for pattern, repl in p.items('subpaths'):
             # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
             # does a string decode.
-            repl = repl.encode('string-escape')
+            repl = util.escapestr(repl)
             # However, we still want to allow back references to go
             # through unharmed, so we turn r'\\1' into r'\1'. Again,
             # extra escapes are needed because re.sub string decodes.
@@ -443,6 +444,15 @@
         self._ctx = ctx
         self._path = path
 
+    def addwebdirpath(self, serverpath, webconf):
+        """Add the hgwebdir entries for this subrepo, and any of its subrepos.
+
+        ``serverpath`` is the path component of the URL for this repo.
+
+        ``webconf`` is the dictionary of hgwebdir entries.
+        """
+        pass
+
     def storeclean(self, path):
         """
         returns true if the repository has not changed since it was last
@@ -547,8 +557,8 @@
         """return filename iterator"""
         raise NotImplementedError
 
-    def filedata(self, name):
-        """return file data"""
+    def filedata(self, name, decode):
+        """return file data, optionally passed through repo decoders"""
         raise NotImplementedError
 
     def fileflags(self, name):
@@ -563,7 +573,7 @@
         """handle the files command for this subrepo"""
         return 1
 
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         if match is not None:
             files = [f for f in self.files() if match(f)]
         else:
@@ -577,7 +587,7 @@
             mode = 'x' in flags and 0o755 or 0o644
             symlink = 'l' in flags
             archiver.addfile(prefix + self._path + '/' + name,
-                             mode, symlink, self.filedata(name))
+                             mode, symlink, self.filedata(name, decode))
             self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                              unit=_('files'), total=total)
         self.ui.progress(_('archiving (%s)') % relpath, None)
@@ -620,7 +630,7 @@
     def wvfs(self):
         """return vfs to access the working directory of this subrepository
         """
-        return scmutil.vfs(self._ctx.repo().wvfs.join(self._path))
+        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))
 
     @propertycache
     def _relpath(self):
@@ -650,6 +660,10 @@
         self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
         self._initrepo(r, state[0], create)
 
+    @annotatesubrepoerror
+    def addwebdirpath(self, serverpath, webconf):
+        cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
+
     def storeclean(self, path):
         with self._repo.lock():
             return self._storeclean(path)
@@ -682,7 +696,7 @@
 
     @propertycache
     def _cachestorehashvfs(self):
-        return scmutil.vfs(self._repo.join('cache/storehash'))
+        return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
 
     def _readstorehashcache(self, remotepath):
         '''read the store hash cache for a given remote repository'''
@@ -787,7 +801,7 @@
                           % (inst, subrelpath(self)))
 
     @annotatesubrepoerror
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         self._get(self._state + ('hg',))
         total = abstractsubrepo.archive(self, archiver, prefix, match)
         rev = self._state[1]
@@ -795,7 +809,8 @@
         for subpath in ctx.substate:
             s = subrepo(ctx, subpath, True)
             submatch = matchmod.subdirmatcher(subpath, match)
-            total += s.archive(archiver, prefix + self._path + '/', submatch)
+            total += s.archive(archiver, prefix + self._path + '/', submatch,
+                               decode)
         return total
 
     @annotatesubrepoerror
@@ -961,9 +976,12 @@
         ctx = self._repo[rev]
         return ctx.manifest().keys()
 
-    def filedata(self, name):
+    def filedata(self, name, decode):
         rev = self._state[1]
-        return self._repo[rev][name].data()
+        data = self._repo[rev][name].data()
+        if decode:
+            data = self._repo.wwritedata(name, data)
+        return data
 
     def fileflags(self, name):
         rev = self._state[1]
@@ -1297,7 +1315,7 @@
             paths.append(name.encode('utf-8'))
         return paths
 
-    def filedata(self, name):
+    def filedata(self, name, decode):
         return self._svncommand(['cat'], name)[0]
 
 
@@ -1415,6 +1433,10 @@
         errpipe = None
         if self.ui.quiet:
             errpipe = open(os.devnull, 'w')
+        if self.ui._colormode and len(commands) and commands[0] == "diff":
+            # insert the argument at the front, since the end of
+            # the git diff arguments is reserved for paths
+            commands.insert(1, '--color')
         p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                              cwd=cwd, env=env, close_fds=util.closefds,
                              stdout=subprocess.PIPE, stderr=errpipe)
@@ -1777,7 +1799,7 @@
             else:
                 self.wvfs.unlink(f)
 
-    def archive(self, archiver, prefix, match=None):
+    def archive(self, archiver, prefix, match=None, decode=True):
         total = 0
         source, revision = self._state
         if not revision:
--- a/mercurial/tagmerge.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/tagmerge.py	Tue Apr 18 12:24:34 2017 -0400
@@ -169,7 +169,7 @@
     # finally we can join the sorted groups to get the final contents of the
     # merged .hgtags file, and then write it to disk
     mergedtagstring = '\n'.join([tags for rank, tags in finaltags if tags])
-    fp = repo.wfile('.hgtags', 'wb')
+    fp = repo.wvfs('.hgtags', 'wb')
     fp.write(mergedtagstring + '\n')
     fp.close()
 
--- a/mercurial/tags.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/tags.py	Tue Apr 18 12:24:34 2017 -0400
@@ -12,9 +12,7 @@
 
 from __future__ import absolute_import
 
-import array
 import errno
-import time
 
 from .node import (
     bin,
@@ -22,14 +20,15 @@
     nullid,
     short,
 )
+from .i18n import _
 from . import (
     encoding,
     error,
+    match as matchmod,
+    scmutil,
     util,
 )
 
-array = array.array
-
 # Tags computation can be expensive and caches exist to make it fast in
 # the common case.
 #
@@ -79,53 +78,154 @@
 # The most recent changeset (in terms of revlog ordering for the head
 # setting it) for each tag is last.
 
-def findglobaltags(ui, repo, alltags, tagtypes):
-    '''Find global tags in a repo.
+def fnoderevs(ui, repo, revs):
+    """return the list of '.hgtags' fnodes used in a set revisions
+
+    This is returned as list of unique fnodes. We use a list instead of a set
+    because order matters when it comes to tags."""
+    unfi = repo.unfiltered()
+    tonode = unfi.changelog.node
+    nodes = [tonode(r) for r in revs]
+    fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed helps the cache
+    fnodes = _filterfnodes(fnodes, nodes)
+    return fnodes
+
+def _nulltonone(value):
+    """convert nullid to None
 
-    "alltags" maps tag name to (node, hist) 2-tuples.
+    For a tag value, nullid means "deleted". This small utility function helps
+    translate that to None."""
+    if value == nullid:
+        return None
+    return value
+
+def difftags(ui, repo, oldfnodes, newfnodes):
+    """list differences between tags expressed in two set of file-nodes
+
+    The list contains entries in the form: (tagname, oldvalue, new value).
+    None is used to expressed missing value:
+        ('foo', None, 'abcd') is a new tag,
+        ('bar', 'ef01', None) is a deletion,
+        ('baz', 'abcd', 'ef01') is a tag movement.
+    """
+    if oldfnodes == newfnodes:
+        return []
+    oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
+    newtags = _tagsfromfnodes(ui, repo, newfnodes)
 
-    "tagtypes" maps tag name to tag type. Global tags always have the
-    "global" tag type.
+    # list of (tag, old, new): None means missing
+    entries = []
+    for tag, (new, __) in newtags.items():
+        new = _nulltonone(new)
+        old, __ = oldtags.pop(tag, (None, None))
+        old = _nulltonone(old)
+        if old != new:
+            entries.append((tag, old, new))
+    # handle deleted tags
+    for tag, (old, __) in oldtags.items():
+        old = _nulltonone(old)
+        if old is not None:
+            entries.append((tag, old, None))
+    entries.sort()
+    return entries
+
+def writediff(fp, difflist):
+    """write tags diff information to a file.
+
+    Data is stored in a line-based format:
+
+        <action> <hex-node> <tag-name>\n
+
+    Actions are defined as follows:
+       -R tag is removed,
+       +A tag is added,
+       -M tag is moved (old value),
+       +M tag is moved (new value),
 
-    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
-    should be passed in.
+    Example:
+
+         +A 875517b4806a848f942811a315a5bce30804ae85 t5
+
+    See the documentation of 'difftags' for details about the input.
+    """
+    add = '+A %s %s\n'
+    remove = '-R %s %s\n'
+    updateold = '-M %s %s\n'
+    updatenew = '+M %s %s\n'
+    for tag, old, new in difflist:
+        # translate to hex
+        if old is not None:
+            old = hex(old)
+        if new is not None:
+            new = hex(new)
+        # write to file
+        if old is None:
+            fp.write(add % (new, tag))
+        elif new is None:
+            fp.write(remove % (old, tag))
+        else:
+            fp.write(updateold % (old, tag))
+            fp.write(updatenew % (new, tag))
+
+def findglobaltags(ui, repo):
+    '''Find global tags in a repo: return a tagsmap
+
+    tagsmap: tag name to (node, hist) 2-tuples.
 
     The tags cache is read and updated as a side-effect of calling.
     '''
-    # This is so we can be lazy and assume alltags contains only global
-    # tags when we pass it to _writetagcache().
-    assert len(alltags) == len(tagtypes) == 0, \
-           "findglobaltags() should be called first"
-
     (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
     if cachetags is not None:
         assert not shouldwrite
         # XXX is this really 100% correct?  are there oddball special
         # cases where a global tag should outrank a local tag but won't,
         # because cachetags does not contain rank info?
-        _updatetags(cachetags, 'global', alltags, tagtypes)
-        return
+        alltags = {}
+        _updatetags(cachetags, alltags)
+        return alltags
 
-    seen = set()  # set of fnode
-    fctx = None
     for head in reversed(heads):  # oldest to newest
         assert head in repo.changelog.nodemap, \
                "tag cache returned bogus head %s" % short(head)
-
-        fnode = tagfnode.get(head)
-        if fnode and fnode not in seen:
-            seen.add(fnode)
-            if not fctx:
-                fctx = repo.filectx('.hgtags', fileid=fnode)
-            else:
-                fctx = fctx.filectx(fnode)
-
-            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
-            _updatetags(filetags, 'global', alltags, tagtypes)
+    fnodes = _filterfnodes(tagfnode, reversed(heads))
+    alltags = _tagsfromfnodes(ui, repo, fnodes)
 
     # and update the cache (if necessary)
     if shouldwrite:
         _writetagcache(ui, repo, valid, alltags)
+    return alltags
+
+def _filterfnodes(tagfnode, nodes):
+    """return a list of unique fnodes
+
+    The order of this list matches the order of "nodes". Preserving this order
+    is important as reading tags in a different order provides different
+    results."""
+    seen = set()  # set of fnode
+    fnodes = []
+    for no in nodes:  # oldest to newest
+        fnode = tagfnode.get(no)
+        if fnode and fnode not in seen:
+            seen.add(fnode)
+            fnodes.append(fnode)
+    return fnodes
+
+def _tagsfromfnodes(ui, repo, fnodes):
+    """return a tagsmap from a list of file-node
+
+    tagsmap: tag name to (node, hist) 2-tuples.
+
+    The order of the list matters."""
+    alltags = {}
+    fctx = None
+    for fnode in fnodes:
+        if fctx is None:
+            fctx = repo.filectx('.hgtags', fileid=fnode)
+        else:
+            fctx = fctx.filectx(fnode)
+        filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
+        _updatetags(filetags, alltags)
+    return alltags
 
 def readlocaltags(ui, repo, alltags, tagtypes):
     '''Read local tags in repo. Update alltags and tagtypes.'''
@@ -150,7 +250,7 @@
         except (LookupError, ValueError):
             del filetags[t]
 
-    _updatetags(filetags, "local", alltags, tagtypes)
+    _updatetags(filetags, alltags, 'local', tagtypes)
 
 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
     '''Read tag definitions from a file (or any source of lines).
@@ -228,15 +328,22 @@
         newtags[tag] = (taghist[-1], taghist[:-1])
     return newtags
 
-def _updatetags(filetags, tagtype, alltags, tagtypes):
-    '''Incorporate the tag info read from one file into the two
-    dictionaries, alltags and tagtypes, that contain all tag
-    info (global across all heads plus local).'''
+def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
+    """Incorporate the tag info read from one file into dictionnaries
+
+    The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
+
+    The second one, 'tagtypes', is optional and will be updated to track the
+    "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
+    needs to be set."""
+    if tagtype is None:
+        assert tagtypes is None
 
     for name, nodehist in filetags.iteritems():
         if name not in alltags:
             alltags[name] = nodehist
-            tagtypes[name] = tagtype
+            if tagtype is not None:
+                tagtypes[name] = tagtype
             continue
 
         # we prefer alltags[name] if:
@@ -248,7 +355,7 @@
         if (bnode != anode and anode in bhist and
             (bnode not in ahist or len(bhist) > len(ahist))):
             anode = bnode
-        else:
+        elif tagtype is not None:
             tagtypes[name] = tagtype
         ahist.extend([n for n in bhist if n not in ahist])
         alltags[name] = anode, ahist
@@ -278,8 +385,6 @@
     If the cache is not up to date, the caller is responsible for reading tag
     info from each returned head. (See findglobaltags().)
     '''
-    from . import scmutil  # avoid cycle
-
     try:
         cachefile = repo.vfs(_filename(repo), 'r')
         # force reading the file for static-http
@@ -344,30 +449,39 @@
         # potentially expensive search.
         return ([], {}, valid, None, True)
 
-    starttime = time.time()
 
     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads.  Worst case: no cache
     # file, so newheads == repoheads.
+    cachefnode = _getfnodes(ui, repo, repoheads)
+
+    # Caller has to iterate over all heads, but can use the filenodes in
+    # cachefnode to get to each .hgtags revision quickly.
+    return (repoheads, cachefnode, valid, None, True)
+
+def _getfnodes(ui, repo, nodes):
+    """return .hgtags fnodes for a list of changeset nodes
+
+    Return value is a {node: fnode} mapping. There will be no entry for nodes
+    without a '.hgtags' file.
+    """
+    starttime = util.timer()
     fnodescache = hgtagsfnodescache(repo.unfiltered())
     cachefnode = {}
-    for head in reversed(repoheads):
-        fnode = fnodescache.getfnode(head)
+    for node in reversed(nodes):
+        fnode = fnodescache.getfnode(node)
         if fnode != nullid:
-            cachefnode[head] = fnode
+            cachefnode[node] = fnode
 
     fnodescache.write()
 
-    duration = time.time() - starttime
+    duration = util.timer() - starttime
     ui.log('tagscache',
            '%d/%d cache hits/lookups in %0.4f '
            'seconds\n',
            fnodescache.hitcount, fnodescache.lookupcount, duration)
-
-    # Caller has to iterate over all heads, but can use the filenodes in
-    # cachefnode to get to each .hgtags revision quickly.
-    return (repoheads, cachefnode, valid, None, True)
+    return cachefnode
 
 def _writetagcache(ui, repo, valid, cachetags):
     filename = _filename(repo)
@@ -398,6 +512,110 @@
     except (OSError, IOError):
         pass
 
+def tag(repo, names, node, message, local, user, date, editor=False):
+    '''tag a revision with one or more symbolic names.
+
+    names is a list of strings or, when adding a single tag, names may be a
+    string.
+
+    if local is True, the tags are stored in a per-repository file.
+    otherwise, they are stored in the .hgtags file, and a new
+    changeset is committed with the change.
+
+    keyword arguments:
+
+    local: whether to store tags in non-version-controlled file
+    (default False)
+
+    message: commit message to use if committing
+
+    user: name of user to use if committing
+
+    date: date tuple to use if committing'''
+
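+    # A typical non-local invocation might look like (sketch):
+    #   tag(repo, 'v1.0', repo['.'].node(), 'Added tag v1.0', False,
+    #       None, None)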
+    if not local:
+        m = matchmod.exact(repo.root, '', ['.hgtags'])
+        if any(repo.status(match=m, unknown=True, ignored=True)):
+            raise error.Abort(_('working copy of .hgtags is changed'),
+                             hint=_('please commit .hgtags manually'))
+
+    repo.tags() # instantiate the cache
+    _tag(repo.unfiltered(), names, node, message, local, user, date,
+         editor=editor)
+
+def _tag(repo, names, node, message, local, user, date, extra=None,
+         editor=False):
+    if isinstance(names, str):
+        names = (names,)
+
+    branches = repo.branchmap()
+    for name in names:
+        repo.hook('pretag', throw=True, node=hex(node), tag=name,
+                  local=local)
+        if name in branches:
+            repo.ui.warn(_("warning: tag %s conflicts with existing"
+            " branch name\n") % name)
+
+    def writetags(fp, names, munge, prevtags):
+        fp.seek(0, 2)
+        if prevtags and prevtags[-1] != '\n':
+            fp.write('\n')
+        for name in names:
+            if munge:
+                m = munge(name)
+            else:
+                m = name
+
+            if (repo._tagscache.tagtypes and
+                name in repo._tagscache.tagtypes):
+                old = repo.tags().get(name, nullid)
+                fp.write('%s %s\n' % (hex(old), m))
+            fp.write('%s %s\n' % (hex(node), m))
+        fp.close()
+
+    prevtags = ''
+    if local:
+        try:
+            fp = repo.vfs('localtags', 'r+')
+        except IOError:
+            fp = repo.vfs('localtags', 'a')
+        else:
+            prevtags = fp.read()
+
+        # local tags are stored in the current charset
+        writetags(fp, names, None, prevtags)
+        for name in names:
+            repo.hook('tag', node=hex(node), tag=name, local=local)
+        return
+
+    try:
+        fp = repo.wvfs('.hgtags', 'rb+')
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        fp = repo.wvfs('.hgtags', 'ab')
+    else:
+        prevtags = fp.read()
+
+    # committed tags are stored in UTF-8
+    writetags(fp, names, encoding.fromlocal, prevtags)
+
+    fp.close()
+
+    repo.invalidatecaches()
+
+    if '.hgtags' not in repo.dirstate:
+        repo[None].add(['.hgtags'])
+
+    m = matchmod.exact(repo.root, '', ['.hgtags'])
+    tagnode = repo.commit(message, user, date, extra=extra, match=m,
+                          editor=editor)
+
+    for name in names:
+        repo.hook('tag', node=hex(node), tag=name, local=local)
+
+    return tagnode
+
 _fnodescachefile = 'cache/hgtagsfnodes1'
 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
 _fnodesmissingrec = '\xff' * 24
@@ -430,13 +648,12 @@
         self.lookupcount = 0
         self.hitcount = 0
 
-        self._raw = array('c')
 
         try:
             data = repo.vfs.read(_fnodescachefile)
         except (OSError, IOError):
             data = ""
-        self._raw.fromstring(data)
+        self._raw = bytearray(data)
 
         # The end state of self._raw is an array that is of the exact length
         # required to hold a record for every revision in the repository.
@@ -477,7 +694,7 @@
         self.lookupcount += 1
 
         offset = rev * _fnodesrecsize
-        record = self._raw[offset:offset + _fnodesrecsize].tostring()
+        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
         properprefix = node[0:4]
 
         # Validate and return existing entry.
@@ -518,7 +735,7 @@
 
     def _writeentry(self, offset, prefix, fnode):
         # Slices on array instances only accept other array.
-        entry = array('c', prefix + fnode)
+        entry = bytearray(prefix + fnode)
         self._raw[offset:offset + _fnodesrecsize] = entry
         # self._dirtyoffset could be None.
         self._dirtyoffset = min(self._dirtyoffset, offset) or 0
--- a/mercurial/templatefilters.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templatefilters.py	Tue Apr 18 12:24:34 2017 -0400
@@ -218,26 +218,24 @@
     return "".join(indenter())
 
 @templatefilter('json')
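+# A sketch of this filter's behaviour: json([1, 'a', None]) gives
+# '[1, "a", null]', and json({'x': True}) gives '{"x": true}'.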
-def json(obj):
-    if obj is None or obj is False or obj is True:
-        return {None: 'null', False: 'false', True: 'true'}[obj]
-    elif isinstance(obj, int) or isinstance(obj, float):
+def json(obj, paranoid=True):
+    if obj is None:
+        return 'null'
+    elif obj is False:
+        return 'false'
+    elif obj is True:
+        return 'true'
+    elif isinstance(obj, (int, long, float)):
         return str(obj)
     elif isinstance(obj, str):
-        return '"%s"' % encoding.jsonescape(obj, paranoid=True)
+        return '"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
     elif util.safehasattr(obj, 'keys'):
-        out = []
-        for k, v in sorted(obj.iteritems()):
-            s = '%s: %s' % (json(k), json(v))
-            out.append(s)
+        out = ['%s: %s' % (json(k), json(v))
+               for k, v in sorted(obj.iteritems())]
         return '{' + ', '.join(out) + '}'
     elif util.safehasattr(obj, '__iter__'):
-        out = []
-        for i in obj:
-            out.append(json(i))
+        out = [json(i) for i in obj]
         return '[' + ', '.join(out) + ']'
-    elif util.safehasattr(obj, '__call__'):
-        return json(obj())
     else:
         raise TypeError('cannot encode type %s' % obj.__class__.__name__)
 
@@ -341,17 +339,18 @@
 @templatefilter('splitlines')
 def splitlines(text):
     """Any text. Split text into a list of lines."""
-    return templatekw.showlist('line', text.splitlines(), 'lines')
+    return templatekw.hybridlist(text.splitlines(), name='line')
 
 @templatefilter('stringescape')
 def stringescape(text):
-    return text.encode('string_escape')
+    return util.escapestr(text)
 
 @templatefilter('stringify')
 def stringify(thing):
     """Any type. Turns the value into text by converting values into
     text and concatenating them.
     """
+    thing = templatekw.unwraphybrid(thing)
     if util.safehasattr(thing, '__iter__') and not isinstance(thing, str):
         return "".join([stringify(t) for t in thing if t is not None])
     if thing is None:
--- a/mercurial/templatekw.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templatekw.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,6 +7,7 @@
 
 from __future__ import absolute_import
 
+from .i18n import _
 from .node import hex, nullid
 from . import (
     encoding,
@@ -18,41 +19,78 @@
     util,
 )
 
-# This helper class allows us to handle both:
-#  "{files}" (legacy command-line-specific list hack) and
-#  "{files % '{file}\n'}" (hgweb-style with inlining and function support)
-# and to access raw values:
-#  "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}"
-#  "{get(extras, key)}"
+class _hybrid(object):
+    """Wrapper for list or dict to support legacy template
 
-class _hybrid(object):
+    This class allows us to handle both:
+    - "{files}" (legacy command-line-specific list hack) and
+    - "{files % '{file}\n'}" (hgweb-style with inlining and function support)
+    and to access raw values:
+    - "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}"
+    - "{get(extras, key)}"
+    - "{files|json}"
+    """
+
     def __init__(self, gen, values, makemap, joinfmt):
-        self.gen = gen
-        self.values = values
+        if gen is not None:
+            self.gen = gen
+        self._values = values
         self._makemap = makemap
         self.joinfmt = joinfmt
-    def __iter__(self):
-        return self.gen
+    @util.propertycache
+    def gen(self):
+        return self._defaultgen()
+    def _defaultgen(self):
+        """Generator to stringify this as {join(self, ' ')}"""
+        for i, d in enumerate(self.itermaps()):
+            if i > 0:
+                yield ' '
+            yield self.joinfmt(d)
     def itermaps(self):
         makemap = self._makemap
-        for x in self.values:
+        for x in self._values:
             yield makemap(x)
     def __contains__(self, x):
-        return x in self.values
+        return x in self._values
     def __len__(self):
-        return len(self.values)
+        return len(self._values)
+    def __iter__(self):
+        return iter(self._values)
     def __getattr__(self, name):
-        if name != 'get':
+        if name not in ('get', 'items', 'iteritems', 'iterkeys', 'itervalues',
+                        'keys', 'values'):
             raise AttributeError(name)
-        return getattr(self.values, name)
+        return getattr(self._values, name)
+
+def hybriddict(data, key='key', value='value', fmt='%s=%s', gen=None):
+    """Wrap data to support both dict-like and string-like operations"""
+    return _hybrid(gen, data, lambda k: {key: k, value: data[k]},
+                   lambda d: fmt % (d[key], d[value]))
+
+def hybridlist(data, name, fmt='%s', gen=None):
+    """Wrap data to support both list-like and string-like operations"""
+    return _hybrid(gen, data, lambda x: {name: x}, lambda d: fmt % d[name])
 
-def showlist(name, values, plural=None, element=None, separator=' ', **args):
+def unwraphybrid(thing):
+    """Return an object which can be stringified possibly by using a legacy
+    template"""
+    if not util.safehasattr(thing, 'gen'):
+        return thing
+    return thing.gen
+
+def showdict(name, data, mapping, plural=None, key='key', value='value',
+             fmt='%s=%s', separator=' '):
+    c = [{key: k, value: v} for k, v in data.iteritems()]
+    f = _showlist(name, c, mapping, plural, separator)
+    return hybriddict(data, key=key, value=value, fmt=fmt, gen=f)
+
+def showlist(name, values, mapping, plural=None, element=None, separator=' '):
     if not element:
         element = name
-    f = _showlist(name, values, plural, separator, **args)
-    return _hybrid(f, values, lambda x: {element: x}, lambda d: d[element])
+    f = _showlist(name, values, mapping, plural, separator)
+    return hybridlist(values, name=element, gen=f)
 
-def _showlist(name, values, plural=None, separator=' ', **args):
+def _showlist(name, values, mapping, plural=None, separator=' '):
     '''expand set of values.
     name is name of key in template map.
     values is list of strings or dicts.
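
The two constructors make the legacy/raw duality reusable outside this module. A small interactive sketch, assuming this tree is on the import path:

    from mercurial import templatekw

    files = templatekw.hybridlist(['a', 'b'], name='file')
    extras = templatekw.hybriddict({'branch': 'default'})

    assert 'a' in files and len(files) == 2          # raw list protocol intact
    assert extras.get('branch') == 'default'         # dict methods are proxied
    assert ''.join(files.gen) == 'a b'               # default legacy rendering
    assert ''.join(extras.gen) == 'branch=default'   # via joinfmt '%s=%s'
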
@@ -73,36 +111,35 @@
 
     expand 'end_foos'.
     '''
-    templ = args['templ']
-    if plural:
-        names = plural
-    else: names = name + 's'
+    templ = mapping['templ']
+    if not plural:
+        plural = name + 's'
     if not values:
-        noname = 'no_' + names
+        noname = 'no_' + plural
         if noname in templ:
-            yield templ(noname, **args)
+            yield templ(noname, **mapping)
         return
     if name not in templ:
         if isinstance(values[0], str):
             yield separator.join(values)
         else:
             for v in values:
-                yield dict(v, **args)
+                yield dict(v, **mapping)
         return
-    startname = 'start_' + names
+    startname = 'start_' + plural
     if startname in templ:
-        yield templ(startname, **args)
-    vargs = args.copy()
+        yield templ(startname, **mapping)
+    vmapping = mapping.copy()
     def one(v, tag=name):
         try:
-            vargs.update(v)
+            vmapping.update(v)
         except (AttributeError, ValueError):
             try:
                 for a, b in v:
-                    vargs[a] = b
+                    vmapping[a] = b
             except ValueError:
-                vargs[name] = v
-        return templ(tag, **vargs)
+                vmapping[name] = v
+        return templ(tag, **vmapping)
     lastname = 'last_' + name
     if lastname in templ:
         last = values.pop()
@@ -112,9 +149,9 @@
         yield one(v)
     if last is not None:
         yield one(last, tag=lastname)
-    endname = 'end_' + names
+    endname = 'end_' + plural
     if endname in templ:
-        yield templ(endname, **args)
+        yield templ(endname, **mapping)
 
 def _formatrevnode(ctx):
     """Format changeset as '{rev}:{node|formatnode}', which is the default
@@ -204,6 +241,17 @@
 
     return getrenamed
 
+# default templates internally used for rendering of lists
+defaulttempl = {
+    'parent': '{rev}:{node|formatnode} ',
+    'manifest': '{rev}:{node|formatnode}',
+    'file_copy': '{name} ({source})',
+    'envvar': '{key}={value}',
+    'extra': '{key}={value|stringescape}'
+}
+# filecopy is preserved for compatibility reasons
+defaulttempl['filecopy'] = defaulttempl['file_copy']
+
 # keywords are callables like:
 # fn(repo, ctx, templ, cache, revcache, **args)
 # with:
@@ -241,8 +289,8 @@
     """
     branch = args['ctx'].branch()
     if branch != 'default':
-        return showlist('branch', [branch], plural='branches', **args)
-    return showlist('branch', [], plural='branches', **args)
+        return showlist('branch', [branch], args, plural='branches')
+    return showlist('branch', [], args, plural='branches')
 
 @templatekeyword('bookmarks')
 def showbookmarks(**args):
@@ -253,7 +301,7 @@
     bookmarks = args['ctx'].bookmarks()
     active = repo._activebookmark
     makemap = lambda v: {'bookmark': v, 'active': active, 'current': active}
-    f = _showlist('bookmark', bookmarks, **args)
+    f = _showlist('bookmark', bookmarks, args)
     return _hybrid(f, bookmarks, makemap, lambda x: x['bookmark'])
 
 @templatekeyword('children')
@@ -261,7 +309,7 @@
     """List of strings. The children of the changeset."""
     ctx = args['ctx']
     childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()]
-    return showlist('children', childrevs, element='child', **args)
+    return showlist('children', childrevs, args, element='child')
 
 # Deprecated, but kept alive for help generation purposes.
 @templatekeyword('currentbookmark')
@@ -306,14 +354,9 @@
 @templatekeyword('envvars')
 def showenvvars(repo, **args):
     """A dictionary of environment variables. (EXPERIMENTAL)"""
-
     env = repo.ui.exportableenviron()
     env = util.sortdict((k, env[k]) for k in sorted(env))
-    makemap = lambda k: {'key': k, 'value': env[k]}
-    c = [makemap(k) for k in env]
-    f = _showlist('envvar', c, plural='envvars', **args)
-    return _hybrid(f, env, makemap,
-                   lambda x: '%s=%s' % (x['key'], x['value']))
+    return showdict('envvar', env, args, plural='envvars')
 
 @templatekeyword('extras')
 def showextras(**args):
@@ -323,16 +366,16 @@
     extras = util.sortdict((k, extras[k]) for k in sorted(extras))
     makemap = lambda k: {'key': k, 'value': extras[k]}
     c = [makemap(k) for k in extras]
-    f = _showlist('extra', c, plural='extras', **args)
+    f = _showlist('extra', c, args, plural='extras')
     return _hybrid(f, extras, makemap,
-                   lambda x: '%s=%s' % (x['key'], x['value']))
+                   lambda x: '%s=%s' % (x['key'], util.escapestr(x['value'])))
 
 @templatekeyword('file_adds')
 def showfileadds(**args):
     """List of strings. Files added by this changeset."""
     repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
-    return showlist('file_add', getfiles(repo, ctx, revcache)[1],
-                    element='file', **args)
+    return showlist('file_add', getfiles(repo, ctx, revcache)[1], args,
+                    element='file')
 
 @templatekeyword('file_copies')
 def showfilecopies(**args):
@@ -352,11 +395,8 @@
                 copies.append((fn, rename[0]))
 
     copies = util.sortdict(copies)
-    makemap = lambda k: {'name': k, 'source': copies[k]}
-    c = [makemap(k) for k in copies]
-    f = _showlist('file_copy', c, plural='file_copies', **args)
-    return _hybrid(f, copies, makemap,
-                   lambda x: '%s (%s)' % (x['name'], x['source']))
+    return showdict('file_copy', copies, args, plural='file_copies',
+                    key='name', value='source', fmt='%s (%s)')
 
 # showfilecopiesswitch() displays file copies only if copy records are
 # provided before calling the templater, usually with a --copies
@@ -368,32 +408,29 @@
     """
     copies = args['revcache'].get('copies') or []
     copies = util.sortdict(copies)
-    makemap = lambda k: {'name': k, 'source': copies[k]}
-    c = [makemap(k) for k in copies]
-    f = _showlist('file_copy', c, plural='file_copies', **args)
-    return _hybrid(f, copies, makemap,
-                   lambda x: '%s (%s)' % (x['name'], x['source']))
+    return showdict('file_copy', copies, args, plural='file_copies',
+                    key='name', value='source', fmt='%s (%s)')
 
 @templatekeyword('file_dels')
 def showfiledels(**args):
     """List of strings. Files removed by this changeset."""
     repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
-    return showlist('file_del', getfiles(repo, ctx, revcache)[2],
-                    element='file', **args)
+    return showlist('file_del', getfiles(repo, ctx, revcache)[2], args,
+                    element='file')
 
 @templatekeyword('file_mods')
 def showfilemods(**args):
     """List of strings. Files modified by this changeset."""
     repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
-    return showlist('file_mod', getfiles(repo, ctx, revcache)[0],
-                    element='file', **args)
+    return showlist('file_mod', getfiles(repo, ctx, revcache)[0], args,
+                    element='file')
 
 @templatekeyword('files')
 def showfiles(**args):
     """List of strings. All files modified, added, or removed by this
     changeset.
     """
-    return showlist('file', args['ctx'].files(), **args)
+    return showlist('file', args['ctx'].files(), args)
 
 @templatekeyword('graphnode')
 def showgraphnode(repo, ctx, **args):
@@ -411,10 +448,17 @@
     else:
         return 'o'
 
+@templatekeyword('index')
+def showindex(**args):
+    """Integer. The current iteration of the loop. (0 indexed)"""
+    # just hosts documentation; should be overridden by template mapping
+    raise error.Abort(_("can't use index in this context"))
+
 @templatekeyword('latesttag')
 def showlatesttag(**args):
     """List of strings. The global tags on the most recent globally
-    tagged ancestor of this changeset.
+    tagged ancestor of this changeset.  If no such tags exist, the list
+    consists of the single string "null".
     """
     return showlatesttags(None, **args)
 
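
{index} itself is injected per-iteration by the buildmap() change in templater.py further down, so it only resolves inside a '%' expansion; used bare, it reaches this Abort. Hypothetical output for a changeset touching files a and b:

    $ hg log -r . -T "{files % '{index}: {file}\n'}"
    0: a
    1: b
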
@@ -435,7 +479,7 @@
     }
 
     tags = latesttags[2]
-    f = _showlist('latesttag', tags, separator=':', **args)
+    f = _showlist('latesttag', tags, args, separator=':')
     return _hybrid(f, tags, makemap, lambda x: x['latesttag'])
 
 @templatekeyword('latesttagdistance')
@@ -480,7 +524,7 @@
     repo = ctx.repo()
     ns = repo.names[namespace]
     names = ns.names(repo, ctx.node())
-    return showlist(ns.templatename, names, plural=namespace, **args)
+    return showlist(ns.templatename, names, args, plural=namespace)
 
 @templatekeyword('namespaces')
 def shownamespaces(**args):
@@ -489,9 +533,9 @@
     ctx = args['ctx']
     repo = ctx.repo()
     namespaces = util.sortdict((k, showlist('name', ns.names(repo, ctx.node()),
-                                            **args))
+                                            args))
                                for k, ns in repo.names.iteritems())
-    f = _showlist('namespace', list(namespaces), **args)
+    f = _showlist('namespace', list(namespaces), args)
     return _hybrid(f, namespaces,
                    lambda k: {'namespace': k, 'names': namespaces[k]},
                    lambda x: x['namespace'])
@@ -503,6 +547,14 @@
     """
     return ctx.hex()
 
+@templatekeyword('obsolete')
+def showobsolete(repo, ctx, templ, **args):
+    """String. Whether the changeset is obsolete.
+    """
+    if ctx.obsolete():
+        return 'obsolete'
+    return ''
+
 @templatekeyword('p1rev')
 def showp1rev(repo, ctx, templ, **args):
     """Integer. The repository-local revision number of the changeset's
@@ -542,7 +594,7 @@
                 ('node', p.hex()),
                 ('phase', p.phasestr())]
                for p in pctxs]
-    f = _showlist('parent', parents, **args)
+    f = _showlist('parent', parents, args)
     return _hybrid(f, prevs, lambda x: {'ctx': repo[int(x)], 'revcache': {}},
                    lambda d: _formatrevnode(d['ctx']))
 
@@ -566,7 +618,7 @@
     be evaluated"""
     repo = args['ctx'].repo()
     revs = [str(r) for r in revs]  # ifcontains() needs a list of str
-    f = _showlist(name, revs, **args)
+    f = _showlist(name, revs, args)
     return _hybrid(f, revs,
                    lambda x: {name: x, 'ctx': repo[int(x)], 'revcache': {}},
                    lambda d: d[name])
@@ -577,7 +629,7 @@
     ctx = args['ctx']
     substate = ctx.substate
     if not substate:
-        return showlist('subrepo', [], **args)
+        return showlist('subrepo', [], args)
     psubstate = ctx.parents()[0].substate or {}
     subrepos = []
     for sub in substate:
@@ -586,7 +638,7 @@
     for sub in psubstate:
         if sub not in substate:
             subrepos.append(sub) # removed in ctx
-    return showlist('subrepo', sorted(subrepos), **args)
+    return showlist('subrepo', sorted(subrepos), args)
 
 # don't remove "showtags" definition, even though namespaces will put
 # a helper function for "tags" keyword into "keywords" map automatically,
@@ -613,7 +665,7 @@
 
     (EXPERIMENTAL)
     """
-    return showlist('trouble', args['ctx'].troubles(), **args)
+    return showlist('trouble', args['ctx'].troubles(), args)
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = keywords.values()
--- a/mercurial/templater.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templater.py	Tue Apr 18 12:24:34 2017 -0400
@@ -13,13 +13,16 @@
 
 from .i18n import _
 from . import (
+    color,
     config,
+    encoding,
     error,
     minirst,
     parser,
     pycompat,
     registrar,
     revset as revsetmod,
+    revsetlang,
     templatefilters,
     templatekw,
     util,
@@ -30,14 +33,15 @@
 elements = {
     # token-type: binding-strength, primary, prefix, infix, suffix
     "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    "%": (16, None, None, ("%", 16), None),
+    "|": (15, None, None, ("|", 15), None),
+    "*": (5, None, None, ("*", 5), None),
+    "/": (5, None, None, ("/", 5), None),
+    "+": (4, None, None, ("+", 4), None),
+    "-": (4, None, ("negate", 19), ("-", 4), None),
+    "=": (3, None, None, ("keyvalue", 3), None),
     ",": (2, None, None, ("list", 2), None),
-    "|": (5, None, None, ("|", 5), None),
-    "%": (6, None, None, ("%", 6), None),
     ")": (0, None, None, None, None),
-    "+": (3, None, None, ("+", 3), None),
-    "-": (3, None, ("negate", 10), ("-", 3), None),
-    "*": (4, None, None, ("*", 4), None),
-    "/": (4, None, None, ("/", 4), None),
     "integer": (0, "integer", None, None, None),
     "symbol": (0, "symbol", None, None, None),
     "string": (0, "string", None, None, None),
@@ -53,7 +57,7 @@
         c = program[pos]
         if c.isspace(): # skip inter-token whitespace
             pass
-        elif c in "(,)%|+-*/": # handle simple operators
+        elif c in "(=,)%|+-*/": # handle simple operators
             yield (c, None, pos)
         elif c in '"\'': # handle quoted templates
             s = pos + 1
@@ -280,6 +284,18 @@
         return context._load(exp[1])
     raise error.ParseError(_("expected template specifier"))
 
+def findsymbolicname(arg):
+    """Find symbolic name for the given compiled expression; returns None
+    if nothing found reliably"""
+    while True:
+        func, data = arg
+        if func is runsymbol:
+            return data
+        elif func is runfilter:
+            arg = data[0]
+        else:
+            return None
+
 def evalfuncarg(context, mapping, arg):
     func, data = arg
     # func() may return string, generator of strings or arbitrary object such
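
A self-contained mimic of the walk, with stand-ins for the real compiled pairs; a compiled "{node|short}" has the shape (runfilter, ((runsymbol, 'node'), <filter>)):

    def runsymbol():    # stand-in for templater.runsymbol
        pass
    def runfilter():    # stand-in for templater.runfilter
        pass

    def findname(arg):
        while True:
            func, data = arg
            if func is runsymbol:
                return data
            elif func is runfilter:
                arg = data[0]        # peel one filter application
            else:
                return None

    assert findname((runfilter, ((runsymbol, 'node'), None))) == 'node'
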
@@ -366,14 +382,15 @@
         yield func(context, mapping, data)
 
 def buildfilter(exp, context):
-    arg = compileexp(exp[1], context, methods)
     n = getsymbol(exp[2])
     if n in context._filters:
         filt = context._filters[n]
+        arg = compileexp(exp[1], context, methods)
         return (runfilter, (arg, filt))
     if n in funcs:
         f = funcs[n]
-        return (f, [arg])
+        args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
+        return (f, args)
     raise error.ParseError(_("unknown function '%s'") % n)
 
 def runfilter(context, mapping, data):
@@ -382,12 +399,13 @@
     try:
         return filt(thing)
     except (ValueError, AttributeError, TypeError):
-        if isinstance(arg[1], tuple):
-            dt = arg[1][1]
+        sym = findsymbolicname(arg)
+        if sym:
+            msg = (_("template filter '%s' is not compatible with keyword '%s'")
+                   % (filt.func_name, sym))
         else:
-            dt = arg[1]
-        raise error.Abort(_("template filter '%s' is not compatible with "
-                           "keyword '%s'") % (filt.func_name, dt))
+            msg = _("incompatible use of template filter '%s'") % filt.func_name
+        raise error.Abort(msg)
 
 def buildmap(exp, context):
     func, data = compileexp(exp[1], context, methods)
@@ -408,17 +426,18 @@
             else:
                 raise error.ParseError(_("%r is not iterable") % d)
 
-    for i in diter:
+    for i, v in enumerate(diter):
         lm = mapping.copy()
-        if isinstance(i, dict):
-            lm.update(i)
+        lm['index'] = i
+        if isinstance(v, dict):
+            lm.update(v)
             lm['originalnode'] = mapping.get('node')
             yield tfunc(context, lm, tdata)
         else:
             # v is not an iterable of dicts, this happens when 'key'
             # has been fully expanded already and format is useless.
             # If so, return the expanded value.
-            yield i
+            yield v
 
 def buildnegate(exp, context):
     arg = compileexp(exp[1], context, exprmethods)
@@ -447,17 +466,58 @@
 
 def buildfunc(exp, context):
     n = getsymbol(exp[1])
-    args = [compileexp(x, context, exprmethods) for x in getlist(exp[2])]
     if n in funcs:
         f = funcs[n]
+        args = _buildfuncargs(exp[2], context, exprmethods, n, f._argspec)
         return (f, args)
     if n in context._filters:
+        args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
         if len(args) != 1:
             raise error.ParseError(_("filter %s expects one argument") % n)
         f = context._filters[n]
         return (runfilter, (args[0], f))
     raise error.ParseError(_("unknown function '%s'") % n)
 
+def _buildfuncargs(exp, context, curmethods, funcname, argspec):
+    """Compile parsed tree of function arguments into list or dict of
+    (func, data) pairs
+
+    >>> context = engine(lambda t: (runsymbol, t))
+    >>> def fargs(expr, argspec):
+    ...     x = _parseexpr(expr)
+    ...     n = getsymbol(x[1])
+    ...     return _buildfuncargs(x[2], context, exprmethods, n, argspec)
+    >>> fargs('a(l=1, k=2)', 'k l m').keys()
+    ['l', 'k']
+    >>> args = fargs('a(opts=1, k=2)', '**opts')
+    >>> args.keys(), args['opts'].keys()
+    (['opts'], ['opts', 'k'])
+    """
+    def compiledict(xs):
+        return util.sortdict((k, compileexp(x, context, curmethods))
+                             for k, x in xs.iteritems())
+    def compilelist(xs):
+        return [compileexp(x, context, curmethods) for x in xs]
+
+    if not argspec:
+        # filter or function with no argspec: return list of positional args
+        return compilelist(getlist(exp))
+
+    # function with argspec: return dict of named args
+    _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
+    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
+                                    keyvaluenode='keyvalue', keynode='symbol')
+    compargs = util.sortdict()
+    if varkey:
+        compargs[varkey] = compilelist(treeargs.pop(varkey))
+    if optkey:
+        compargs[optkey] = compiledict(treeargs.pop(optkey))
+    compargs.update(compiledict(treeargs))
+    return compargs
+
+def buildkeyvaluepair(exp, content):
+    raise error.ParseError(_("can't use a key-value pair in this context"))
+
 # dict of template built-in functions
 funcs = {}
 
@@ -485,6 +545,24 @@
         # i18n: "date" is a keyword
         raise error.ParseError(_("date expects a date information"))
 
+@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
+def dict_(context, mapping, args):
+    """Construct a dict from key-value pairs. A key may be omitted if
+    a value expression can provide an unambiguous name."""
+    data = util.sortdict()
+
+    for v in args['args']:
+        k = findsymbolicname(v)
+        if not k:
+            raise error.ParseError(_('dict key cannot be inferred'))
+        if k in data or k in args['kwargs']:
+            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
+        data[k] = evalfuncarg(context, mapping, v)
+
+    data.update((k, evalfuncarg(context, mapping, v))
+                for k, v in args['kwargs'].iteritems())
+    return templatekw.hybriddict(data)
+
 @templatefunc('diff([includepattern [, excludepattern]])')
 def diff(context, mapping, args):
     """Show a diff, optionally
@@ -517,7 +595,7 @@
     ctx = mapping['ctx']
     m = ctx.match([raw])
     files = list(ctx.matches(m))
-    return templatekw.showlist("file", files, **mapping)
+    return templatekw.showlist("file", files, mapping)
 
 @templatefunc('fill(text[, width[, initialindent[, hangindent]]])')
 def fill(context, mapping, args):
@@ -543,31 +621,51 @@
 
     return templatefilters.fill(text, width, initindent, hangindent)
 
-@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])')
+@templatefunc('formatnode(node)')
+def formatnode(context, mapping, args):
+    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
+    if len(args) != 1:
+        # i18n: "formatnode" is a keyword
+        raise error.ParseError(_("formatnode expects one argument"))
+
+    ui = mapping['ui']
+    node = evalstring(context, mapping, args[0])
+    if ui.debugflag:
+        return node
+    return templatefilters.short(node)
+
+@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
+              argspec='text width fillchar left')
 def pad(context, mapping, args):
     """Pad text with a
     fill character."""
-    if not (2 <= len(args) <= 4):
+    if 'text' not in args or 'width' not in args:
         # i18n: "pad" is a keyword
         raise error.ParseError(_("pad() expects two to four arguments"))
 
-    width = evalinteger(context, mapping, args[1],
+    width = evalinteger(context, mapping, args['width'],
                         # i18n: "pad" is a keyword
                         _("pad() expects an integer width"))
 
-    text = evalstring(context, mapping, args[0])
+    text = evalstring(context, mapping, args['text'])
 
     left = False
     fillchar = ' '
-    if len(args) > 2:
-        fillchar = evalstring(context, mapping, args[2])
-    if len(args) > 3:
-        left = evalboolean(context, mapping, args[3])
+    if 'fillchar' in args:
+        fillchar = evalstring(context, mapping, args['fillchar'])
+        if len(color.stripeffects(fillchar)) != 1:
+            # i18n: "pad" is a keyword
+            raise error.ParseError(_("pad() expects a single fill character"))
+    if 'left' in args:
+        left = evalboolean(context, mapping, args['left'])
 
+    fillwidth = width - encoding.colwidth(color.stripeffects(text))
+    if fillwidth <= 0:
+        return text
     if left:
-        return text.rjust(width, fillchar)
+        return fillchar * fillwidth + text
     else:
-        return text.ljust(width, fillchar)
+        return text + fillchar * fillwidth
 
 @templatefunc('indent(text, indentchars[, firstline])')
 def indent(context, mapping, args):
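
pad() now measures the fill width with encoding.colwidth() on the effect-stripped text, so colorized or double-width input still pads to the intended column, and the argspec allows the optional arguments by name. Illustrative:

    $ hg log -r . -T "{pad(branch, 12)}{rev}\n"
    $ hg log -r . -T "{pad(rev, 8, fillchar='0', left=True)}\n"
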
@@ -696,7 +794,9 @@
 @templatefunc('latesttag([pattern])')
 def latesttag(context, mapping, args):
     """The global tags matching the given pattern on the
-    most recent globally tagged ancestor of this changeset."""
+    most recent globally tagged ancestor of this changeset.
+    If no such tags exist, the "{tag}" template resolves to
+    the string "null"."""
     if len(args) > 1:
         # i18n: "latesttag" is a keyword
         raise error.ParseError(_("latesttag expects at most one argument"))
@@ -778,7 +878,7 @@
 
     if len(args) > 1:
         formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
-        revs = query(revsetmod.formatspec(raw, *formatargs))
+        revs = query(revsetlang.formatspec(raw, *formatargs))
         revs = list(revs)
     else:
         revsetcache = mapping['cache'].setdefault("revsetcache", {})
@@ -803,16 +903,16 @@
 
     return minirst.format(text, style=style, keep=['verbose'])
 
-@templatefunc('separate(sep, args)')
+@templatefunc('separate(sep, args)', argspec='sep *args')
 def separate(context, mapping, args):
     """Add a separator between non-empty arguments."""
-    if not args:
+    if 'sep' not in args:
         # i18n: "separate" is a keyword
         raise error.ParseError(_("separate expects at least one argument"))
 
-    sep = evalstring(context, mapping, args[0])
+    sep = evalstring(context, mapping, args['sep'])
     first = True
-    for arg in args[1:]:
+    for arg in args['args']:
         argstr = evalstring(context, mapping, arg)
         if not argstr:
             continue
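
With argspec 'sep *args', the variadic arguments arrive under args['args'] and empty ones are skipped, so a changeset without bookmarks or tags produces no doubled separators:

    $ hg log -r . -T "{separate(' ', rev, bookmarks, tags)}\n"
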
@@ -959,6 +1059,7 @@
     "|": buildfilter,
     "%": buildmap,
     "func": buildfunc,
+    "keyvalue": buildkeyvaluepair,
     "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
     "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
     "negate": buildnegate,
@@ -995,6 +1096,7 @@
 
 def _flatten(thing):
     '''yield a single stream from a possibly nested set of iterators'''
+    thing = templatekw.unwraphybrid(thing)
     if isinstance(thing, str):
         yield thing
     elif thing is None:
@@ -1003,6 +1105,7 @@
         yield str(thing)
     else:
         for i in thing:
+            i = templatekw.unwraphybrid(i)
             if isinstance(i, str):
                 yield i
             elif i is None:
--- a/mercurial/templates/gitweb/changelogentry.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/changelogentry.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -7,7 +7,7 @@
 </div>
 <i>{author|obfuscate} [{date|rfc822date}] rev {rev}</i><br/>
 </div>
-<div class="log_body">
+<div class="log_body description">
 {desc|strip|escape|websub|addbreaks|nonempty}
 <br/>
 <br/>
--- a/mercurial/templates/gitweb/changeset.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/changeset.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -43,7 +43,7 @@
 {child%changesetchild}
 </table></div>
 
-<div class="page_body">
+<div class="page_body description">
 {desc|strip|escape|websub|addbreaks|nonempty}
 </div>
 <div class="list_head"></div>
--- a/mercurial/templates/gitweb/fileannotate.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/fileannotate.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -59,7 +59,7 @@
 </table>
 </div>
 
-<div class="page_path">
+<div class="page_path description">
 {desc|strip|escape|websub|addbreaks|nonempty}
 </div>
 <div class="page_body">
--- a/mercurial/templates/gitweb/filelog.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/filelog.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -31,13 +31,18 @@
 {nav%filenav}
 </div>
 
-<div class="title" >{file|urlescape}</div>
+<div class="title" >
+  {file|urlescape}{if(linerange,
+' (following lines {linerange}{if(descend, ', descending')} <a href="{url|urlescape}log/{symrev}/{file|urlescape}{sessionvars%urlparameter}">back to filelog</a>)')}
+</div>
 
 <table>
 {entries%filelogentry}
 </table>
 
 <div class="page_nav">
+<a href="{url|urlescape}log/{symrev}/{file|urlescape}{lessvars%urlparameter}">less</a>
+<a href="{url|urlescape}log/{symrev}/{file|urlescape}{morevars%urlparameter}">more</a>
 {nav%filenav}
 </div>
 
--- a/mercurial/templates/gitweb/filerevision.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/filerevision.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -59,12 +59,14 @@
 </table>
 </div>
 
-<div class="page_path">
+<div class="page_path description">
 {desc|strip|escape|websub|addbreaks|nonempty}
 </div>
 
 <div class="page_body">
-<pre class="sourcelines stripes">{text%fileline}</pre>
+<pre class="sourcelines stripes" data-logurl="{url|urlescape}log/{symrev}/{file|urlescape}">{text%fileline}</pre>
 </div>
 
+<script type="text/javascript" src="{staticurl|urlescape}followlines.js"></script>
+
 {footer}
--- a/mercurial/templates/gitweb/map	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/gitweb/map	Tue Apr 18 12:24:34 2017 -0400
@@ -282,7 +282,7 @@
     </td>
   </tr>'
 filelogentry = '
-  <tr class="parity{parity}">
+  <tr class="parity{if(patch, '1', '{parity}')}">
     <td class="age"><i class="age">{date|rfc822date}</i></td>
     <td><i>{author|person}</i></td>
     <td>
@@ -297,7 +297,8 @@
       <a href="{url|urlescape}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
       {rename%filelogrename}
     </td>
-  </tr>'
+  </tr>
+  {if(patch, '<tr><td colspan="4">{diff}</td></tr>')}'
 archiveentry = ' | <a href="{url|urlescape}archive/{symrev}{extension}{ifeq(path,'/','',path|urlescape)}">{type|escape}</a> '
 indexentry = '
   <tr class="parity{parity}">
--- a/mercurial/templates/map-cmdline.default	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/map-cmdline.default	Tue Apr 18 12:24:34 2017 -0400
@@ -29,7 +29,9 @@
 
 # General templates
 _trouble_label = 'trouble.{trouble}'
-_cset_labels = 'log.changeset changeset.{phase}{if(troubles, " changeset.troubled {troubles%_trouble_label}")}'
+_troubles_labels = '{if(troubles, "changeset.troubled {troubles%_trouble_label}")}'
+_obsolete_label = '{if(obsolete, "changeset.obsolete")}'
+_cset_labels = '{separate(" ", "log.changeset", "changeset.{phase}", "{_obsolete_label}", "{_troubles_labels}")}'
 cset = '{label("{_cset_labels}",
                "changeset:   {rev}:{node|short}")}\n'
 
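
For an obsolete draft changeset without troubles, separate() collapses the empty pieces, so the label set becomes, e.g.:

    log.changeset changeset.draft changeset.obsolete
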
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/map-cmdline.show	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,3 @@
+# TODO add label() once we figure out which namespace the labels belong on.
+showbookmarks = '{if(active, "*", " ")} {pad(bookmark, longestbookmarklen + 4)}{shortest(node, 5)}\n'
+showunderway = '{shortest(node, 5)}{if(branches, " ({branch})")}{if(bookmarks, " ({bookmarks})")} {desc|firstline}'
--- a/mercurial/templates/paper/filelog.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/paper/filelog.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -47,6 +47,8 @@
 <h3>
  log {file|escape} @ {rev}:<a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
  {branch%changelogbranchname}{tags%changelogtag}{bookmarks%changelogtag}
+ {if(linerange,
+' (following lines {linerange}{if(descend, ', descending')} <a href="{url|urlescape}log/{symrev}/{file|urlescape}{sessionvars%urlparameter}">back to filelog</a>)')}
 </h3>
 
 <form class="search" action="{url|urlescape}log">
--- a/mercurial/templates/paper/filelogentry.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/paper/filelogentry.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -6,3 +6,4 @@
    {inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}{bookmarks%changelogtag}{rename%filelogrename}
   </td>
  </tr>
+ {if(patch, '<tr><td colspan="3">{diff}</td></tr>')}
--- a/mercurial/templates/paper/filerevision.tmpl	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/paper/filerevision.tmpl	Tue Apr 18 12:24:34 2017 -0400
@@ -71,8 +71,11 @@
 <div class="overflow">
 <div class="sourcefirst linewraptoggle">line wrap: <a class="linewraplink" href="javascript:toggleLinewrap()">on</a></div>
 <div class="sourcefirst"> line source</div>
-<pre class="sourcelines stripes4 wrap bottomline">{text%fileline}</pre>
+<pre class="sourcelines stripes4 wrap bottomline" data-logurl="{url|urlescape}log/{symrev}/{file|urlescape}">{text%fileline}</pre>
 </div>
+
+<script type="text/javascript" src="{staticurl|urlescape}followlines.js"></script>
+
 </div>
 </div>
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/templates/static/followlines.js	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,229 @@
+// followlines.js - JavaScript utilities for followlines UI
+//
+// Copyright 2017 Logilab SA <contact@logilab.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//** Install event listeners for line block selection and followlines action */
+document.addEventListener('DOMContentLoaded', function() {
+    var sourcelines = document.getElementsByClassName('sourcelines')[0];
+    if (typeof sourcelines === 'undefined') {
+        return;
+    }
+    // URL to complement with "linerange" query parameter
+    var targetUri = sourcelines.dataset.logurl;
+    if (typeof targetUri === 'undefined') {
+        return;
+    }
+
+    // tooltip to invite on lines selection
+    var tooltip = document.createElement('div');
+    tooltip.id = 'followlines-tooltip';
+    tooltip.classList.add('hidden');
+    var initTooltipText = 'click to start following lines history from here';
+    tooltip.textContent = initTooltipText;
+    sourcelines.appendChild(tooltip);
+
+    //* position "element" on top-right of cursor */
+    function positionTopRight(element, event) {
+        var x = (event.clientX + 10) + 'px',
+            y = (event.clientY - 20) + 'px';
+        element.style.top = y;
+        element.style.left = x;
+    }
+
+    var tooltipTimeoutID;
+    //* move the "tooltip" with cursor (top-right) and show it after 1s */
+    function moveAndShowTooltip(e) {
+        if (typeof tooltipTimeoutID !== 'undefined') {
+            // avoid accumulation of timeout callbacks (blinking)
+            window.clearTimeout(tooltipTimeoutID);
+        }
+        tooltip.classList.add('hidden');
+        positionTopRight(tooltip, e);
+        tooltipTimeoutID = window.setTimeout(function() {
+            tooltip.classList.remove('hidden');
+        }, 1000);
+    }
+
+    // on mousemove, show tooltip close to cursor position
+    sourcelines.addEventListener('mousemove', moveAndShowTooltip);
+
+    // retrieve all direct <span> children of <pre class="sourcelines">
+    var spans = Array.prototype.filter.call(
+        sourcelines.children,
+        function(x) { return x.tagName === 'SPAN' });
+
+    // add a "followlines-select" class to change cursor type in CSS
+    for (var i = 0; i < spans.length; i++) {
+        spans[i].classList.add('followlines-select');
+    }
+
+    var lineSelectedCSSClass = 'followlines-selected';
+
+    //** add CSS class on <span> element in `from`-`to` line range */
+    function addSelectedCSSClass(from, to) {
+        for (var i = from; i <= to; i++) {
+            spans[i].classList.add(lineSelectedCSSClass);
+        }
+    }
+
+    //** remove CSS class from previously selected lines */
+    function removeSelectedCSSClass() {
+        var elements = sourcelines.getElementsByClassName(
+            lineSelectedCSSClass);
+        while (elements.length) {
+            elements[0].classList.remove(lineSelectedCSSClass);
+        }
+    }
+
+    //** return the <span> element parent of `element` */
+    function findParentSpan(element) {
+        var parent = element.parentElement;
+        if (parent === null) {
+            return null;
+        }
+        if (element.tagName == 'SPAN' && parent.isSameNode(sourcelines)) {
+            return element;
+        }
+        return findParentSpan(parent);
+    }
+
+    //** event handler for "click" on the first line of a block */
+    function lineSelectStart(e) {
+        var startElement = findParentSpan(e.target);
+        if (startElement === null) {
+            // not a <span> (maybe <a>): abort, keeping event listener
+            // registered for other click with <span> target
+            return;
+        }
+
+        // update tooltip text
+        tooltip.textContent = 'click again to terminate line block selection here';
+
+        var startId = parseInt(startElement.id.slice(1));
+        startElement.classList.add(lineSelectedCSSClass); // CSS
+
+        // remove this event listener
+        sourcelines.removeEventListener('click', lineSelectStart);
+
+        //** event handler for "click" on the last line of the block */
+        function lineSelectEnd(e) {
+            var endElement = findParentSpan(e.target);
+            if (endElement === null) {
+                // not a <span> (maybe <a>): abort, keeping event listener
+                // registered for other click with <span> target
+                return;
+            }
+
+            // remove this event listener
+            sourcelines.removeEventListener('click', lineSelectEnd);
+
+            // hide tooltip and disable motion tracking
+            tooltip.classList.add('hidden');
+            sourcelines.removeEventListener('mousemove', moveAndShowTooltip);
+            window.clearTimeout(tooltipTimeoutID);
+
+            //* restore initial "tooltip" state */
+            function restoreTooltip() {
+                tooltip.textContent = initTooltipText;
+                sourcelines.addEventListener('mousemove', moveAndShowTooltip);
+            }
+
+            // compute line range (startId, endId)
+            var endId = parseInt(endElement.id.slice(1));
+            if (endId == startId) {
+                // clicked twice the same line, cancel and reset initial state
+                // (CSS, event listener for selection start, tooltip)
+                removeSelectedCSSClass();
+                sourcelines.addEventListener('click', lineSelectStart);
+                restoreTooltip();
+                return;
+            }
+            var inviteElement = endElement;
+            if (endId < startId) {
+                var tmp = endId;
+                endId = startId;
+                startId = tmp;
+                inviteElement = startElement;
+            }
+
+            addSelectedCSSClass(startId - 1, endId - 1);  // ids are 1-based, spans 0-based
+
+            // append the <div id="followlines"> element to last line of the
+            // selection block
+            var divAndButton = followlinesBox(targetUri, startId, endId);
+            var div = divAndButton[0],
+                button = divAndButton[1];
+            inviteElement.appendChild(div);
+            // set position close to cursor (top-right)
+            positionTopRight(div, e);
+
+            //** event handler for cancelling selection */
+            function cancel() {
+                // remove invite box
+                div.parentNode.removeChild(div);
+                // restore initial event listeners
+                sourcelines.addEventListener('click', lineSelectStart);
+                sourcelines.removeEventListener('click', cancel);
+                // remove styles on selected lines
+                removeSelectedCSSClass();
+                // restore tooltip element
+                restoreTooltip();
+            }
+
+            // bind cancel event to click on <button>
+            button.addEventListener('click', cancel);
+            // as well as on an click on any source line
+            sourcelines.addEventListener('click', cancel);
+        }
+
+        sourcelines.addEventListener('click', lineSelectEnd);
+
+    }
+
+    sourcelines.addEventListener('click', lineSelectStart);
+
+    //** return a <div id="followlines"> and inner cancel <button> elements */
+    function followlinesBox(targetUri, fromline, toline) {
+        // <div id="followlines">
+        var div = document.createElement('div');
+        div.id = 'followlines';
+
+        //   <div class="followlines-cancel">
+        var buttonDiv = document.createElement('div');
+        buttonDiv.classList.add('followlines-cancel');
+
+        //     <button>x</button>
+        var button = document.createElement('button');
+        button.textContent = 'x';
+        buttonDiv.appendChild(button);
+        div.appendChild(buttonDiv);
+
+        //   <div class="followlines-link">
+        var aDiv = document.createElement('div');
+        aDiv.classList.add('followlines-link');
+        aDiv.textContent = 'follow history of lines ' + fromline + ':' + toline + ':';
+        var linesep = document.createElement('br');
+        aDiv.appendChild(linesep);
+        //     link to "ascending" followlines
+        var aAsc = document.createElement('a');
+        var url = targetUri + '?patch=&linerange=' + fromline + ':' + toline;
+        aAsc.setAttribute('href', url);
+        aAsc.textContent = 'ascending';
+        aDiv.appendChild(aAsc);
+        var sep = document.createTextNode(' / ');
+        aDiv.appendChild(sep);
+        //     link to "descending" followlines
+        var aDesc = document.createElement('a');
+        aDesc.setAttribute('href', url + '&descend=');
+        aDesc.textContent = 'descending';
+        aDiv.appendChild(aDesc);
+
+        div.appendChild(aDiv);
+
+        return [div, button];
+    }
+
+}, false);
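
followlinesBox() assembles its two links from data-logurl plus the query parameters the filelog templates above consume ('patch', 'linerange', 'descend'). For lines 3 to 5 of file foo, host and repo path hypothetical:

    http://hg.example.com/log/tip/foo?patch=&linerange=3:5            (ascending)
    http://hg.example.com/log/tip/foo?patch=&linerange=3:5&descend=   (descending)
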
--- a/mercurial/templates/static/style-gitweb.css	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/static/style-gitweb.css	Tue Apr 18 12:24:34 2017 -0400
@@ -145,6 +145,66 @@
 	background-color: #bfdfff;
 }
 
+.description {
+    font-family: monospace;
+}
+
+/* Followlines */
+div.page_body pre.sourcelines > span.followlines-select:hover {
+  cursor: cell;
+}
+
+pre.sourcelines > span.followlines-selected {
+  background-color: #99C7E9 !important;
+}
+
+div#followlines {
+  background-color: #B7B7B7;
+  border: 1px solid #CCC;
+  border-radius: 5px;
+  padding: 4px;
+  position: fixed;
+}
+
+div.followlines-cancel {
+  text-align: right;
+}
+
+div.followlines-cancel > button {
+  line-height: 80%;
+  padding: 0;
+  border: 0;
+  border-radius: 2px;
+  background-color: inherit;
+  font-weight: bold;
+}
+
+div.followlines-cancel > button:hover {
+  color: #FFFFFF;
+  background-color: #CF1F1F;
+}
+
+div.followlines-link {
+  margin: 2px;
+  margin-top: 4px;
+  font-family: sans-serif;
+}
+
+div#followlines-tooltip {
+  display: none;
+  position: fixed;
+  background-color: #ffc;
+  border: 1px solid #999;
+  padding: 2px;
+}
+
+.sourcelines:hover > div#followlines-tooltip {
+  display: inline;
+}
+
+.sourcelines:hover > div#followlines-tooltip.hidden {
+  display: none;
+}
 /* Graph */
 div#wrapper {
 	position: relative;
--- a/mercurial/templates/static/style-paper.css	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/templates/static/style-paper.css	Tue Apr 18 12:24:34 2017 -0400
@@ -280,6 +280,62 @@
   background-color: #bfdfff;
 }
 
+div.overflow pre.sourcelines > span.followlines-select:hover {
+  cursor: cell;
+}
+
+pre.sourcelines > span.followlines-selected {
+  background-color: #99C7E9;
+}
+
+div#followlines {
+  background-color: #B7B7B7;
+  border: 1px solid #CCC;
+  border-radius: 5px;
+  padding: 4px;
+  position: fixed;
+}
+
+div.followlines-cancel {
+  text-align: right;
+}
+
+div.followlines-cancel > button {
+  line-height: 80%;
+  padding: 0;
+  border: 0;
+  border-radius: 2px;
+  background-color: inherit;
+  font-weight: bold;
+}
+
+div.followlines-cancel > button:hover {
+  color: #FFFFFF;
+  background-color: #CF1F1F;
+}
+
+div.followlines-link {
+  margin: 2px;
+  margin-top: 4px;
+  font-family: sans-serif;
+}
+
+div#followlines-tooltip {
+  display: none;
+  position: fixed;
+  background-color: #ffc;
+  border: 1px solid #999;
+  padding: 2px;
+}
+
+.sourcelines:hover > div#followlines-tooltip {
+  display: inline;
+}
+
+.sourcelines:hover > div#followlines-tooltip.hidden {
+  display: none;
+}
+
 .sourcelines > a {
     display: inline-block;
     position: absolute;
--- a/mercurial/transaction.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/transaction.py	Tue Apr 18 12:24:34 2017 -0400
@@ -226,7 +226,7 @@
         """
         if self._queue:
             msg = 'cannot use transaction.addbackup inside "group"'
-            raise RuntimeError(msg)
+            raise error.ProgrammingError(msg)
 
         if file in self.map or file in self._backupmap:
             return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/txnutil.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,36 @@
+# txnutil.py - transaction related utilities
+#
+#  Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from . import (
+    encoding,
+)
+
+def mayhavepending(root):
+    '''return whether 'root' may have pending changes, which are
+    visible to this process.
+    '''
+    return root == encoding.environ.get('HG_PENDING')
+
+def trypending(root, vfs, filename, **kwargs):
+    '''Open a file to be read according to the HG_PENDING environment variable
+
+    This opens the '.pending' variant of the specified 'filename' only when
+    HG_PENDING is equal to 'root'.
+
+    This returns a '(fp, is_pending_opened)' tuple.
+    '''
+    if mayhavepending(root):
+        try:
+            return (vfs('%s.pending' % filename, **kwargs), True)
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+    return (vfs(filename, **kwargs), False)
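
A minimal caller sketch, assuming the repo.root and repo.vfs attributes that localrepo provides; this mirrors how in-tree readers pick up transaction-pending file contents:

    from mercurial import txnutil

    def readpending(repo, filename):
        # opens '<filename>.pending' when HG_PENDING names this repo root
        fp, pending = txnutil.trypending(repo.root, repo.vfs, filename)
        try:
            return fp.read(), pending
        finally:
            fp.close()
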
--- a/mercurial/ui.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/ui.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,13 +7,16 @@
 
 from __future__ import absolute_import
 
+import collections
 import contextlib
 import errno
 import getpass
 import inspect
 import os
 import re
+import signal
 import socket
+import subprocess
 import sys
 import tempfile
 import traceback
@@ -22,18 +25,24 @@
 from .node import hex
 
 from . import (
+    color,
     config,
     encoding,
     error,
     formatter,
     progress,
     pycompat,
+    rcutil,
     scmutil,
     util,
 )
 
 urlreq = util.urlreq
 
+# for use with str.translate(None, _keepalnum), to keep just alphanumerics
+_keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
+                     if not c.isalnum())
+
 samplehgrcs = {
     'user':
 """# example user config (see 'hg help config' for more info)
@@ -42,12 +51,14 @@
 # username = Jane Doe <jdoe@example.com>
 username =
 
+# uncomment to colorize command output
+# color = auto
+
 [extensions]
 # uncomment these lines to enable some popular extensions
 # (see 'hg help extensions' for more info)
 #
-# pager =
-# color =""",
+# pager =""",
 
     'cloned':
 """# example repository config (see 'hg help config' for more info)
@@ -85,15 +96,38 @@
     'global':
 """# example system-wide hg config (see 'hg help config' for more info)
 
+[ui]
+# uncomment to colorize command output
+# color = auto
+
 [extensions]
 # uncomment these lines to enable some popular extensions
 # (see 'hg help extensions' for more info)
 #
 # blackbox =
-# color =
 # pager =""",
 }
 
+
+class httppasswordmgrdbproxy(object):
+    """Delays loading urllib2 until it's needed."""
+    def __init__(self):
+        self._mgr = None
+
+    def _get_mgr(self):
+        if self._mgr is None:
+            self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
+        return self._mgr
+
+    def add_password(self, *args, **kwargs):
+        return self._get_mgr().add_password(*args, **kwargs)
+
+    def find_user_password(self, *args, **kwargs):
+        return self._get_mgr().find_user_password(*args, **kwargs)
+
+def _catchterm(*args):
+    raise error.SignalInterrupt
+
 class ui(object):
     def __init__(self, src=None):
         """Create a fresh new ui object if no src given
@@ -104,6 +138,8 @@
         """
         # _buffers: used for temporary capture of output
         self._buffers = []
+        # _exithandlers: callbacks run at the end of a request
+        self._exithandlers = []
         # 3-tuple describing how each buffer in the stack behaves.
         # Values are (capture stderr, capture subprocesses, apply labels).
         self._bufferstates = []
@@ -120,11 +156,20 @@
         self.callhooks = True
         # Insecure server connections requested.
         self.insecureconnections = False
+        # Blocked time
+        self.logblockedtimes = False
+        # color mode: see mercurial/color.py for possible value
+        self._colormode = None
+        self._terminfoparams = {}
+        self._styles = {}
 
         if src:
+            self._exithandlers = src._exithandlers
             self.fout = src.fout
             self.ferr = src.ferr
             self.fin = src.fin
+            self.pageractive = src.pageractive
+            self._disablepager = src._disablepager
 
             self._tcfg = src._tcfg.copy()
             self._ucfg = src._ucfg.copy()
@@ -134,18 +179,26 @@
             self.environ = src.environ
             self.callhooks = src.callhooks
             self.insecureconnections = src.insecureconnections
+            self._colormode = src._colormode
+            self._terminfoparams = src._terminfoparams.copy()
+            self._styles = src._styles.copy()
+
             self.fixconfig()
 
             self.httppasswordmgrdb = src.httppasswordmgrdb
+            self._blockedtimes = src._blockedtimes
         else:
             self.fout = util.stdout
             self.ferr = util.stderr
             self.fin = util.stdin
+            self.pageractive = False
+            self._disablepager = False
 
             # shared read-only environment
             self.environ = encoding.environ
 
-            self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
+            self.httppasswordmgrdb = httppasswordmgrdbproxy()
+            self._blockedtimes = collections.defaultdict(int)
 
         allowed = self.configlist('experimental', 'exportableenviron')
         if '*' in allowed:
@@ -160,9 +213,22 @@
     def load(cls):
         """Create a ui and load global and user configs"""
         u = cls()
-        # we always trust global config files
-        for f in scmutil.rcpath():
-            u.readconfig(f, trust=True)
+        # we always trust global config files and environment variables
+        for t, f in rcutil.rccomponents():
+            if t == 'path':
+                u.readconfig(f, trust=True)
+            elif t == 'items':
+                sections = set()
+                for section, name, value, source in f:
+                    # do not set u._ocfg
+                    # XXX clean this up once immutable config object is a thing
+                    u._tcfg.set(section, name, value, source)
+                    u._ucfg.set(section, name, value, source)
+                    sections.add(section)
+                for section in sections:
+                    u.fixconfig(section=section)
+            else:
+                raise error.ProgrammingError('unknown rctype: %s' % t)
         return u
 
     def copy(self):
@@ -172,7 +238,17 @@
         """Clear internal state that shouldn't persist across commands"""
         if self._progbar:
             self._progbar.resetstate()  # reset last-print time of progress bar
-        self.httppasswordmgrdb = urlreq.httppasswordmgrwithdefaultrealm()
+        self.httppasswordmgrdb = httppasswordmgrdbproxy()
+
+    @contextlib.contextmanager
+    def timeblockedsection(self, key):
+        # this is open-coded below - search for timeblockedsection to find them
+        starttime = util.timer()
+        try:
+            yield
+        finally:
+            self._blockedtimes[key + '_blocked'] += \
+                (util.timer() - starttime) * 1000
 
     def formatter(self, topic, opts):
         return formatter.formatter(self, topic, opts)
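
A runnable sketch of the new context manager, assuming a Python 2 interpreter with this tree on the path; keys are suffixed with '_blocked' and accumulate milliseconds:

    from mercurial import ui as uimod

    u = uimod.ui.load()
    with u.timeblockedsection('stdio'):
        u.write('hello\n')
    print u._blockedtimes['stdio_blocked']   # elapsed milliseconds so far
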
@@ -224,6 +300,8 @@
                     del cfg['ui'][k]
             for k, v in cfg.items('defaults'):
                 del cfg['defaults'][k]
+            for k, v in cfg.items('commands'):
+                del cfg['commands'][k]
         # Don't remove aliases from the configuration if in the exceptionlist
         if self.plain('alias'):
             for k, v in cfg.items('alias'):
@@ -277,6 +355,7 @@
             self._reportuntrusted = self.debugflag or self.configbool("ui",
                 "report_untrusted", True)
             self.tracebackflag = self.configbool('ui', 'traceback', False)
+            self.logblockedtimes = self.configbool('ui', 'logblockedtimes')
 
         if section in (None, 'trusted'):
             # update trust information
@@ -402,6 +481,41 @@
                                     % (section, name, v))
         return b
 
+    def configwith(self, convert, section, name, default=None,
+                   desc=None, untrusted=False):
+        """parse a configuration element with a conversion function
+
+        >>> u = ui(); s = 'foo'
+        >>> u.setconfig(s, 'float1', '42')
+        >>> u.configwith(float, s, 'float1')
+        42.0
+        >>> u.setconfig(s, 'float2', '-4.25')
+        >>> u.configwith(float, s, 'float2')
+        -4.25
+        >>> u.configwith(float, s, 'unknown', 7)
+        7
+        >>> u.setconfig(s, 'invalid', 'somevalue')
+        >>> u.configwith(float, s, 'invalid')
+        Traceback (most recent call last):
+            ...
+        ConfigError: foo.invalid is not a valid float ('somevalue')
+        >>> u.configwith(float, s, 'invalid', desc='womble')
+        Traceback (most recent call last):
+            ...
+        ConfigError: foo.invalid is not a valid womble ('somevalue')
+        """
+
+        v = self.config(section, name, None, untrusted)
+        if v is None:
+            return default
+        try:
+            return convert(v)
+        except ValueError:
+            if desc is None:
+                desc = convert.__name__
+            raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
+                                    % (section, name, desc, v))
+
     def configint(self, section, name, default=None, untrusted=False):
         """parse a configuration element as an integer
 
@@ -418,17 +532,11 @@
         >>> u.configint(s, 'invalid')
         Traceback (most recent call last):
             ...
-        ConfigError: foo.invalid is not an integer ('somevalue')
+        ConfigError: foo.invalid is not a valid integer ('somevalue')
         """
 
-        v = self.config(section, name, None, untrusted)
-        if v is None:
-            return default
-        try:
-            return int(v)
-        except ValueError:
-            raise error.ConfigError(_("%s.%s is not an integer ('%s')")
-                                    % (section, name, v))
+        return self.configwith(int, section, name, default, 'integer',
+                               untrusted)
 
     def configbytes(self, section, name, default=0, untrusted=False):
         """parse a configuration element as a quantity in bytes
@@ -452,7 +560,7 @@
         ConfigError: foo.invalid is not a byte quantity ('somevalue')
         """
 
-        value = self.config(section, name)
+        value = self.config(section, name, None, untrusted)
         if value is None:
             if not isinstance(default, str):
                 return default
@@ -472,84 +580,11 @@
         >>> u.configlist(s, 'list1')
         ['this', 'is', 'a small', 'test']
         """
-
-        def _parse_plain(parts, s, offset):
-            whitespace = False
-            while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
-                whitespace = True
-                offset += 1
-            if offset >= len(s):
-                return None, parts, offset
-            if whitespace:
-                parts.append('')
-            if s[offset] == '"' and not parts[-1]:
-                return _parse_quote, parts, offset + 1
-            elif s[offset] == '"' and parts[-1][-1] == '\\':
-                parts[-1] = parts[-1][:-1] + s[offset]
-                return _parse_plain, parts, offset + 1
-            parts[-1] += s[offset]
-            return _parse_plain, parts, offset + 1
-
-        def _parse_quote(parts, s, offset):
-            if offset < len(s) and s[offset] == '"': # ""
-                parts.append('')
-                offset += 1
-                while offset < len(s) and (s[offset].isspace() or
-                        s[offset] == ','):
-                    offset += 1
-                return _parse_plain, parts, offset
-
-            while offset < len(s) and s[offset] != '"':
-                if (s[offset] == '\\' and offset + 1 < len(s)
-                        and s[offset + 1] == '"'):
-                    offset += 1
-                    parts[-1] += '"'
-                else:
-                    parts[-1] += s[offset]
-                offset += 1
-
-            if offset >= len(s):
-                real_parts = _configlist(parts[-1])
-                if not real_parts:
-                    parts[-1] = '"'
-                else:
-                    real_parts[0] = '"' + real_parts[0]
-                    parts = parts[:-1]
-                    parts.extend(real_parts)
-                return None, parts, offset
-
-            offset += 1
-            while offset < len(s) and s[offset] in [' ', ',']:
-                offset += 1
-
-            if offset < len(s):
-                if offset + 1 == len(s) and s[offset] == '"':
-                    parts[-1] += '"'
-                    offset += 1
-                else:
-                    parts.append('')
-            else:
-                return None, parts, offset
-
-            return _parse_plain, parts, offset
-
-        def _configlist(s):
-            s = s.rstrip(' ,')
-            if not s:
-                return []
-            parser, parts, offset = _parse_plain, [''], 0
-            while parser:
-                parser, parts, offset = parser(parts, s, offset)
-            return parts
-
-        result = self.config(section, name, untrusted=untrusted)
-        if result is None:
-            result = default or []
-        if isinstance(result, bytes):
-            result = _configlist(result.lstrip(' ,\n'))
-            if result is None:
-                result = default or []
-        return result
+        # default is not always a list
+        if isinstance(default, bytes):
+            default = config.parselist(default)
+        return self.configwith(config.parselist, section, name, default or [],
+                               'list', untrusted)
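[editor's note: an illustrative sketch of what the delegation above buys:
config.parselist() honors quoting, and a string default is now parsed the
same way as a configured value; the section and keys are hypothetical.]

    u = ui()
    u.setconfig('foo', 'list1', 'this, is "a small" test')
    assert u.configlist('foo', 'list1') == ['this', 'is', 'a small', 'test']
    # a string default is parsed before being returned
    assert u.configlist('foo', 'missing', default='a, b') == ['a', 'b']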
 
     def hasconfig(self, section, name, untrusted=False):
         return self._data(untrusted).hasitem(section, name)
@@ -696,55 +731,236 @@
     def write(self, *args, **opts):
         '''write args to output
 
-        By default, this method simply writes to the buffer or stdout,
-        but extensions or GUI tools may override this method,
-        write_err(), popbuffer(), and label() to style output from
-        various parts of hg.
+        By default, this method simply writes to the buffer or stdout.
+        Color mode can be set on the UI class to have the output decorated
+        with color modifier before being written to stdout.
 
-        An optional keyword argument, "label", can be passed in.
-        This should be a string containing label names separated by
-        space. Label names take the form of "topic.type". For example,
-        ui.debug() issues a label of "ui.debug".
+        The color used is controlled by an optional keyword argument, "label".
+        This should be a string containing label names separated by space.
+        Label names take the form of "topic.type". For example, ui.debug()
+        issues a label of "ui.debug".
 
         When labeling output for a specific command, a label of
         "cmdname.type" is recommended. For example, status issues
         a label of "status.modified" for modified files.
         '''
         if self._buffers and not opts.get('prompt', False):
-            self._buffers[-1].extend(a for a in args)
+            if self._bufferapplylabels:
+                label = opts.get('label', '')
+                self._buffers[-1].extend(self.label(a, label) for a in args)
+            else:
+                self._buffers[-1].extend(args)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of worms, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write, *args, **opts)
         else:
-            self._progclear()
-            for a in args:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write(*msgs, **opts)
+
+    def _write(self, *msgs, **opts):
+        self._progclear()
+        # opencode timeblockedsection because this is a critical path
+        starttime = util.timer()
+        try:
+            for a in msgs:
                 self.fout.write(a)
+        except IOError as err:
+            raise error.StdioError(err)
+        finally:
+            self._blockedtimes['stdio_blocked'] += \
+                (util.timer() - starttime) * 1000
 
     def write_err(self, *args, **opts):
         self._progclear()
+        if self._bufferstates and self._bufferstates[-1][0]:
+            self.write(*args, **opts)
+        elif self._colormode == 'win32':
+            # windows color printing is its own can of worms, defer to
+            # the color module and that is it.
+            color.win32print(self, self._write_err, *args, **opts)
+        else:
+            msgs = args
+            if self._colormode is not None:
+                label = opts.get('label', '')
+                msgs = [self.label(a, label) for a in args]
+            self._write_err(*msgs, **opts)
+
+    def _write_err(self, *msgs, **opts):
         try:
-            if self._bufferstates and self._bufferstates[-1][0]:
-                return self.write(*args, **opts)
-            if not getattr(self.fout, 'closed', False):
-                self.fout.flush()
-            for a in args:
-                self.ferr.write(a)
-            # stderr may be buffered under win32 when redirected to files,
-            # including stdout.
-            if not getattr(self.ferr, 'closed', False):
-                self.ferr.flush()
+            with self.timeblockedsection('stdio'):
+                if not getattr(self.fout, 'closed', False):
+                    self.fout.flush()
+                for a in msgs:
+                    self.ferr.write(a)
+                # stderr may be buffered under win32 when redirected to files,
+                # including stdout.
+                if not getattr(self.ferr, 'closed', False):
+                    self.ferr.flush()
         except IOError as inst:
-            if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
-                raise
+            raise error.StdioError(inst)
 
     def flush(self):
-        try: self.fout.flush()
-        except (IOError, ValueError): pass
-        try: self.ferr.flush()
-        except (IOError, ValueError): pass
+        # opencode timeblockedsection because this is a critical path
+        starttime = util.timer()
+        try:
+            try:
+                self.fout.flush()
+            except IOError as err:
+                raise error.StdioError(err)
+            finally:
+                try:
+                    self.ferr.flush()
+                except IOError as err:
+                    raise error.StdioError(err)
+        finally:
+            self._blockedtimes['stdio_blocked'] += \
+                (util.timer() - starttime) * 1000
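[editor's note: for reference, a hedged sketch of the accounting that write()
and flush() open-code above; the real timeblockedsection() context manager is
defined elsewhere on this class and may differ in detail. Assumes 'import
contextlib' at module top.]

    @contextlib.contextmanager
    def timeblockedsection(self, key):
        # accumulate wall-clock milliseconds spent blocked under 'key'
        starttime = util.timer()
        try:
            yield
        finally:
            self._blockedtimes[key + '_blocked'] += \
                (util.timer() - starttime) * 1000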
 
     def _isatty(self, fh):
         if self.configbool('ui', 'nontty', False):
             return False
         return util.isatty(fh)
 
+    def disablepager(self):
+        self._disablepager = True
+
+    def pager(self, command):
+        """Start a pager for subsequent command output.
+
+        Commands which produce a long stream of output should call
+        this function to activate the user's preferred pagination
+        mechanism (which may be no pager). Calling this function
+        precludes any future use of interactive functionality, such as
+        prompting the user or activating curses.
+
+        Args:
+          command: The full, non-aliased name of the command. That is, "log"
+                   not "history", "summary" not "summ", etc.
+        """
+        if (self._disablepager
+            or self.pageractive
+            or command in self.configlist('pager', 'ignore')
+            or not self.configbool('pager', 'enable', True)
+            or not self.configbool('pager', 'attend-' + command, True)
+            # TODO: if we want to allow HGPLAINEXCEPT=pager,
+            # formatted() will need some adjustment.
+            or not self.formatted()
+            or self.plain()
+            # TODO: expose debugger-enabled on the UI object
+            or '--debugger' in pycompat.sysargv):
+            # We only want to paginate if the ui appears to be
+            # interactive, the user didn't say HGPLAIN or
+            # HGPLAINEXCEPT=pager, and the user didn't specify --debugger.
+            return
+
+        fallbackpager = 'more'
+        pagercmd = self.config('pager', 'pager', fallbackpager)
+        if not pagercmd:
+            return
+
+        pagerenv = {}
+        for name, value in rcutil.defaultpagerenv().items():
+            if name not in encoding.environ:
+                pagerenv[name] = value
+
+        self.debug('starting pager for command %r\n' % command)
+        self.flush()
+
+        wasformatted = self.formatted()
+        if util.safehasattr(signal, "SIGPIPE"):
+            signal.signal(signal.SIGPIPE, _catchterm)
+        if self._runpager(pagercmd, pagerenv):
+            self.pageractive = True
+            # Preserve the formatted-ness of the UI. This is important
+            # because we mess with stdout, which might confuse
+            # auto-detection of things being formatted.
+            self.setconfig('ui', 'formatted', wasformatted, 'pager')
+            self.setconfig('ui', 'interactive', False, 'pager')
+
+            # If pagermode differs from color.mode, reconfigure color now that
+            # pageractive is set.
+            cm = self._colormode
+            if cm != self.config('color', 'pagermode', cm):
+                color.setup(self)
+        else:
+            # If the pager can't be spawned in dispatch when --pager=on is
+            # given, don't try again when the command runs, to avoid a duplicate
+            # warning about a missing pager command.
+            self.disablepager()
+
+    def _runpager(self, command, env=None):
+        """Actually start the pager and set up file descriptors.
+
+        This is separate in part so that extensions (like chg) can
+        override how a pager is invoked.
+        """
+        if command == 'cat':
+            # Save ourselves some work.
+            return False
+        # If the command doesn't contain any of these characters, we
+        # assume it's a binary and exec it directly. This means for
+        # simple pager command configurations, we can degrade
+        # gracefully and tell the user about their broken pager.
+        shell = any(c in command for c in "|&;<>()$`\\\"' \t\n*?[#~=%")
+
+        if pycompat.osname == 'nt' and not shell:
+            # Windows' built-in `more` cannot be invoked with shell=False, but
+            # its `more.com` can.  Hide this implementation detail from the
+            # user so we can also get sane bad PAGER behavior.  MSYS has
+            # `more.exe`, so do a cmd.exe style resolution of the executable to
+            # determine which one to use.
+            fullcmd = util.findexe(command)
+            if not fullcmd:
+                self.warn(_("missing pager command '%s', skipping pager\n")
+                          % command)
+                return False
+
+            command = fullcmd
+
+        try:
+            pager = subprocess.Popen(
+                command, shell=shell, bufsize=-1,
+                close_fds=util.closefds, stdin=subprocess.PIPE,
+                stdout=util.stdout, stderr=util.stderr,
+                env=util.shellenviron(env))
+        except OSError as e:
+            if e.errno == errno.ENOENT and not shell:
+                self.warn(_("missing pager command '%s', skipping pager\n")
+                          % command)
+                return False
+            raise
+
+        # back up original file descriptors
+        stdoutfd = os.dup(util.stdout.fileno())
+        stderrfd = os.dup(util.stderr.fileno())
+
+        os.dup2(pager.stdin.fileno(), util.stdout.fileno())
+        if self._isatty(util.stderr):
+            os.dup2(pager.stdin.fileno(), util.stderr.fileno())
+
+        @self.atexit
+        def killpager():
+            if util.safehasattr(signal, "SIGINT"):
+                signal.signal(signal.SIGINT, signal.SIG_IGN)
+            # restore original fds, closing pager.stdin copies in the process
+            os.dup2(stdoutfd, util.stdout.fileno())
+            os.dup2(stderrfd, util.stderr.fileno())
+            pager.stdin.close()
+            pager.wait()
+
+        return True
+
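[editor's note: an example hgrc that the pager code above consumes; the
values are illustrative. Because 'less -FRX' contains a space, the
shell-character check in _runpager() runs it through a shell.]

    [pager]
    pager = less -FRX
    ignore = version, help
    attend-log = true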
+    def atexit(self, func, *args, **kwargs):
+        '''register a function to run after dispatching a request
+
+        Handlers do not stay registered across request boundaries.'''
+        self._exithandlers.append((func, args, kwargs))
+        return func
+
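[editor's note: a minimal sketch of the non-decorator form of ui.atexit();
the handler here is hypothetical. killpager() above uses the decorator form.]

    def cleanup():
        ui.debug('request finished\n')
    ui.atexit(cleanup)  # runs once after the current request is dispatched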
     def interface(self, feature):
         """what interface to use for interactive console features?
 
@@ -900,13 +1116,14 @@
         sys.stdout = self.fout
         # prompt ' ' must exist; otherwise readline may delete entire line
         # - http://bugs.python.org/issue12833
-        line = raw_input(' ')
+        with self.timeblockedsection('stdio'):
+            line = raw_input(' ')
         sys.stdin = oldin
         sys.stdout = oldout
 
         # When stdin is in binary mode on Windows, it can cause
         # raw_input() to emit an extra trailing carriage return
-        if os.linesep == '\r\n' and line and line[-1] == '\r':
+        if pycompat.oslinesep == '\r\n' and line and line[-1] == '\r':
             line = line[:-1]
         return line
 
@@ -980,13 +1197,14 @@
             self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
             # disable getpass() only if explicitly specified. it's still valid
             # to interact with tty even if fin is not a tty.
-            if self.configbool('ui', 'nontty'):
-                l = self.fin.readline()
-                if not l:
-                    raise EOFError
-                return l.rstrip('\n')
-            else:
-                return getpass.getpass('')
+            with self.timeblockedsection('stdio'):
+                if self.configbool('ui', 'nontty'):
+                    l = self.fin.readline()
+                    if not l:
+                        raise EOFError
+                    return l.rstrip('\n')
+                else:
+                    return getpass.getpass('')
         except EOFError:
             raise error.ResponseExpected()
     def status(self, *msg, **opts):
@@ -995,14 +1213,14 @@
         This adds an output label of "ui.status".
         '''
         if not self.quiet:
-            opts['label'] = opts.get('label', '') + ' ui.status'
+            opts[r'label'] = opts.get(r'label', '') + ' ui.status'
             self.write(*msg, **opts)
     def warn(self, *msg, **opts):
         '''write warning message to output (stderr)
 
         This adds an output label of "ui.warning".
         '''
-        opts['label'] = opts.get('label', '') + ' ui.warning'
+        opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
         self.write_err(*msg, **opts)
     def note(self, *msg, **opts):
         '''write note to output (if ui.verbose is True)
@@ -1010,7 +1228,7 @@
         This adds an output label of "ui.note".
         '''
         if self.verbose:
-            opts['label'] = opts.get('label', '') + ' ui.note'
+            opts[r'label'] = opts.get(r'label', '') + ' ui.note'
             self.write(*msg, **opts)
     def debug(self, *msg, **opts):
         '''write debug message to output (if ui.debugflag is True)
@@ -1018,7 +1236,7 @@
         This adds an output label of "ui.debug".
         '''
         if self.debugflag:
-            opts['label'] = opts.get('label', '') + ' ui.debug'
+            opts[r'label'] = opts.get(r'label', '') + ' ui.debug'
             self.write(*msg, **opts)
 
     def edit(self, text, user, extra=None, editform=None, pending=None,
@@ -1035,11 +1253,11 @@
         if self.configbool('experimental', 'editortmpinhg'):
             rdir = repopath
         (fd, name) = tempfile.mkstemp(prefix='hg-' + extra['prefix'] + '-',
-                                      suffix=extra['suffix'], text=True,
+                                      suffix=extra['suffix'],
                                       dir=rdir)
         try:
-            f = os.fdopen(fd, "w")
-            f.write(text)
+            f = os.fdopen(fd, r'wb')
+            f.write(util.tonativeeol(text))
             f.close()
 
             environ = {'HGUSER': user}
@@ -1058,25 +1276,47 @@
 
             self.system("%s \"%s\"" % (editor, name),
                         environ=environ,
-                        onerr=error.Abort, errprefix=_("edit failed"))
+                        onerr=error.Abort, errprefix=_("edit failed"),
+                        blockedtag='editor')
 
-            f = open(name)
-            t = f.read()
+            f = open(name, r'rb')
+            t = util.fromnativeeol(f.read())
             f.close()
         finally:
             os.unlink(name)
 
         return t
 
-    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None):
+    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
+               blockedtag=None):
         '''execute shell command with appropriate output stream. command
         output will be redirected if fout is not stdout.
+
+        if command fails and onerr is None, return status, else raise onerr
+        object as exception.
         '''
+        if blockedtag is None:
+            # Long cmds tend to be long because of an absolute path to the
+            # command. Keep the tail end instead
+            cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
+            blockedtag = 'unknown_system_' + cmdsuffix
         out = self.fout
         if any(s[1] for s in self._bufferstates):
             out = self
-        return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
-                           errprefix=errprefix, out=out)
+        with self.timeblockedsection(blockedtag):
+            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
+        if rc and onerr:
+            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
+                                util.explainexit(rc)[0])
+            if errprefix:
+                errmsg = '%s: %s' % (errprefix, errmsg)
+            raise onerr(errmsg)
+        return rc
+
+    def _runsystem(self, cmd, environ, cwd, out):
+        """actually execute the given shell command (can be overridden by
+        extensions like chg)"""
+        return util.system(cmd, environ=environ, cwd=cwd, out=out)
 
     def traceback(self, exc=None, force=False):
         '''print exception traceback if traceback printing enabled or forced.
@@ -1099,7 +1339,11 @@
                                ''.join(exconly))
             else:
                 output = traceback.format_exception(exc[0], exc[1], exc[2])
-                self.write_err(''.join(output))
+                data = r''.join(output)
+                if pycompat.ispy3:
+                    enc = pycompat.sysstr(encoding.encoding)
+                    data = data.encode(enc, errors=r'replace')
+                self.write_err(data)
         return self.tracebackflag or force
 
     def geteditor(self):
@@ -1112,9 +1356,7 @@
         else:
             editor = 'vi'
         return (encoding.environ.get("HGEDITOR") or
-                self.config("ui", "editor") or
-                encoding.environ.get("VISUAL") or
-                encoding.environ.get("EDITOR", editor))
+                self.config("ui", "editor", editor))
 
     @util.propertycache
     def _progbar(self):
@@ -1180,13 +1422,15 @@
     def label(self, msg, label):
         '''style msg based on supplied label
 
-        Like ui.write(), this just returns msg unchanged, but extensions
-        and GUI tools can override it to allow styling output without
-        writing it.
+        If some color mode is enabled, this will add the necessary control
+        characters to apply such color. In addition, 'debug' color mode adds
+        markup showing which label affects a piece of text.
 
         ui.write(s, 'label') is equivalent to
         ui.write(ui.label(s, 'label')).
         '''
+        if self._colormode is not None:
+            return color.colorlabel(self, msg, label)
         return msg
 
     def develwarn(self, msg, stacklevel=1, config=None):
@@ -1377,7 +1621,7 @@
 
         self.name = name
         self.rawloc = rawloc
-        self.loc = str(u)
+        self.loc = '%s' % u
 
         # When given a raw location but not a symbolic name, validate the
         # location is valid.
--- a/mercurial/unionrepo.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/unionrepo.py	Tue Apr 18 12:24:34 2017 -0400
@@ -27,8 +27,8 @@
     pathutil,
     pycompat,
     revlog,
-    scmutil,
     util,
+    vfs as vfsmod,
 )
 
 class unionrevlog(revlog.revlog):
@@ -39,7 +39,7 @@
         #
         # To differentiate a rev in the second revlog from a rev in the revlog,
         # we check revision against repotiprev.
-        opener = scmutil.readonlyvfs(opener)
+        opener = vfsmod.readonlyvfs(opener)
         revlog.revlog.__init__(self, opener, indexfile)
         self.revlog2 = revlog2
 
@@ -90,8 +90,7 @@
         elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
             return self.baserevdiff(rev1, rev2)
 
-        return mdiff.textdiff(self.revision(self.node(rev1)),
-                              self.revision(self.node(rev2)))
+        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
 
     def revision(self, nodeorrev, raw=False):
         """return an uncompressed revision of a given node or revision
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/upgrade.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,826 @@
+# upgrade.py - functions for in place upgrade of Mercurial repository
+#
+# Copyright (c) 2016-present, Gregory Szorc
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import stat
+import tempfile
+
+from .i18n import _
+from . import (
+    changelog,
+    error,
+    localrepo,
+    manifest,
+    revlog,
+    scmutil,
+    util,
+    vfs as vfsmod,
+)
+
+def requiredsourcerequirements(repo):
+    """Obtain requirements required to be present to upgrade a repo.
+
+    An upgrade will not be allowed if the repository doesn't have the
+    requirements returned by this function.
+    """
+    return set([
+        # Introduced in Mercurial 0.9.2.
+        'revlogv1',
+        # Introduced in Mercurial 0.9.2.
+        'store',
+    ])
+
+def blocksourcerequirements(repo):
+    """Obtain requirements that will prevent an upgrade from occurring.
+
+    An upgrade cannot be performed if the source repository contains a
+    An upgrade cannot be performed if the source repository contains any
+    requirement in the returned set.
+    return set([
+        # The upgrade code does not yet support these experimental features.
+        # This is an artificial limitation.
+        'manifestv2',
+        'treemanifest',
+        # This was a precursor to generaldelta and was never enabled by default.
+        # It should (hopefully) not exist in the wild.
+        'parentdelta',
+        # Upgrade should operate on the actual store, not the shared link.
+        'shared',
+    ])
+
+def supportremovedrequirements(repo):
+    """Obtain requirements that can be removed during an upgrade.
+
+    If an upgrade were to create a repository that dropped a requirement,
+    the dropped requirement must appear in the returned set for the upgrade
+    to be allowed.
+    """
+    return set()
+
+def supporteddestrequirements(repo):
+    """Obtain requirements that upgrade supports in the destination.
+
+    If the result of the upgrade would create requirements not in this set,
+    the upgrade is disallowed.
+
+    Extensions should monkeypatch this to add their custom requirements.
+    """
+    return set([
+        'dotencode',
+        'fncache',
+        'generaldelta',
+        'revlogv1',
+        'store',
+    ])
+
+def allowednewrequirements(repo):
+    """Obtain requirements that can be added to a repository during upgrade.
+
+    This is used to disallow proposed requirements from being added when
+    they weren't present before.
+
+    We use a list of allowed requirement additions instead of a list of known
+    bad additions because the whitelist approach is safer and will prevent
+    future, unknown requirements from accidentally being added.
+    """
+    return set([
+        'dotencode',
+        'fncache',
+        'generaldelta',
+    ])
+
+deficiency = 'deficiency'
+optimisation = 'optimization'
+
+class improvement(object):
+    """Represents an improvement that can be made as part of an upgrade.
+
+    The following attributes are defined on each instance:
+
+    name
+       Machine-readable string uniquely identifying this improvement. It
+       will be mapped to an action later in the upgrade process.
+
+    type
+       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
+       problem. An optimization is an action (sometimes optional) that
+       can be taken to further improve the state of the repository.
+
+    description
+       Message intended for humans explaining the improvement in more detail,
+       including the implications of it. For ``deficiency`` types, should be
+       worded in the present tense. For ``optimisation`` types, should be
+       worded in the future tense.
+
+    upgrademessage
+       Message intended for humans explaining what an upgrade addressing this
+       issue will do. Should be worded in the future tense.
+    """
+    def __init__(self, name, type, description, upgrademessage):
+        self.name = name
+        self.type = type
+        self.description = description
+        self.upgrademessage = upgrademessage
+
+    def __eq__(self, other):
+        if not isinstance(other, improvement):
+            # This is what Python tells us to do
+            return NotImplemented
+        return self.name == other.name
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __hash__(self):
+        return hash(self.name)
+
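[editor's note: a sketch of constructing an improvement directly, the way
findoptimizations() does further down; the name shown is hypothetical. Note
that equality and hashing are by name only.]

    entry = improvement(
        name='recompressall',
        type=optimisation,
        description=_('revlog data will be recompressed from scratch'),
        upgrademessage=_('all revlog data will be recompressed'))
    assert entry == improvement('recompressall', optimisation, '', '')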
+allformatvariant = []
+
+def registerformatvariant(cls):
+    allformatvariant.append(cls)
+    return cls
+
+class formatvariant(improvement):
+    """an improvement subclass dedicated to repository format"""
+    type = deficiency
+    ### The following attributes should be defined for each class:
+
+    # machine-readable string uniquely identifying this improvement. it will be
+    # mapped to an action later in the upgrade process.
+    name = None
+
+    # message intended for humans explaining the improvement in more detail,
+    # including the implications of it. For ``deficiency`` types, should be
+    # worded in the present tense.
+    description = None
+
+    # message intended for humans explaining what an upgrade addressing this
+    # issue will do. should be worded in the future tense.
+    upgrademessage = None
+
+    # value of current Mercurial default for new repository
+    default = None
+
+    def __init__(self):
+        raise NotImplementedError()
+
+    @staticmethod
+    def fromrepo(repo):
+        """current value of the variant in the repository"""
+        raise NotImplementedError()
+
+    @staticmethod
+    def fromconfig(repo):
+        """current value of the variant in the configuration"""
+        raise NotImplementedError()
+
+class requirementformatvariant(formatvariant):
+    """formatvariant based on a 'requirement' name.
+
+    Many format variants are controlled by a 'requirement'. We define a small
+    subclass to factor the code.
+    """
+
+    # the requirement that controls this format variant
+    _requirement = None
+
+    @staticmethod
+    def _newreporequirements(repo):
+        return localrepo.newreporequirements(repo)
+
+    @classmethod
+    def fromrepo(cls, repo):
+        assert cls._requirement is not None
+        return cls._requirement in repo.requirements
+
+    @classmethod
+    def fromconfig(cls, repo):
+        assert cls._requirement is not None
+        return cls._requirement in cls._newreporequirements(repo)
+
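[editor's note: a hedged sketch of how an extension could surface its own
requirement as a format variant alongside the built-in ones below;
'fakecompression' is a hypothetical requirement name.]

    @registerformatvariant
    class fakecompression(requirementformatvariant):
        name = 'fakecompression'
        _requirement = 'fakecompression'
        default = False
        description = _('store is not using fake compression')
        upgrademessage = _('store will be rewritten with fake compression')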
+@registerformatvariant
+class fncache(requirementformatvariant):
+    name = 'fncache'
+
+    _requirement = 'fncache'
+
+    default = True
+
+    description = _('long and reserved filenames may not work correctly; '
+                    'repository performance is sub-optimal')
+
+    upgrademessage = _('repository will be more resilient to storing '
+                       'certain paths and performance of certain '
+                       'operations should be improved')
+
+@registerformatvariant
+class dotencode(requirementformatvariant):
+    name = 'dotencode'
+
+    _requirement = 'dotencode'
+
+    default = True
+
+    description = _('storage of filenames beginning with a period or '
+                    'space may not work correctly')
+
+    upgrademessage = _('repository will be better able to store files '
+                       'beginning with a space or period')
+
+@registerformatvariant
+class generaldelta(requirementformatvariant):
+    name = 'generaldelta'
+
+    _requirement = 'generaldelta'
+
+    default = True
+
+    description = _('deltas within internal storage are unable to '
+                    'choose optimal revisions; repository is larger and '
+                    'slower than it could be; interaction with other '
+                    'repositories may require extra network and CPU '
+                    'resources, making "hg push" and "hg pull" slower')
+
+    upgrademessage = _('repository storage will be able to create '
+                       'optimal deltas; new repository data will be '
+                       'smaller and read times should decrease; '
+                       'interacting with other repositories using this '
+                       'storage model should require less network and '
+                       'CPU resources, making "hg push" and "hg pull" '
+                       'faster')
+
+@registerformatvariant
+class removecldeltachain(formatvariant):
+    name = 'removecldeltachain'
+
+    default = True
+
+    description = _('changelog storage is using deltas instead of '
+                    'raw entries; changelog reading and any '
+                    'operation relying on changelog data are slower '
+                    'than they could be')
+
+    upgrademessage = _('changelog storage will be reformatted to '
+                       'store raw entries; changelog reading will be '
+                       'faster; changelog size may be reduced')
+
+    @staticmethod
+    def fromrepo(repo):
+        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
+        # changelogs with deltas.
+        cl = repo.changelog
+        chainbase = cl.chainbase
+        return all(rev == chainbase(rev) for rev in cl)
+
+    @staticmethod
+    def fromconfig(repo):
+        return True
+
+def finddeficiencies(repo):
+    """returns a list of deficiencies that the repo suffers from"""
+    deficiencies = []
+
+    # We could detect lack of revlogv1 and store here, but they were added
+    # in 0.9.2 and we don't support upgrading repos without these
+    # requirements, so let's not bother.
+
+    for fv in allformatvariant:
+        if not fv.fromrepo(repo):
+            deficiencies.append(fv)
+
+    return deficiencies
+
+def findoptimizations(repo):
+    """Determine optimisations that could be used during upgrade"""
+    # These are unconditionally added. There is logic later that figures out
+    # which ones to apply.
+    optimizations = []
+
+    optimizations.append(improvement(
+        name='redeltaparent',
+        type=optimisation,
+        description=_('deltas within internal storage will be recalculated to '
+                      'choose an optimal base revision where this was not '
+                      'already done; the size of the repository may shrink and '
+                      'various operations may become faster; the first time '
+                      'this optimization is performed could slow down upgrade '
+                      'execution considerably; subsequent invocations should '
+                      'not run noticeably slower'),
+        upgrademessage=_('deltas within internal storage will choose a new '
+                         'base revision if needed')))
+
+    optimizations.append(improvement(
+        name='redeltamultibase',
+        type=optimisation,
+        description=_('deltas within internal storage will be recalculated '
+                      'against multiple base revisions and the smallest '
+                      'difference will be used; the size of the repository may '
+                      'shrink significantly when there are many merges; this '
+                      'optimization will slow down execution in proportion to '
+                      'the number of merges in the repository and the amount '
+                      'the number of merges in the repository and the number '
+                      'of files in the repository; this slowdown should not '
+                      'files and thousands of merges'),
+        upgrademessage=_('deltas within internal storage will choose an '
+                         'optimal delta by computing deltas against multiple '
+                         'parents; may slow down execution time '
+                         'significantly')))
+
+    optimizations.append(improvement(
+        name='redeltaall',
+        type=optimisation,
+        description=_('deltas within internal storage will always be '
+                      'recalculated without reusing prior deltas; this will '
+                      'likely make execution run several times slower; this '
+                      'optimization is typically not needed'),
+        upgrademessage=_('deltas within internal storage will be fully '
+                         'recomputed; this will likely drastically slow down '
+                         'execution time')))
+
+    return optimizations
+
+def determineactions(repo, deficiencies, sourcereqs, destreqs):
+    """Determine upgrade actions that will be performed.
+
+    Given a list of improvements as returned by ``finddeficiencies`` and
+    ``findoptimizations``, determine the list of upgrade actions that
+    will be performed.
+
+    The role of this function is to filter improvements if needed, apply
+    recommended optimizations from the improvements list that make sense,
+    etc.
+
+    Returns a list of action names.
+    """
+    newactions = []
+
+    knownreqs = supporteddestrequirements(repo)
+
+    for d in deficiencies:
+        name = d.name
+
+        # If the action is a requirement that doesn't show up in the
+        # destination requirements, prune the action.
+        if name in knownreqs and name not in destreqs:
+            continue
+
+        newactions.append(d)
+
+    # FUTURE consider adding some optimizations here for certain transitions.
+    # e.g. adding generaldelta could schedule parent redeltas.
+
+    return newactions
+
+def _revlogfrompath(repo, path):
+    """Obtain a revlog from a repo path.
+
+    An instance of the appropriate class is returned.
+    """
+    if path == '00changelog.i':
+        return changelog.changelog(repo.svfs)
+    elif path.endswith('00manifest.i'):
+        mandir = path[:-len('00manifest.i')]
+        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+    else:
+        # Filelogs don't do anything special with settings. So we can use a
+        # vanilla revlog.
+        return revlog.revlog(repo.svfs, path)
+
+def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
+    """Copy revlogs between 2 repos."""
+    revcount = 0
+    srcsize = 0
+    srcrawsize = 0
+    dstsize = 0
+    fcount = 0
+    frevcount = 0
+    fsrcsize = 0
+    frawsize = 0
+    fdstsize = 0
+    mcount = 0
+    mrevcount = 0
+    msrcsize = 0
+    mrawsize = 0
+    mdstsize = 0
+    crevcount = 0
+    csrcsize = 0
+    crawsize = 0
+    cdstsize = 0
+
+    # Perform a pass to collect metadata. This validates we can open all
+    # source files and allows a unified progress bar to be displayed.
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        rl = _revlogfrompath(srcrepo, unencoded)
+        revcount += len(rl)
+
+        datasize = 0
+        rawsize = 0
+        idx = rl.index
+        for rev in rl:
+            e = idx[rev]
+            datasize += e[1]
+            rawsize += e[2]
+
+        srcsize += datasize
+        srcrawsize += rawsize
+
+        # This is for the separate progress bars.
+        if isinstance(rl, changelog.changelog):
+            crevcount += len(rl)
+            csrcsize += datasize
+            crawsize += rawsize
+        elif isinstance(rl, manifest.manifestrevlog):
+            mcount += 1
+            mrevcount += len(rl)
+            msrcsize += datasize
+            mrawsize += rawsize
+        elif isinstance(rl, revlog.revlog):
+            fcount += 1
+            frevcount += len(rl)
+            fsrcsize += datasize
+            frawsize += rawsize
+
+    if not revcount:
+        return
+
+    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
+               '%d in changelog)\n') %
+             (revcount, frevcount, mrevcount, crevcount))
+    ui.write(_('migrating %s in store; %s tracked data\n') % (
+             (util.bytecount(srcsize), util.bytecount(srcrawsize))))
+
+    # Used to keep track of progress.
+    progress = []
+    def oncopiedrevision(rl, rev, node):
+        progress[1] += 1
+        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
+
+    # Do the actual copying.
+    # FUTURE this operation can be farmed off to worker processes.
+    seen = set()
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        oldrl = _revlogfrompath(srcrepo, unencoded)
+        newrl = _revlogfrompath(dstrepo, unencoded)
+
+        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
+            ui.write(_('finished migrating %d manifest revisions across %d '
+                       'manifests; change in size: %s\n') %
+                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
+
+            ui.write(_('migrating changelog containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (crevcount, util.bytecount(csrcsize),
+                      util.bytecount(crawsize)))
+            seen.add('c')
+            progress[:] = [_('changelog revisions'), 0, crevcount]
+        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
+            ui.write(_('finished migrating %d filelog revisions across %d '
+                       'filelogs; change in size: %s\n') %
+                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
+
+            ui.write(_('migrating %d manifests containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (mcount, mrevcount, util.bytecount(msrcsize),
+                      util.bytecount(mrawsize)))
+            seen.add('m')
+            progress[:] = [_('manifest revisions'), 0, mrevcount]
+        elif 'f' not in seen:
+            ui.write(_('migrating %d filelogs containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (fcount, frevcount, util.bytecount(fsrcsize),
+                      util.bytecount(frawsize)))
+            seen.add('f')
+            progress[:] = [_('file revisions'), 0, frevcount]
+
+        ui.progress(progress[0], progress[1], total=progress[2])
+
+        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
+        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
+                    deltareuse=deltareuse,
+                    aggressivemergedeltas=aggressivemergedeltas)
+
+        datasize = 0
+        idx = newrl.index
+        for rev in newrl:
+            datasize += idx[rev][1]
+
+        dstsize += datasize
+
+        if isinstance(newrl, changelog.changelog):
+            cdstsize += datasize
+        elif isinstance(newrl, manifest.manifestrevlog):
+            mdstsize += datasize
+        else:
+            fdstsize += datasize
+
+    ui.progress(progress[0], None)
+
+    ui.write(_('finished migrating %d changelog revisions; change in size: '
+               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
+
+    ui.write(_('finished migrating %d total revisions; total change in store '
+               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
+
+def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
+    """Determine whether to copy a store file during upgrade.
+
+    This function is called when migrating store files from ``srcrepo`` to
+    ``dstrepo`` as part of upgrading a repository.
+
+    Args:
+      srcrepo: repo we are copying from
+      dstrepo: repo we are copying to
+      requirements: set of requirements for ``dstrepo``
+      path: store file being examined
+      mode: the ``ST_MODE`` file type of ``path``
+      st: ``stat`` data structure for ``path``
+
+    Function should return ``True`` if the file is to be copied.
+    """
+    # Skip revlogs.
+    if path.endswith(('.i', '.d')):
+        return False
+    # Skip transaction related files.
+    if path.startswith('undo'):
+        return False
+    # Only copy regular files.
+    if mode != stat.S_IFREG:
+        return False
+    # Skip other skipped files.
+    if path in ('lock', 'fncache'):
+        return False
+
+    return True
+
+def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
+    """Hook point for extensions to perform additional actions during upgrade.
+
+    This function is called after revlogs and store files have been copied but
+    before the new store is swapped into the original location.
+    """
+
+def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
+    """Do the low-level work of upgrading a repository.
+
+    The upgrade is effectively performed as a copy between a source
+    repository and a temporary destination repository.
+
+    The source repository is unmodified for as long as possible so the
+    upgrade can abort at any time without causing loss of service for
+    readers and without corrupting the source repository.
+    """
+    assert srcrepo.currentwlock()
+    assert dstrepo.currentwlock()
+
+    ui.write(_('(it is safe to interrupt this process any time before '
+               'data migration completes)\n'))
+
+    if 'redeltaall' in actions:
+        deltareuse = revlog.revlog.DELTAREUSENEVER
+    elif 'redeltaparent' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    elif 'redeltamultibase' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    else:
+        deltareuse = revlog.revlog.DELTAREUSEALWAYS
+
+    with dstrepo.transaction('upgrade') as tr:
+        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
+                     'redeltamultibase' in actions)
+
+    # Now copy other files in the store directory.
+    # The sorted() makes execution deterministic.
+    for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
+        if not _filterstorefile(srcrepo, dstrepo, requirements,
+                                p, kind, st):
+            continue
+
+        srcrepo.ui.write(_('copying %s\n') % p)
+        src = srcrepo.store.rawvfs.join(p)
+        dst = dstrepo.store.rawvfs.join(p)
+        util.copyfile(src, dst, copystat=True)
+
+    _finishdatamigration(ui, srcrepo, dstrepo, requirements)
+
+    ui.write(_('data fully migrated to temporary repository\n'))
+
+    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
+    backupvfs = vfsmod.vfs(backuppath)
+
+    # Make a backup of requires file first, as it is the first to be modified.
+    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
+
+    # We install an arbitrary requirement that clients must not support
+    # as a mechanism to lock out new clients during the data swap. This is
+    # better than allowing a client to continue while the repository is in
+    # an inconsistent state.
+    ui.write(_('marking source repository as being upgraded; clients will be '
+               'unable to read from repository\n'))
+    scmutil.writerequires(srcrepo.vfs,
+                          srcrepo.requirements | set(['upgradeinprogress']))
+
+    ui.write(_('starting in-place swap of repository data\n'))
+    ui.write(_('replaced files will be backed up at %s\n') %
+             backuppath)
+
+    # Now swap in the new store directory. Doing it as a rename should make
+    # the operation nearly instantaneous and atomic (at least in well-behaved
+    # environments).
+    ui.write(_('replacing store...\n'))
+    tstart = util.timer()
+    util.rename(srcrepo.spath, backupvfs.join('store'))
+    util.rename(dstrepo.spath, srcrepo.spath)
+    elapsed = util.timer() - tstart
+    ui.write(_('store replacement complete; repository was inconsistent for '
+               '%0.1fs\n') % elapsed)
+
+    # We first write the requirements file. Any new requirements will lock
+    # out legacy clients.
+    ui.write(_('finalizing requirements file and making repository readable '
+               'again\n'))
+    scmutil.writerequires(srcrepo.vfs, requirements)
+
+    # The lock file from the old store won't be removed because nothing has a
+    # reference to its new location. So clean it up manually. Alternatively, we
+    # could update srcrepo.svfs and other variables to point to the new
+    # location. This is simpler.
+    backupvfs.unlink('store/lock')
+
+    return backuppath
+
+def upgraderepo(ui, repo, run=False, optimize=None):
+    """Upgrade a repository in place."""
+    optimize = set(optimize or [])
+    repo = repo.unfiltered()
+
+    # Ensure the repository can be upgraded.
+    missingreqs = requiredsourcerequirements(repo) - repo.requirements
+    if missingreqs:
+        raise error.Abort(_('cannot upgrade repository; requirement '
+                            'missing: %s') % _(', ').join(sorted(missingreqs)))
+
+    blockedreqs = blocksourcerequirements(repo) & repo.requirements
+    if blockedreqs:
+        raise error.Abort(_('cannot upgrade repository; unsupported source '
+                            'requirement: %s') %
+                          _(', ').join(sorted(blockedreqs)))
+
+    # FUTURE there is potentially a need to control the wanted requirements via
+    # command arguments or via an extension hook point.
+    newreqs = localrepo.newreporequirements(repo)
+
+    noremovereqs = (repo.requirements - newreqs -
+                   supportremovedrequirements(repo))
+    if noremovereqs:
+        raise error.Abort(_('cannot upgrade repository; requirement would be '
+                            'removed: %s') % _(', ').join(sorted(noremovereqs)))
+
+    noaddreqs = (newreqs - repo.requirements -
+                 allowednewrequirements(repo))
+    if noaddreqs:
+        raise error.Abort(_('cannot upgrade repository; do not support adding '
+                            'requirement: %s') %
+                          _(', ').join(sorted(noaddreqs)))
+
+    unsupportedreqs = newreqs - supporteddestrequirements(repo)
+    if unsupportedreqs:
+        raise error.Abort(_('cannot upgrade repository; do not support '
+                            'destination requirement: %s') %
+                          _(', ').join(sorted(unsupportedreqs)))
+
+    # Find and validate all improvements that can be made.
+    alloptimizations = findoptimizations(repo)
+
+    # Apply and validate arguments.
+    optimizations = []
+    for o in alloptimizations:
+        if o.name in optimize:
+            optimizations.append(o)
+            optimize.discard(o.name)
+
+    if optimize: # anything left is unknown
+        raise error.Abort(_('unknown optimization action requested: %s') %
+                          ', '.join(sorted(optimize)),
+                          hint=_('run without arguments to see valid '
+                                 'optimizations'))
+
+    deficiencies = finddeficiencies(repo)
+    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
+    actions.extend(o for o in sorted(optimizations)
+                   # determineactions could have added optimisations
+                   if o not in actions)
+
+    def printrequirements():
+        ui.write(_('requirements\n'))
+        ui.write(_('   preserved: %s\n') %
+                 _(', ').join(sorted(newreqs & repo.requirements)))
+
+        if repo.requirements - newreqs:
+            ui.write(_('   removed: %s\n') %
+                     _(', ').join(sorted(repo.requirements - newreqs)))
+
+        if newreqs - repo.requirements:
+            ui.write(_('   added: %s\n') %
+                     _(', ').join(sorted(newreqs - repo.requirements)))
+
+        ui.write('\n')
+
+    def printupgradeactions():
+        for a in actions:
+            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))
+
+    if not run:
+        fromconfig = []
+        onlydefault = []
+
+        for d in deficiencies:
+            if d.fromconfig(repo):
+                fromconfig.append(d)
+            elif d.default:
+                onlydefault.append(d)
+
+        if fromconfig or onlydefault:
+
+            if fromconfig:
+                ui.write(_('repository lacks features recommended by '
+                           'current config options:\n\n'))
+                for i in fromconfig:
+                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+
+            if onlydefault:
+                ui.write(_('repository lacks features used by the default '
+                           'config options:\n\n'))
+                for i in onlydefault:
+                    ui.write('%s\n   %s\n\n' % (i.name, i.description))
+
+            ui.write('\n')
+        else:
+            ui.write(_('(no feature deficiencies found in existing '
+                       'repository)\n'))
+
+        ui.write(_('performing an upgrade with "--run" will make the following '
+                   'changes:\n\n'))
+
+        printrequirements()
+        printupgradeactions()
+
+        unusedoptimize = [i for i in alloptimizations if i not in actions]
+
+        if unusedoptimize:
+            ui.write(_('additional optimizations are available by specifying '
+                     '"--optimize <name>":\n\n'))
+            for i in unusedoptimize:
+                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
+        return
+
+    # Else we're in the run=true case.
+    ui.write(_('upgrade will perform the following actions:\n\n'))
+    printrequirements()
+    printupgradeactions()
+
+    upgradeactions = [a.name for a in actions]
+
+    ui.write(_('beginning upgrade...\n'))
+    with repo.wlock():
+        with repo.lock():
+            ui.write(_('repository locked and read-only\n'))
+            # Our strategy for upgrading the repository is to create a new,
+            # temporary repository, write data to it, then do a swap of the
+            # data. There are less heavyweight ways to do this, but it is easier
+            # to create a new repo object than to instantiate all the components
+            # (like the store) separately.
+            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
+            backuppath = None
+            try:
+                ui.write(_('creating temporary repository to stage migrated '
+                           'data: %s\n') % tmppath)
+                dstrepo = localrepo.localrepository(repo.baseui,
+                                                    path=tmppath,
+                                                    create=True)
+
+                with dstrepo.wlock():
+                    with dstrepo.lock():
+                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
+                                                  upgradeactions)
+
+            finally:
+                ui.write(_('removing temporary repository %s\n') % tmppath)
+                repo.vfs.rmtree(tmppath, forcibly=True)
+
+                if backuppath:
+                    ui.warn(_('copy of old repository backed up at %s\n') %
+                            backuppath)
+                    ui.warn(_('the old repository will not be deleted; remove '
+                              'it to free up disk space once the upgraded '
+                              'repository is verified\n'))
--- a/mercurial/url.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/url.py	Tue Apr 18 12:24:34 2017 -0400
@@ -417,6 +417,35 @@
         else:
             return None
 
+class cookiehandler(urlreq.basehandler):
+    def __init__(self, ui):
+        self.cookiejar = None
+
+        cookiefile = ui.config('auth', 'cookiefile')
+        if not cookiefile:
+            return
+
+        cookiefile = util.expandpath(cookiefile)
+        try:
+            cookiejar = util.cookielib.MozillaCookieJar(cookiefile)
+            cookiejar.load()
+            self.cookiejar = cookiejar
+        except util.cookielib.LoadError as e:
+            ui.warn(_('(error loading cookie file %s: %s; continuing without '
+                      'cookies)\n') % (cookiefile, str(e)))
+
+    def http_request(self, request):
+        if self.cookiejar:
+            self.cookiejar.add_cookie_header(request)
+
+        return request
+
+    def https_request(self, request):
+        if self.cookiejar:
+            self.cookiejar.add_cookie_header(request)
+
+        return request
+
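[editor's note: an example configuration the handler above reads; the path is
illustrative. The file must be in the Mozilla/Netscape cookies.txt format
understood by MozillaCookieJar, and a malformed file only produces a warning.]

    [auth]
    cookiefile = ~/.hgcookies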
 handlerfuncs = []
 
 def opener(ui, authinfo=None):
@@ -450,6 +479,7 @@
     handlers.extend((httpbasicauthhandler(passmgr),
                      httpdigestauthhandler(passmgr)))
     handlers.extend([h(ui, passmgr) for h in handlerfuncs])
+    handlers.append(cookiehandler(ui))
     opener = urlreq.buildopener(*handlers)
 
     # The user agent should *NOT* be used by servers for e.g.
--- a/mercurial/util.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/util.py	Tue Apr 18 12:24:34 2017 -0400
@@ -17,6 +17,7 @@
 
 import bz2
 import calendar
+import codecs
 import collections
 import datetime
 import errno
@@ -37,6 +38,7 @@
 import textwrap
 import time
 import traceback
+import warnings
 import zlib
 
 from . import (
@@ -48,6 +50,7 @@
     pycompat,
 )
 
+cookielib = pycompat.cookielib
 empty = pycompat.empty
 httplib = pycompat.httplib
 httpserver = pycompat.httpserver
@@ -59,13 +62,24 @@
 stdout = pycompat.stdout
 stringio = pycompat.stringio
 urlerr = pycompat.urlerr
-urlparse = pycompat.urlparse
 urlreq = pycompat.urlreq
 xmlrpclib = pycompat.xmlrpclib
 
+def isatty(fp):
+    try:
+        return fp.isatty()
+    except AttributeError:
+        return False
+
+# glibc determines buffering on first write to stdout - if we replace a
+# TTY-destined stdout with a pipe-destined stdout (e.g. pager), we want line
+# buffering
+if isatty(stdout):
+    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
+
 if pycompat.osname == 'nt':
     from . import windows as platform
-    stdout = platform.winstdout(pycompat.stdout)
+    stdout = platform.winstdout(stdout)
 else:
     from . import posix as platform
 
@@ -123,7 +137,6 @@
 testpid = platform.testpid
 umask = platform.umask
 unlink = platform.unlink
-unlinkpath = platform.unlinkpath
 username = platform.username
 
 # Python compatibility
@@ -144,6 +157,31 @@
         bits |= bit
     return bits
 
+# Python 2.6 still has deprecation warnings enabled by default. We do not want
+# to display anything to standard users, so detect whether we are running
+# tests and only enable Python deprecation warnings in that case.
+_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
+if _dowarn:
+    # explicitly unfilter our warning for python 2.7
+    #
+    # The option of setting PYTHONWARNINGS in the test runner was investigated.
+    # However, the module name set through PYTHONWARNINGS is matched exactly,
+    # so we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'.
+    # That makes PYTHONWARNINGS useless for our use case.
+    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
+    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
+    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
+
+def nouideprecwarn(msg, version, stacklevel=1):
+    """Issue an python native deprecation warning
+
+    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
+    """
+    if _dowarn:
+        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
+                " update your code.)") % version
+        warnings.warn(msg, DeprecationWarning, stacklevel + 1)
+
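A minimal usage sketch for the helper above (olddoit/newdoit are hypothetical names; nothing is emitted unless HGEMITWARNINGS is set, per _dowarn):

    def olddoit(*args):
        # hypothetical deprecated shim kept for API compatibility
        util.nouideprecwarn('olddoit is deprecated, use newdoit', '4.2',
                            stacklevel=2)
        return newdoit(*args)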
 DIGESTS = {
     'md5': hashlib.md5,
     'sha1': hashlib.sha1,
@@ -544,11 +582,11 @@
         dict.__delitem__(self, key)
         self._list.remove(key)
     def pop(self, key, *args, **kwargs):
-        dict.pop(self, key, *args, **kwargs)
         try:
             self._list.remove(key)
         except ValueError:
             pass
+        return dict.pop(self, key, *args, **kwargs)
     def keys(self):
         return self._list[:]
     def iterkeys(self):
@@ -797,7 +835,7 @@
     inname, outname = None, None
     try:
         infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
-        fp = os.fdopen(infd, 'wb')
+        fp = os.fdopen(infd, pycompat.sysstr('wb'))
         fp.write(s)
         fp.close()
         outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
@@ -943,10 +981,7 @@
     # executable version (py2exe) doesn't support __file__
     datapath = os.path.dirname(pycompat.sysexecutable)
 else:
-    datapath = os.path.dirname(__file__)
-
-if not isinstance(datapath, bytes):
-    datapath = pycompat.fsencode(datapath)
+    datapath = os.path.dirname(pycompat.fsencode(__file__))
 
 i18n.setdatapath(datapath)
 
@@ -959,7 +994,7 @@
     """
     if _hgexecutable is None:
         hg = encoding.environ.get('HG')
-        mainmod = sys.modules['__main__']
+        mainmod = sys.modules[pycompat.sysstr('__main__')]
         if hg:
             _sethgexecutable(hg)
         elif mainfrozen():
@@ -968,8 +1003,9 @@
                 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
             else:
                 _sethgexecutable(pycompat.sysexecutable)
-        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
-            _sethgexecutable(mainmod.__file__)
+        elif (os.path.basename(
+            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
+            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
         else:
             exe = findexe('hg') or os.path.basename(sys.argv[0])
             _sethgexecutable(exe)
@@ -999,20 +1035,16 @@
     env['HG'] = hgexecutable()
     return env
 
-def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
+def system(cmd, environ=None, cwd=None, out=None):
     '''enhanced shell command execution.
     run with environment maybe modified, maybe in different dir.
 
-    if command fails and onerr is None, return status, else raise onerr
-    object as exception.
-
     if out is specified, it is assumed to be a file-like object that has a
     write() method. stdout and stderr will be redirected to out.'''
     try:
         stdout.flush()
     except Exception:
         pass
-    origcmd = cmd
     cmd = quotecommand(cmd)
     if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                     and sys.version_info[1] < 7):
@@ -1036,12 +1068,6 @@
             rc = proc.returncode
         if pycompat.sysplatform == 'OpenVMS' and rc & 1:
             rc = 0
-    if rc and onerr:
-        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
-                            explainexit(rc)[0])
-        if errprefix:
-            errmsg = '%s: %s' % (errprefix, errmsg)
-        raise onerr(errmsg)
     return rc
 
 def checksignature(func):
@@ -1056,6 +1082,21 @@
 
     return check
 
+# a whitelist of known filesystems where hardlinks work reliably
+_hardlinkfswhitelist = set([
+    'btrfs',
+    'ext2',
+    'ext3',
+    'ext4',
+    'hfs',
+    'jfs',
+    'reiserfs',
+    'tmpfs',
+    'ufs',
+    'xfs',
+    'zfs',
+])
+
 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
     '''copy a file, preserving mode and optionally other stat info like
     atime/mtime
@@ -1072,9 +1113,16 @@
         if checkambig:
             oldstat = checkambig and filestat(dest)
         unlink(dest)
-    # hardlinks are problematic on CIFS, quietly ignore this flag
-    # until we find a way to work around it cleanly (issue4546)
-    if False and hardlink:
+    if hardlink:
+        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
+        # unless we are confident that dest is on a whitelisted filesystem.
+        try:
+            fstype = getfstype(os.path.dirname(dest))
+        except OSError:
+            fstype = None
+        if fstype not in _hardlinkfswhitelist:
+            hardlink = False
+    if hardlink:
         try:
             oslink(src, dest)
             return
@@ -1105,15 +1153,13 @@
     """Copy a directory tree using hardlinks if possible."""
     num = 0
 
-    if hardlink is None:
-        hardlink = (os.stat(src).st_dev ==
-                    os.stat(os.path.dirname(dst)).st_dev)
-    if hardlink:
-        topic = _('linking')
-    else:
-        topic = _('copying')
+    gettopic = lambda: hardlink and _('linking') or _('copying')
 
     if os.path.isdir(src):
+        if hardlink is None:
+            hardlink = (os.stat(src).st_dev ==
+                        os.stat(os.path.dirname(dst)).st_dev)
+        topic = gettopic()
         os.mkdir(dst)
         for name, kind in osutil.listdir(src):
             srcname = os.path.join(src, name)
@@ -1124,6 +1170,11 @@
             hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
             num += n
     else:
+        if hardlink is None:
+            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
+                        os.stat(os.path.dirname(dst)).st_dev)
+        topic = gettopic()
+
         if hardlink:
             try:
                 oslink(src, dst)
@@ -1173,7 +1224,7 @@
     for n in path.replace('\\', '/').split('/'):
         if not n:
             continue
-        for c in n:
+        for c in pycompat.bytestr(n):
             if c in _winreservedchars:
                 return _("filename contains '%s', which is reserved "
                          "on Windows") % c
@@ -1191,8 +1242,13 @@
 
 if pycompat.osname == 'nt':
     checkosfilename = checkwinfilename
+    timer = time.clock
 else:
     checkosfilename = platform.checkosfilename
+    timer = time.time
+
+if safehasattr(time, "perf_counter"):
+    timer = time.perf_counter
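With the fallbacks above, ``timer`` is the best clock available for measuring elapsed wall time; a small sketch:

    start = timer()
    dowork()                   # hypothetical workload
    elapsed = timer() - start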
 
 def makelock(info, pathname):
     try:
@@ -1322,7 +1378,7 @@
         seps = seps + pycompat.osaltsep
     # Protect backslashes. This gets silly very quickly.
     seps.replace('\\','\\\\')
-    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
+    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
     dir = os.path.normpath(root)
     result = []
     for part, sep in pattern.findall(name):
@@ -1346,6 +1402,13 @@
 
     return ''.join(result)
 
+def getfstype(dirpath):
+    '''Get the filesystem type name from a directory (best-effort)
+
+    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
+    '''
+    return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
+
 def checknlink(testfile):
     '''check whether hardlink count reporting works properly'''
 
@@ -1596,6 +1659,26 @@
         else:
             self.close()
 
+def unlinkpath(f, ignoremissing=False):
+    """unlink and remove the directory if it is empty"""
+    if ignoremissing:
+        tryunlink(f)
+    else:
+        unlink(f)
+    # try removing directories that might now be empty
+    try:
+        removedirs(os.path.dirname(f))
+    except OSError:
+        pass
+
+def tryunlink(f):
+    """Attempt to remove a file, ignoring ENOENT errors."""
+    try:
+        unlink(f)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
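A quick sketch contrasting the two helpers (paths are hypothetical):

    tryunlink('a/b/c')          # ENOENT is silently ignored
    unlinkpath('a/b/c')         # raises if missing, then prunes empty parents
    unlinkpath('a/b/c', ignoremissing=True)   # combines both behaviors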
 def makedirs(name, mode=None, notindexed=False):
     """recursive directory creation with parent mode inheritance
 
@@ -1784,7 +1867,7 @@
     # because they use the gmtime() system call which is buggy on Windows
     # for negative values.
     t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
-    s = t.strftime(format)
+    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
     return s
 
 def shortdate(date=None):
@@ -1819,9 +1902,12 @@
 
     return None, s
 
-def strdate(string, format, defaults=[]):
+def strdate(string, format, defaults=None):
     """parse a localized time string and return a (unixtime, offset) tuple.
     if the string cannot be parsed, ValueError is raised."""
+    if defaults is None:
+        defaults = {}
+
     # NOTE: unixtime = localunixtime + offset
     offset, date = parsetimezone(string)
 
@@ -2101,12 +2187,33 @@
 
     def go(count):
         for multiplier, divisor, format in unittable:
-            if count >= divisor * multiplier:
+            if abs(count) >= divisor * multiplier:
                 return format % (count / float(divisor))
         return unittable[-1][2] % count
 
     return go
 
+def processlinerange(fromline, toline):
+    """Check that linerange <fromline>:<toline> makes sense and return a
+    0-based range.
+
+    >>> processlinerange(10, 20)
+    (9, 20)
+    >>> processlinerange(2, 1)
+    Traceback (most recent call last):
+        ...
+    ParseError: line range must be positive
+    >>> processlinerange(0, 5)
+    Traceback (most recent call last):
+        ...
+    ParseError: fromline must be strictly positive
+    """
+    if toline - fromline < 0:
+        raise error.ParseError(_("line range must be positive"))
+    if fromline < 1:
+        raise error.ParseError(_("fromline must be strictly positive"))
+    return fromline - 1, toline
+
 bytecount = unitcountfn(
     (100, 1 << 30, _('%.0f GB')),
     (10, 1 << 30, _('%.1f GB')),
@@ -2120,6 +2227,32 @@
     (1, 1, _('%.0f bytes')),
     )
 
+# Matches a single EOL which can either be a CRLF where repeated CRs
+# are removed or a LF. We do not care about old Macintosh files, so a
+# stray CR is an error.
+_eolre = remod.compile(br'\r*\n')
+
+def tolf(s):
+    return _eolre.sub('\n', s)
+
+def tocrlf(s):
+    return _eolre.sub('\r\n', s)
+
+if pycompat.oslinesep == '\r\n':
+    tonativeeol = tocrlf
+    fromnativeeol = tolf
+else:
+    tonativeeol = pycompat.identity
+    fromnativeeol = pycompat.identity
+
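A quick sketch of the conversions (a stray CR not followed by LF is left untouched, matching the comment above):

    tolf(b'a\r\nb\n')      # -> b'a\nb\n'
    tocrlf(b'a\nb\r\n')    # -> b'a\r\nb\r\n'
    fromnativeeol(data)    # normalize this platform's line endings to LF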
+def escapestr(s):
+    # call underlying function of s.encode('string_escape') directly for
+    # Python 3 compatibility
+    return codecs.escape_encode(s)[0]
+
+def unescapestr(s):
+    return codecs.escape_decode(s)[0]
+
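A round-trip sketch for these codecs-based helpers:

    s = b'line1\nline2\t'
    escapestr(s)                      # -> b'line1\\nline2\\t'
    unescapestr(escapestr(s)) == s    # True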
 def uirepr(s):
     # Avoid double backslash in Windows path repr()
     return repr(s).replace('\\\\', '\\')
@@ -2233,13 +2366,16 @@
     if width <= maxindent:
         # adjust for weird terminal size
         width = max(78, maxindent + 1)
-    line = line.decode(encoding.encoding, encoding.encodingmode)
-    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
-    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
+    line = line.decode(pycompat.sysstr(encoding.encoding),
+                    pycompat.sysstr(encoding.encodingmode))
+    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
+                    pycompat.sysstr(encoding.encodingmode))
+    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
+                    pycompat.sysstr(encoding.encodingmode))
     wrapper = MBTextWrapper(width=width,
                             initial_indent=initindent,
                             subsequent_indent=hangindent)
-    return wrapper.fill(line).encode(encoding.encoding)
+    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
 
 if (pyplatform.python_implementation() == 'CPython' and
     sys.version_info < (3, 0)):
@@ -2595,7 +2731,7 @@
                   'path', 'fragment'):
             v = getattr(self, a)
             if v is not None:
-                setattr(self, a, pycompat.urlunquote(v))
+                setattr(self, a, urlreq.unquote(v))
 
     def __repr__(self):
         attrs = []
@@ -2640,6 +2776,9 @@
         >>> print url(r'file:///D:\data\hg')
         file:///D:\data\hg
         """
+        return encoding.strfromlocal(self.__bytes__())
+
+    def __bytes__(self):
         if self._localpath:
             s = self.path
             if self.scheme == 'bundle':
@@ -2687,7 +2826,7 @@
         user, passwd = self.user, self.passwd
         try:
             self.user, self.passwd = None, None
-            s = str(self)
+            s = bytes(self)
         finally:
             self.user, self.passwd = user, passwd
         if not self.user:
@@ -2742,7 +2881,7 @@
     u = url(u)
     if u.passwd:
         u.passwd = '***'
-    return str(u)
+    return bytes(u)
 
 def removeauth(u):
     '''remove all authentication information from a url string'''
@@ -2750,12 +2889,6 @@
     u.user = u.passwd = None
     return str(u)
 
-def isatty(fp):
-    try:
-        return fp.isatty()
-    except AttributeError:
-        return False
-
 timecount = unitcountfn(
     (1, 1e3, _('%.0f s')),
     (100, 1, _('%.1f s')),
@@ -2786,13 +2919,13 @@
     '''
 
     def wrapper(*args, **kwargs):
-        start = time.time()
+        start = timer()
         indent = 2
         _timenesting[0] += indent
         try:
             return func(*args, **kwargs)
         finally:
-            elapsed = time.time() - start
+            elapsed = timer() - start
             _timenesting[0] -= indent
             stderr.write('%s%s: %s\n' %
                          (' ' * _timenesting[0], func.__name__,
@@ -2839,9 +2972,9 @@
             results.append(hook(*args))
         return results
 
-def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
+def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
     '''Yields lines for a nicely formatted stacktrace.
-    Skips the 'skip' last entries.
+    Skips the 'skip' last entries, then returns the last 'depth' entries.
     Each file+linenumber is formatted according to fileline.
     Each line is formatted according to line.
     If line is None, it yields:
@@ -2852,7 +2985,8 @@
     Not to be used in production code but very convenient while developing.
     '''
     entries = [(fileline % (fn, ln), func)
-        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
+        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
+        ][-depth:]
     if entries:
         fnmax = max(len(entry[0]) for entry in entries)
         for fnln, func in entries:
@@ -2861,16 +2995,18 @@
             else:
                 yield line % (fnmax, fnln, func)
 
-def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
+def debugstacktrace(msg='stacktrace', skip=0,
+                    f=stderr, otherf=stdout, depth=0):
     '''Writes a message to f (stderr) with a nicely formatted stacktrace.
-    Skips the 'skip' last entries. By default it will flush stdout first.
+    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
+    By default it will flush stdout first.
     It can be used everywhere and intentionally does not require an ui object.
     Not to be used in production code but very convenient while developing.
     '''
     if otherf:
         otherf.flush()
-    f.write('%s at:\n' % msg)
-    for line in getstackframes(skip + 1):
+    f.write('%s at:\n' % msg.rstrip())
+    for line in getstackframes(skip + 1, depth=depth):
         f.write(line)
     f.flush()
 
@@ -2905,7 +3041,7 @@
             del dirs[base]
 
     def __iter__(self):
-        return self._dirs.iterkeys()
+        return iter(self._dirs)
 
     def __contains__(self, d):
         return d in self._dirs
@@ -3179,6 +3315,9 @@
 
         If bundle compression is supported, the class must also implement
         ``compressstream`` and `decompressorreader``.
+
+        The docstring of this method is used in the help system to tell users
+        about this engine.
         """
         return None
 
@@ -3263,6 +3402,12 @@
         return 'zlib'
 
     def bundletype(self):
+        """zlib compression using the DEFLATE algorithm.
+
+        All Mercurial clients should support this format. The compression
+        algorithm strikes a reasonable balance between compression ratio
+        and speed.
+        """
         return 'gzip', 'GZ'
 
     def wireprotosupport(self):
@@ -3344,6 +3489,17 @@
         return 'bz2'
 
     def bundletype(self):
+        """An algorithm that produces smaller bundles than ``gzip``.
+
+        All Mercurial clients should support this format.
+
+        This engine will likely produce smaller bundles than ``gzip`` but
+        will be significantly slower, both during compression and
+        decompression.
+
+        If available, the ``zstd`` engine can yield similar or better
+        compression at much higher speeds.
+        """
         return 'bzip2', 'BZ'
 
     # We declare a protocol name but don't advertise by default because
@@ -3397,6 +3553,10 @@
         return 'none'
 
     def bundletype(self):
+        """No compression is performed.
+
+        Use this compression engine to explicitly disable compression.
+        """
         return 'none', 'UN'
 
     # Clients always support uncompressed payloads. Servers don't because
@@ -3443,6 +3603,17 @@
         return bool(self._module)
 
     def bundletype(self):
+        """A modern compression algorithm that is fast and highly flexible.
+
+        Only supported by Mercurial 4.1 and newer clients.
+
+        With the default settings, zstd compression is both faster and yields
+        better compression than ``gzip``. It also frequently yields better
+        compression than ``bzip2`` while operating at much higher speeds.
+
+        If this engine is available and backwards compatibility is not a
+        concern, it is likely the best available engine.
+        """
         return 'zstd', 'ZS'
 
     def wireprotosupport(self):
@@ -3541,5 +3712,35 @@
 
 compengines.register(_zstdengine())
 
+def bundlecompressiontopics():
+    """Obtains a list of available bundle compressions for use in help."""
+    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
+    items = {}
+
+    # We need to format the docstring. So use a dummy object/type to hold it
+    # rather than mutating the original.
+    class docobject(object):
+        pass
+
+    for name in compengines:
+        engine = compengines[name]
+
+        if not engine.available():
+            continue
+
+        bt = engine.bundletype()
+        if not bt or not bt[0]:
+            continue
+
+        doc = pycompat.sysstr('``%s``\n    %s') % (
+            bt[0], engine.bundletype.__doc__)
+
+        value = docobject()
+        value.__doc__ = doc
+
+        items[bt[0]] = value
+
+    return items
+
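A hedged peek at the result (assuming the always-available zlib engine):

    items = bundlecompressiontopics()
    # items['gzip'].__doc__ begins with '``gzip``\n    zlib compression
    # using the DEFLATE algorithm. ...', ready for help.makeitemsdocs()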
 # convenient shortcut
 dst = debugstacktrace
--- a/mercurial/verify.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/verify.py	Tue Apr 18 12:24:34 2017 -0400
@@ -18,6 +18,7 @@
 from . import (
     error,
     revlog,
+    scmutil,
     util,
 )
 
@@ -32,21 +33,13 @@
         f = f.replace('//', '/')
     return f
 
-def _validpath(repo, path):
-    """Returns False if a path should NOT be treated as part of a repo.
-
-    For all in-core cases, this returns True, as we have no way for a
-    path to be mentioned in the history but not actually be
-    relevant. For narrow clones, this is important because many
-    filelogs will be missing, and changelog entries may mention
-    modified files that are outside the narrow scope.
-    """
-    return True
-
 class verifier(object):
-    def __init__(self, repo):
+    # The match argument is always None in hg core, but e.g. the narrowhg
+    # extension will pass in a matcher here.
+    def __init__(self, repo, match=None):
         self.repo = repo.unfiltered()
         self.ui = repo.ui
+        self.match = match or scmutil.matchall(repo)
         self.badrevs = set()
         self.errors = 0
         self.warnings = 0
@@ -170,6 +163,7 @@
     def _verifychangelog(self):
         ui = self.ui
         repo = self.repo
+        match = self.match
         cl = repo.changelog
 
         ui.status(_("checking changesets\n"))
@@ -189,7 +183,7 @@
                     mflinkrevs.setdefault(changes[0], []).append(i)
                     self.refersmf = True
                 for f in changes[3]:
-                    if _validpath(repo, f):
+                    if match(f):
                         filelinkrevs.setdefault(_normpath(f), []).append(i)
             except Exception as inst:
                 self.refersmf = True
@@ -201,6 +195,7 @@
                         progress=None):
         repo = self.repo
         ui = self.ui
+        match = self.match
         mfl = self.repo.manifestlog
         mf = mfl._revlog.dirlog(dir)
 
@@ -243,12 +238,14 @@
                     elif f == "/dev/null":  # ignore this in very old repos
                         continue
                     fullpath = dir + _normpath(f)
-                    if not _validpath(repo, fullpath):
-                        continue
                     if fl == 't':
+                        if not match.visitdir(fullpath):
+                            continue
                         subdirnodes.setdefault(fullpath + '/', {}).setdefault(
                             fn, []).append(lr)
                     else:
+                        if not match(fullpath):
+                            continue
                         filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
             except Exception as inst:
                 self.exc(lr, _("reading delta %s") % short(n), inst, label)
@@ -382,12 +379,59 @@
                     else:
                         del filenodes[f][n]
 
-                # verify contents
+                # Verify contents. 4 cases to care about:
+                #
+                #   common: the most common case
+                #   rename: with a rename
+                #   meta: file content starts with b'\1\n', the metadata
+                #         header defined in filelog.py, but without a rename
+                #   ext: content stored externally
+                #
+                # More formally, their differences are shown below:
+                #
+                #                       | common | rename | meta  | ext
+                #  -------------------------------------------------------
+                #   flags()             | 0      | 0      | 0     | not 0
+                #   renamed()           | False  | True   | False | ?
+                #   rawtext[0:2]=='\1\n'| False  | True   | True  | ?
+                #
+                # "rawtext" means the raw text stored in revlog data, which
+                # could be retrieved by "revision(rev, raw=True)". "text"
+                # mentioned below is "revision(rev, raw=False)".
+                #
+                # There are 3 different lengths stored physically:
+                #  1. L1: rawsize, stored in revlog index
+                #  2. L2: len(rawtext), stored in revlog data
+                #  3. L3: len(text), stored in revlog data if flags==0, or
+                #     possibly somewhere else if flags!=0
+                #
+                # L1 should be equal to L2. L3 could be different from them.
+                # "text" may or may not affect commit hash depending on flag
+                # processors (see revlog.addflagprocessor).
+                #
+                #              | common  | rename | meta  | ext
+                # -------------------------------------------------
+                #    rawsize() | L1      | L1     | L1    | L1
+                #       size() | L1      | L2-LM  | L1(*) | L1 (?)
+                # len(rawtext) | L2      | L2     | L2    | L2
+                #    len(text) | L2      | L2     | L2    | L3
+                #  len(read()) | L2      | L2-LM  | L2-LM | L3 (?)
+                #
+                # LM:  length of metadata, depending on rawtext
+                # (*): not ideal, see comment in filelog.size
+                # (?): could be "- len(meta)" if the resolved content has
+                #      rename metadata
+                #
+                # Checks needed to be done:
+                #  1. length check: L1 == L2, in all cases.
+                #  2. hash check: depending on flag processor, we may need to
+                #     use either "text" (external), or "rawtext" (in revlog).
                 try:
                     l = len(fl.read(n))
                     rp = fl.renamed(n)
                     if l != fl.size(i):
-                        if len(fl.revision(n)) != fl.size(i):
+                        # the "L1 == L2" check
+                        if len(fl.revision(n, raw=True)) != fl.rawsize(i):
                             self.err(lr, _("unpacked size is %s, %s expected") %
                                      (l, fl.size(i)), f)
                 except error.CensoredNodeError:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/vfs.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,637 @@
+# vfs.py - Mercurial 'vfs' classes
+#
+#  Copyright Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import os
+import shutil
+import stat
+import tempfile
+import threading
+
+from .i18n import _
+from . import (
+    error,
+    osutil,
+    pathutil,
+    pycompat,
+    util,
+)
+
+class abstractvfs(object):
+    """Abstract base class; cannot be instantiated"""
+
+    def __init__(self, *args, **kwargs):
+        '''Prevent instantiation; don't call this from subclasses.'''
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+
+    def tryread(self, path):
+        '''gracefully return an empty string for missing files'''
+        try:
+            return self.read(path)
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+        return ""
+
+    def tryreadlines(self, path, mode='rb'):
+        '''gracefully return an empty array for missing files'''
+        try:
+            return self.readlines(path, mode=mode)
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+        return []
+
+    @util.propertycache
+    def open(self):
+        '''Open ``path`` file, which is relative to vfs root.
+
+        Newly created directories are marked as "not to be indexed by
+        the content indexing service", if ``notindexed`` is specified
+        for "write" mode access.
+        '''
+        return self.__call__
+
+    def read(self, path):
+        with self(path, 'rb') as fp:
+            return fp.read()
+
+    def readlines(self, path, mode='rb'):
+        with self(path, mode=mode) as fp:
+            return fp.readlines()
+
+    def write(self, path, data, backgroundclose=False):
+        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
+            return fp.write(data)
+
+    def writelines(self, path, data, mode='wb', notindexed=False):
+        with self(path, mode=mode, notindexed=notindexed) as fp:
+            return fp.writelines(data)
+
+    def append(self, path, data):
+        with self(path, 'ab') as fp:
+            return fp.write(data)
+
+    def basename(self, path):
+        """return base element of a path (as os.path.basename would do)
+
+        This exists to allow handling of strange encoding if needed."""
+        return os.path.basename(path)
+
+    def chmod(self, path, mode):
+        return os.chmod(self.join(path), mode)
+
+    def dirname(self, path):
+        """return dirname element of a path (as os.path.dirname would do)
+
+        This exists to allow handling of strange encoding if needed."""
+        return os.path.dirname(path)
+
+    def exists(self, path=None):
+        return os.path.exists(self.join(path))
+
+    def fstat(self, fp):
+        return util.fstat(fp)
+
+    def isdir(self, path=None):
+        return os.path.isdir(self.join(path))
+
+    def isfile(self, path=None):
+        return os.path.isfile(self.join(path))
+
+    def islink(self, path=None):
+        return os.path.islink(self.join(path))
+
+    def isfileorlink(self, path=None):
+        '''return whether path is a regular file or a symlink
+
+        Unlike isfile, this doesn't follow symlinks.'''
+        try:
+            st = self.lstat(path)
+        except OSError:
+            return False
+        mode = st.st_mode
+        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
+
+    def reljoin(self, *paths):
+        """join various elements of a path together (as os.path.join would do)
+
+        The vfs base is not injected so that paths stay relative. This exists
+        to allow handling of strange encoding if needed."""
+        return os.path.join(*paths)
+
+    def split(self, path):
+        """split top-most element of a path (as os.path.split would do)
+
+        This exists to allow handling of strange encoding if needed."""
+        return os.path.split(path)
+
+    def lexists(self, path=None):
+        return os.path.lexists(self.join(path))
+
+    def lstat(self, path=None):
+        return os.lstat(self.join(path))
+
+    def listdir(self, path=None):
+        return os.listdir(self.join(path))
+
+    def makedir(self, path=None, notindexed=True):
+        return util.makedir(self.join(path), notindexed)
+
+    def makedirs(self, path=None, mode=None):
+        return util.makedirs(self.join(path), mode)
+
+    def makelock(self, info, path):
+        return util.makelock(info, self.join(path))
+
+    def mkdir(self, path=None):
+        return os.mkdir(self.join(path))
+
+    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
+        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
+                                    dir=self.join(dir), text=text)
+        dname, fname = util.split(name)
+        if dir:
+            return fd, os.path.join(dir, fname)
+        else:
+            return fd, fname
+
+    def readdir(self, path=None, stat=None, skip=None):
+        return osutil.listdir(self.join(path), stat, skip)
+
+    def readlock(self, path):
+        return util.readlock(self.join(path))
+
+    def rename(self, src, dst, checkambig=False):
+        """Rename from src to dst
+
+        The checkambig argument is used with util.filestat, and is useful
+        only if the destination file is guarded by a lock
+        (e.g. repo.lock or repo.wlock).
+        """
+        dstpath = self.join(dst)
+        oldstat = checkambig and util.filestat(dstpath)
+        if oldstat and oldstat.stat:
+            ret = util.rename(self.join(src), dstpath)
+            newstat = util.filestat(dstpath)
+            if newstat.isambig(oldstat):
+                # stat of renamed file is ambiguous to original one
+                newstat.avoidambig(dstpath, oldstat)
+            return ret
+        return util.rename(self.join(src), dstpath)
+
+    def readlink(self, path):
+        return os.readlink(self.join(path))
+
+    def removedirs(self, path=None):
+        """Remove a leaf directory and all empty intermediate ones
+        """
+        return util.removedirs(self.join(path))
+
+    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
+        """Remove a directory tree recursively
+
+        If ``forcibly``, this tries to remove READ-ONLY files, too.
+        """
+        if forcibly:
+            def onerror(function, path, excinfo):
+                if function is not os.remove:
+                    raise
+                # read-only files cannot be unlinked under Windows
+                s = os.stat(path)
+                if (s.st_mode & stat.S_IWRITE) != 0:
+                    raise
+                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
+                os.remove(path)
+        else:
+            onerror = None
+        return shutil.rmtree(self.join(path),
+                             ignore_errors=ignore_errors, onerror=onerror)
+
+    def setflags(self, path, l, x):
+        return util.setflags(self.join(path), l, x)
+
+    def stat(self, path=None):
+        return os.stat(self.join(path))
+
+    def unlink(self, path=None):
+        return util.unlink(self.join(path))
+
+    def tryunlink(self, path=None):
+        """Attempt to remove a file, ignoring missing file errors."""
+        util.tryunlink(self.join(path))
+
+    def unlinkpath(self, path=None, ignoremissing=False):
+        return util.unlinkpath(self.join(path), ignoremissing=ignoremissing)
+
+    def utime(self, path=None, t=None):
+        return os.utime(self.join(path), t)
+
+    def walk(self, path=None, onerror=None):
+        """Yield (dirpath, dirs, files) tuple for each directories under path
+
+        ``dirpath`` is relative one from the root of this vfs. This
+        uses ``os.sep`` as path separator, even you specify POSIX
+        style ``path``.
+
+        "The root of this vfs" is represented as empty ``dirpath``.
+        """
+        root = os.path.normpath(self.join(None))
+        # when dirpath == root, dirpath[prefixlen:] becomes empty
+        # because len(dirpath) < prefixlen.
+        prefixlen = len(pathutil.normasprefix(root))
+        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
+            yield (dirpath[prefixlen:], dirs, files)
+
+    @contextlib.contextmanager
+    def backgroundclosing(self, ui, expectedcount=-1):
+        """Allow files to be closed asynchronously.
+
+        When this context manager is active, ``backgroundclose`` can be passed
+        to ``__call__``/``open`` to result in the file possibly being closed
+        asynchronously, on a background thread.
+        """
+        # This is an arbitrary restriction and could be changed if we ever
+        # have a use case.
+        vfs = getattr(self, 'vfs', self)
+        if getattr(vfs, '_backgroundfilecloser', None):
+            raise error.Abort(
+                _('can only have 1 active background file closer'))
+
+        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
+            try:
+                vfs._backgroundfilecloser = bfc
+                yield bfc
+            finally:
+                vfs._backgroundfilecloser = None
+
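A usage sketch pairing the context manager with ``backgroundclose`` (myvfs and the files iterable are hypothetical; see ``__call__`` below for when this pays off):

    with myvfs.backgroundclosing(ui, expectedcount=len(files)):
        for name, data in files:
            with myvfs(name, 'wb', backgroundclose=True) as fp:
                fp.write(data)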
+class vfs(abstractvfs):
+    '''Operate files relative to a base directory
+
+    This class is used to hide the details of COW semantics and
+    remote file access from higher level code.
+    '''
+    def __init__(self, base, audit=True, expandpath=False, realpath=False):
+        if expandpath:
+            base = util.expandpath(base)
+        if realpath:
+            base = os.path.realpath(base)
+        self.base = base
+        self.mustaudit = audit
+        self.createmode = None
+        self._trustnlink = None
+
+    @property
+    def mustaudit(self):
+        return self._audit
+
+    @mustaudit.setter
+    def mustaudit(self, onoff):
+        self._audit = onoff
+        if onoff:
+            self.audit = pathutil.pathauditor(self.base)
+        else:
+            self.audit = util.always
+
+    @util.propertycache
+    def _cansymlink(self):
+        return util.checklink(self.base)
+
+    @util.propertycache
+    def _chmod(self):
+        return util.checkexec(self.base)
+
+    def _fixfilemode(self, name):
+        if self.createmode is None or not self._chmod:
+            return
+        os.chmod(name, self.createmode & 0o666)
+
+    def __call__(self, path, mode="r", text=False, atomictemp=False,
+                 notindexed=False, backgroundclose=False, checkambig=False):
+        '''Open ``path`` file, which is relative to vfs root.
+
+        Newly created directories are marked as "not to be indexed by
+        the content indexing service", if ``notindexed`` is specified
+        for "write" mode access.
+
+        If ``backgroundclose`` is passed, the file may be closed asynchronously.
+        It can only be used if the ``self.backgroundclosing()`` context manager
+        is active. This should only be specified if the following criteria hold:
+
+        1. There is a potential for writing thousands of files. Unless you
+           are writing thousands of files, the performance benefits of
+           asynchronously closing files are not realized.
+        2. Files are opened exactly once for the ``backgroundclosing``
+           active duration and are therefore free of race conditions between
+           closing a file on a background thread and reopening it. (If the
+           file were opened multiple times, there could be unflushed data
+           because the original file handle hasn't been flushed/closed yet.)
+
+        The ``checkambig`` argument is passed to atomictempfile (valid
+        only for writing), and is useful only if the target file is
+        guarded by a lock (e.g. repo.lock or repo.wlock).
+        '''
+        if self._audit:
+            r = util.checkosfilename(path)
+            if r:
+                raise error.Abort("%s: %r" % (r, path))
+        self.audit(path)
+        f = self.join(path)
+
+        if not text and "b" not in mode:
+            mode += "b" # for that other OS
+
+        nlink = -1
+        if mode not in ('r', 'rb'):
+            dirname, basename = util.split(f)
+            # If basename is empty, then the path is malformed because it points
+            # to a directory. Let the posixfile() call below raise IOError.
+            if basename:
+                if atomictemp:
+                    util.makedirs(dirname, self.createmode, notindexed)
+                    return util.atomictempfile(f, mode, self.createmode,
+                                               checkambig=checkambig)
+                try:
+                    if 'w' in mode:
+                        util.unlink(f)
+                        nlink = 0
+                    else:
+                        # nlinks() may behave differently for files on Windows
+                        # shares if the file is open.
+                        with util.posixfile(f):
+                            nlink = util.nlinks(f)
+                            if nlink < 1:
+                                nlink = 2 # force mktempcopy (issue1922)
+                except (OSError, IOError) as e:
+                    if e.errno != errno.ENOENT:
+                        raise
+                    nlink = 0
+                    util.makedirs(dirname, self.createmode, notindexed)
+                if nlink > 0:
+                    if self._trustnlink is None:
+                        self._trustnlink = nlink > 1 or util.checknlink(f)
+                    if nlink > 1 or not self._trustnlink:
+                        util.rename(util.mktempcopy(f), f)
+        fp = util.posixfile(f, mode)
+        if nlink == 0:
+            self._fixfilemode(f)
+
+        if checkambig:
+            if mode in ('r', 'rb'):
+                raise error.Abort(_('implementation error: mode %s is not'
+                                    ' valid for checkambig=True') % mode)
+            fp = checkambigatclosing(fp)
+
+        if backgroundclose:
+            if not self._backgroundfilecloser:
+                raise error.Abort(_('backgroundclose can only be used when a '
+                                  'backgroundclosing context manager is active')
+                                  )
+
+            fp = delayclosedfile(fp, self._backgroundfilecloser)
+
+        return fp
+
+    def symlink(self, src, dst):
+        self.audit(dst)
+        linkname = self.join(dst)
+        util.tryunlink(linkname)
+
+        util.makedirs(os.path.dirname(linkname), self.createmode)
+
+        if self._cansymlink:
+            try:
+                os.symlink(src, linkname)
+            except OSError as err:
+                raise OSError(err.errno, _('could not symlink to %r: %s') %
+                              (src, err.strerror), linkname)
+        else:
+            self.write(dst, src)
+
+    def join(self, path, *insidef):
+        if path:
+            return os.path.join(self.base, path, *insidef)
+        else:
+            return self.base
+
+opener = vfs
+
+class auditvfs(object):
+    def __init__(self, vfs):
+        self.vfs = vfs
+
+    @property
+    def mustaudit(self):
+        return self.vfs.mustaudit
+
+    @mustaudit.setter
+    def mustaudit(self, onoff):
+        self.vfs.mustaudit = onoff
+
+    @property
+    def options(self):
+        return self.vfs.options
+
+    @options.setter
+    def options(self, value):
+        self.vfs.options = value
+
+class filtervfs(abstractvfs, auditvfs):
+    '''Wrapper vfs for filtering filenames with a function.'''
+
+    def __init__(self, vfs, filter):
+        auditvfs.__init__(self, vfs)
+        self._filter = filter
+
+    def __call__(self, path, *args, **kwargs):
+        return self.vfs(self._filter(path), *args, **kwargs)
+
+    def join(self, path, *insidef):
+        if path:
+            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
+        else:
+            return self.vfs.join(path)
+
+filteropener = filtervfs
+
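A minimal sketch of ``filtervfs`` (the lowercasing filter is purely illustrative; real callers pass the store's filename-encoding function):

    base = vfs('/tmp/repo')                  # hypothetical base directory
    lowered = filtervfs(base, lambda p: p.lower())
    lowered.write('FOO/Bar', b'data')        # ends up in /tmp/repo/foo/bar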
+class readonlyvfs(abstractvfs, auditvfs):
+    '''Wrapper vfs preventing any writing.'''
+
+    def __init__(self, vfs):
+        auditvfs.__init__(self, vfs)
+
+    def __call__(self, path, mode='r', *args, **kw):
+        if mode not in ('r', 'rb'):
+            raise error.Abort(_('this vfs is read only'))
+        return self.vfs(path, mode, *args, **kw)
+
+    def join(self, path, *insidef):
+        return self.vfs.join(path, *insidef)
+
+class closewrapbase(object):
+    """Base class of wrapper, which hooks closing
+
+    Do not instantiate outside of the vfs layer.
+    """
+    def __init__(self, fh):
+        object.__setattr__(self, r'_origfh', fh)
+
+    def __getattr__(self, attr):
+        return getattr(self._origfh, attr)
+
+    def __setattr__(self, attr, value):
+        return setattr(self._origfh, attr, value)
+
+    def __delattr__(self, attr):
+        return delattr(self._origfh, attr)
+
+    def __enter__(self):
+        return self._origfh.__enter__()
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+
+    def close(self):
+        raise NotImplementedError('attempted instantiating ' + str(type(self)))
+
+class delayclosedfile(closewrapbase):
+    """Proxy for a file object whose close is delayed.
+
+    Do not instantiate outside of the vfs layer.
+    """
+    def __init__(self, fh, closer):
+        super(delayclosedfile, self).__init__(fh)
+        object.__setattr__(self, r'_closer', closer)
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._closer.close(self._origfh)
+
+    def close(self):
+        self._closer.close(self._origfh)
+
+class backgroundfilecloser(object):
+    """Coordinates background closing of file handles on multiple threads."""
+    def __init__(self, ui, expectedcount=-1):
+        self._running = False
+        self._entered = False
+        self._threads = []
+        self._threadexception = None
+
+        # Only Windows/NTFS has slow file closing, so only enable by default
+        # on that platform. But allow it to be enabled elsewhere for testing.
+        defaultenabled = pycompat.osname == 'nt'
+        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
+
+        if not enabled:
+            return
+
+        # There is overhead to starting and stopping the background threads.
+        # Don't do background processing unless the file count is large enough
+        # to justify it.
+        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
+                                    2048)
+        # FUTURE dynamically start background threads after minfilecount closes.
+        # (We don't currently have any callers that don't know their file count)
+        if expectedcount > 0 and expectedcount < minfilecount:
+            return
+
+        # Windows defaults to a limit of 512 open files. A buffer of 128
+        # should give us enough headway.
+        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
+        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
+
+        ui.debug('starting %d threads for background file closing\n' %
+                 threadcount)
+
+        self._queue = util.queue(maxsize=maxqueue)
+        self._running = True
+
+        for i in range(threadcount):
+            t = threading.Thread(target=self._worker, name='backgroundcloser')
+            self._threads.append(t)
+            t.start()
+
+    def __enter__(self):
+        self._entered = True
+        return self
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._running = False
+
+        # Wait for threads to finish closing so open files don't linger for
+        # longer than the lifetime of the context manager.
+        for t in self._threads:
+            t.join()
+
+    def _worker(self):
+        """Main routine for worker thread."""
+        while True:
+            try:
+                fh = self._queue.get(block=True, timeout=0.100)
+                # Need to catch exceptions or the thread will terminate
+                # and we could orphan file descriptors.
+                try:
+                    fh.close()
+                except Exception as e:
+                    # Stash so can re-raise from main thread later.
+                    self._threadexception = e
+            except util.empty:
+                if not self._running:
+                    break
+
+    def close(self, fh):
+        """Schedule a file for closing."""
+        if not self._entered:
+            raise error.Abort(_('can only call close() when context manager '
+                              'active'))
+
+        # If a background thread encountered an exception, raise now so we fail
+        # fast. Otherwise we may potentially go on for minutes until the error
+        # is acted on.
+        if self._threadexception:
+            e = self._threadexception
+            self._threadexception = None
+            raise e
+
+        # If we're not actively running, close synchronously.
+        if not self._running:
+            fh.close()
+            return
+
+        self._queue.put(fh, block=True, timeout=None)
+
+class checkambigatclosing(closewrapbase):
+    """Proxy for a file object, to avoid ambiguity of file stat
+
+    See also util.filestat for detail about "ambiguity of file stat".
+
+    This proxy is useful only if the target file is guarded by a
+    lock (e.g. repo.lock or repo.wlock).
+
+    Do not instantiate outside of the vfs layer.
+    """
+    def __init__(self, fh):
+        super(checkambigatclosing, self).__init__(fh)
+        object.__setattr__(self, r'_oldstat', util.filestat(fh.name))
+
+    def _checkambig(self):
+        oldstat = self._oldstat
+        if oldstat.stat:
+            newstat = util.filestat(self._origfh.name)
+            if newstat.isambig(oldstat):
+                # stat of changed file is ambiguous to original one
+                newstat.avoidambig(self._origfh.name, oldstat)
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        self._origfh.__exit__(exc_type, exc_value, exc_tb)
+        self._checkambig()
+
+    def close(self):
+        self._origfh.close()
+        self._checkambig()
--- a/mercurial/windows.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/windows.py	Tue Apr 18 12:24:34 2017 -0400
@@ -61,8 +61,14 @@
     OPWRITE = 2
 
     def __init__(self, fp):
-        object.__setattr__(self, '_fp', fp)
-        object.__setattr__(self, '_lastop', 0)
+        object.__setattr__(self, r'_fp', fp)
+        object.__setattr__(self, r'_lastop', 0)
+
+    def __enter__(self):
+        return self._fp.__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self._fp.__exit__(exc_type, exc_val, exc_tb)
 
     def __getattr__(self, name):
         return getattr(self._fp, name)
@@ -74,42 +80,42 @@
         self._fp.seek(0, os.SEEK_CUR)
 
     def seek(self, *args, **kwargs):
-        object.__setattr__(self, '_lastop', self.OPNONE)
+        object.__setattr__(self, r'_lastop', self.OPNONE)
         return self._fp.seek(*args, **kwargs)
 
     def write(self, d):
         if self._lastop == self.OPREAD:
             self._noopseek()
 
-        object.__setattr__(self, '_lastop', self.OPWRITE)
+        object.__setattr__(self, r'_lastop', self.OPWRITE)
         return self._fp.write(d)
 
     def writelines(self, *args, **kwargs):
         if self._lastop == self.OPREAD:
             self._noopseek()
 
-        object.__setattr__(self, '_lastop', self.OPWRITE)
+        object.__setattr__(self, r'_lastop', self.OPWRITE)
         return self._fp.writelines(*args, **kwargs)
 
     def read(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, '_lastop', self.OPREAD)
+        object.__setattr__(self, r'_lastop', self.OPREAD)
         return self._fp.read(*args, **kwargs)
 
     def readline(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, '_lastop', self.OPREAD)
+        object.__setattr__(self, r'_lastop', self.OPREAD)
         return self._fp.readline(*args, **kwargs)
 
     def readlines(self, *args, **kwargs):
         if self._lastop == self.OPWRITE:
             self._noopseek()
 
-        object.__setattr__(self, '_lastop', self.OPREAD)
+        object.__setattr__(self, r'_lastop', self.OPREAD)
         return self._fp.readlines(*args, **kwargs)
 
 def posixfile(name, mode='r', buffering=-1):
@@ -385,19 +391,6 @@
             break
         head, tail = os.path.split(head)
 
-def unlinkpath(f, ignoremissing=False):
-    """unlink and remove the directory if it is empty"""
-    try:
-        unlink(f)
-    except OSError as e:
-        if not (ignoremissing and e.errno == errno.ENOENT):
-            raise
-    # try removing directories that might now be empty
-    try:
-        removedirs(os.path.dirname(f))
-    except OSError:
-        pass
-
 def rename(src, dst):
     '''atomically rename file src to dst, replacing dst if it exists'''
     try:
@@ -442,7 +435,7 @@
         try:
             val = winreg.QueryValueEx(winreg.OpenKey(s, key), valname)[0]
             # never let a Unicode string escape into the wild
-            return encoding.tolocal(val.encode('UTF-8'))
+            return encoding.unitolocal(val)
         except EnvironmentError:
             pass
 
--- a/mercurial/wireproto.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/wireproto.py	Tue Apr 18 12:24:34 2017 -0400
@@ -26,6 +26,7 @@
     exchange,
     peer,
     pushkey as pushkeymod,
+    pycompat,
     streamclone,
     util,
 )
@@ -735,7 +736,7 @@
     depending on the request. e.g. you could advertise URLs for the closest
     data center given the client's IP address.
     """
-    return repo.opener.tryread('clonebundles.manifest')
+    return repo.vfs.tryread('clonebundles.manifest')
 
 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                  'known', 'getbundle', 'unbundlehash', 'batch']
@@ -839,7 +840,6 @@
             raise error.Abort(bundle2requiredmain,
                               hint=bundle2requiredhint)
 
-    #chunks = exchange.getbundlechunks(repo, 'serve', **opts)
     try:
         chunks = exchange.getbundlechunks(repo, 'serve', **opts)
     except error.Abort as exc:
@@ -900,7 +900,7 @@
 def pushkey(repo, proto, namespace, key, old, new):
     # compatibility with pre-1.8 clients which were accidentally
     # sending raw binary nodes rather than utf-8-encoded hex
-    if len(new) == 20 and new.encode('string-escape') != new:
+    if len(new) == 20 and util.escapestr(new) != new:
         # looks like it could be a binary node
         try:
             new.decode('utf-8')
@@ -961,7 +961,7 @@
 
         # write bundle data to temporary file because it can be big
         fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
-        fp = os.fdopen(fd, 'wb+')
+        fp = os.fdopen(fd, pycompat.sysstr('wb+'))
         r = 0
         try:
             proto.getfile(fp)
--- a/mercurial/worker.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/mercurial/worker.py	Tue Apr 18 12:24:34 2017 -0400
@@ -133,6 +133,7 @@
         if problem[0]:
             killworkers()
     oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
+    ui.flush()
     for pargs in partition(args, workers):
         pid = os.fork()
         if pid == 0:
@@ -143,28 +144,30 @@
                 os.close(rfd)
                 for i, item in func(*(staticargs + (pargs,))):
                     os.write(wfd, '%d %s\n' % (i, item))
+                return 0
 
             # make sure we use os._exit in all code paths. otherwise the worker
             # may do some clean-ups which could cause surprises like deadlock.
             # see sshpeer.cleanup for example.
+            ret = 0
             try:
                 try:
-                    scmutil.callcatch(ui, workerfunc)
+                    ret = scmutil.callcatch(ui, workerfunc)
                 finally:
                     ui.flush()
             except KeyboardInterrupt:
                 os._exit(255)
             except: # never return, therefore no re-raises
                 try:
-                    ui.traceback()
+                    ui.traceback(force=True)
                     ui.flush()
                 finally:
                     os._exit(255)
             else:
-                os._exit(0)
+                os._exit(ret & 255)
         pids.add(pid)
     os.close(wfd)
-    fp = os.fdopen(rfd, 'rb', 0)
+    fp = os.fdopen(rfd, pycompat.sysstr('rb'), 0)
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
         waitforworkers()
--- a/setup.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/setup.py	Tue Apr 18 12:24:34 2017 -0400
@@ -63,7 +63,10 @@
 import shutil
 import tempfile
 from distutils import log
-if 'FORCE_SETUPTOOLS' in os.environ:
+# We have issues with setuptools on some platforms and builders. Until
+# those are resolved, setuptools is opt-in except for platforms where
+# we don't have issues.
+if os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ:
     from setuptools import setup
 else:
     from distutils.core import setup
@@ -91,17 +94,13 @@
     # We remove hg.bat if we are able to build hg.exe.
     scripts.append('contrib/win32/hg.bat')
 
-# simplified version of distutils.ccompiler.CCompiler.has_function
-# that actually removes its temporary files.
-def hasfunction(cc, funcname):
+def cancompile(cc, code):
     tmpdir = tempfile.mkdtemp(prefix='hg-install-')
     devnull = oldstderr = None
     try:
-        fname = os.path.join(tmpdir, 'funcname.c')
+        fname = os.path.join(tmpdir, 'testcomp.c')
         f = open(fname, 'w')
-        f.write('int main(void) {\n')
-        f.write('    %s();\n' % funcname)
-        f.write('}\n')
+        f.write(code)
         f.close()
         # Redirect stderr to /dev/null to hide any error messages
         # from the compiler.
@@ -122,6 +121,16 @@
             devnull.close()
         shutil.rmtree(tmpdir)
 
+# simplified version of distutils.ccompiler.CCompiler.has_function
+# that actually removes its temporary files.
+def hasfunction(cc, funcname):
+    code = 'int main(void) { %s(); }\n' % funcname
+    return cancompile(cc, code)
+
+def hasheader(cc, headername):
+    code = '#include <%s>\nint main(void) { return 0; }\n' % headername
+    return cancompile(cc, code)
+
 # py2exe needs to be installed to work
 try:
     import py2exe
@@ -367,7 +376,7 @@
             modulepolicy = 'c'
         with open("mercurial/__modulepolicy__.py", "w") as f:
             f.write('# this file is autogenerated by setup.py\n')
-            f.write('modulepolicy = "%s"\n' % modulepolicy)
+            f.write('modulepolicy = b"%s"\n' % modulepolicy)
 
         build_py.run(self)
 
@@ -581,11 +590,26 @@
 osutil_cflags = []
 osutil_ldflags = []
 
-# platform specific macros: HAVE_SETPROCTITLE
-for plat, func in [(re.compile('freebsd'), 'setproctitle')]:
-    if plat.search(sys.platform) and hasfunction(new_compiler(), func):
+# platform specific macros
+for plat, func in [('bsd', 'setproctitle')]:
+    if re.search(plat, sys.platform) and hasfunction(new_compiler(), func):
         osutil_cflags.append('-DHAVE_%s' % func.upper())
 
+for plat, macro, code in [
+    ('bsd|darwin', 'BSD_STATFS', '''
+     #include <sys/param.h>
+     #include <sys/mount.h>
+     int main() { struct statfs s; return sizeof(s.f_fstypename); }
+     '''),
+    ('linux', 'LINUX_STATFS', '''
+     #include <linux/magic.h>
+     #include <sys/vfs.h>
+     int main() { struct statfs s; return sizeof(s.f_type); }
+     '''),
+]:
+    if re.search(plat, sys.platform) and cancompile(new_compiler(), code):
+        osutil_cflags.append('-DHAVE_%s' % macro)
+
 if sys.platform == 'darwin':
     osutil_ldflags += ['-framework', 'ApplicationServices']
 
@@ -658,7 +682,14 @@
             packagedata['mercurial'].append(f)
 
 datafiles = []
-setupversion = version
+
+# distutils expects version to be str/unicode. Converting it to
+# unicode on Python 2 still works because it won't contain any
+# non-ascii bytes and will be implicitly converted back to bytes
+# when operated on.
+assert isinstance(version, bytes)
+setupversion = version.decode('ascii')
+
 extra = {}
 
 if py2exeloaded:
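The hasfunction()/hasheader() refactoring above is one compile-probe pattern: emit a tiny C program, attempt to compile it, and treat success as feature presence. A self-contained sketch of the same idea (stderr silencing omitted for brevity; names are illustrative, not the setup.py code):

    import os
    import shutil
    import tempfile
    from distutils.ccompiler import new_compiler

    def cancompile(cc, code):
        tmpdir = tempfile.mkdtemp(prefix='hg-probe-')
        try:
            fname = os.path.join(tmpdir, 'probe.c')
            with open(fname, 'w') as f:
                f.write(code)
            try:
                cc.compile([fname], output_dir=tmpdir)
                return True
            except Exception:  # distutils raises CompileError on failure
                return False
        finally:
            shutil.rmtree(tmpdir)

    if cancompile(new_compiler(),
                  '#include <sys/vfs.h>\nint main(void) { return 0; }\n'):
        print('sys/vfs.h is usable')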
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/badserverext.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,270 @@
+# badserverext.py - Extension making servers behave badly
+#
+# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# no-check-code
+
+"""Extension to make servers behave badly.
+
+This extension is useful for testing Mercurial behavior when various network
+events occur.
+
+Various config options in the [badserver] section influence behavior:
+
+closebeforeaccept
+   If true, close() the server socket when a new connection arrives before
+   accept() is called. The server will then exit.
+
+closeafteraccept
+   If true, the server will close() the client socket immediately after
+   accept().
+
+closeafterrecvbytes
+   If defined, close the client socket after receiving this many bytes.
+
+closeaftersendbytes
+   If defined, close the client socket after sending this many bytes.
+"""
+
+from __future__ import absolute_import
+
+import socket
+
+from mercurial.hgweb import (
+    server,
+)
+
+# We can't adjust __class__ on a socket instance, so we define a proxy type.
+class socketproxy(object):
+    __slots__ = (
+        '_orig',
+        '_logfp',
+        '_closeafterrecvbytes',
+        '_closeaftersendbytes',
+    )
+
+    def __init__(self, obj, logfp, closeafterrecvbytes=0,
+                 closeaftersendbytes=0):
+        object.__setattr__(self, '_orig', obj)
+        object.__setattr__(self, '_logfp', logfp)
+        object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
+        object.__setattr__(self, '_closeaftersendbytes', closeaftersendbytes)
+
+    def __getattribute__(self, name):
+        if name in ('makefile',):
+            return object.__getattribute__(self, name)
+
+        return getattr(object.__getattribute__(self, '_orig'), name)
+
+    def __delattr__(self, name):
+        delattr(object.__getattribute__(self, '_orig'), name)
+
+    def __setattr__(self, name, value):
+        setattr(object.__getattribute__(self, '_orig'), name, value)
+
+    def makefile(self, mode, bufsize):
+        f = object.__getattribute__(self, '_orig').makefile(mode, bufsize)
+
+        logfp = object.__getattribute__(self, '_logfp')
+        closeafterrecvbytes = object.__getattribute__(self,
+                                                      '_closeafterrecvbytes')
+        closeaftersendbytes = object.__getattribute__(self,
+                                                      '_closeaftersendbytes')
+
+        return fileobjectproxy(f, logfp,
+                               closeafterrecvbytes=closeafterrecvbytes,
+                               closeaftersendbytes=closeaftersendbytes)
+
+# We can't adjust __class__ on socket._fileobject, so define a proxy.
+class fileobjectproxy(object):
+    __slots__ = (
+        '_orig',
+        '_logfp',
+        '_closeafterrecvbytes',
+        '_closeaftersendbytes',
+    )
+
+    def __init__(self, obj, logfp, closeafterrecvbytes=0,
+                 closeaftersendbytes=0):
+        object.__setattr__(self, '_orig', obj)
+        object.__setattr__(self, '_logfp', logfp)
+        object.__setattr__(self, '_closeafterrecvbytes', closeafterrecvbytes)
+        object.__setattr__(self, '_closeaftersendbytes', closeaftersendbytes)
+
+    def __getattribute__(self, name):
+        if name in ('read', 'readline', 'write', '_writelog'):
+            return object.__getattribute__(self, name)
+
+        return getattr(object.__getattribute__(self, '_orig'), name)
+
+    def __delattr__(self, name):
+        delattr(object.__getattribute__(self, '_orig'), name)
+
+    def __setattr__(self, name, value):
+        setattr(object.__getattribute__(self, '_orig'), name, value)
+
+    def _writelog(self, msg):
+        msg = msg.replace('\r', '\\r').replace('\n', '\\n')
+
+        object.__getattribute__(self, '_logfp').write(msg)
+        object.__getattribute__(self, '_logfp').write('\n')
+        object.__getattribute__(self, '_logfp').flush()
+
+    def read(self, size=-1):
+        remaining = object.__getattribute__(self, '_closeafterrecvbytes')
+
+        # No read limit. Call original function.
+        if not remaining:
+            result = object.__getattribute__(self, '_orig').read(size)
+            self._writelog('read(%d) -> (%d) %s' % (size,
+                                                    len(result),
+                                                    result))
+            return result
+
+        origsize = size
+
+        if size < 0:
+            size = remaining
+        else:
+            size = min(remaining, size)
+
+        result = object.__getattribute__(self, '_orig').read(size)
+        remaining -= len(result)
+
+        self._writelog('read(%d from %d) -> (%d) %s' % (
+            size, origsize, len(result), result))
+
+        object.__setattr__(self, '_closeafterrecvbytes', remaining)
+
+        if remaining <= 0:
+            self._writelog('read limit reached; closing socket')
+            self._sock.close()
+            # This is the easiest way to abort the current request.
+            raise Exception('connection closed after receiving N bytes')
+
+        return result
+
+    def readline(self, size=-1):
+        remaining = object.__getattribute__(self, '_closeafterrecvbytes')
+
+        # No read limit. Call original function.
+        if not remaining:
+            result = object.__getattribute__(self, '_orig').readline(size)
+            self._writelog('readline(%d) -> (%d) %s' % (
+                size, len(result), result))
+            return result
+
+        origsize = size
+
+        if size < 0:
+            size = remaining
+        else:
+            size = min(remaining, size)
+
+        result = object.__getattribute__(self, '_orig').readline(size)
+        remaining -= len(result)
+
+        self._writelog('readline(%d from %d) -> (%d) %s' % (
+            size, origsize, len(result), result))
+
+        object.__setattr__(self, '_closeafterrecvbytes', remaining)
+
+        if remaining <= 0:
+            self._writelog('read limit reached; closing socket')
+            self._sock.close()
+            # This is the easiest way to abort the current request.
+            raise Exception('connection closed after receiving N bytes')
+
+        return result
+
+    def write(self, data):
+        remaining = object.__getattribute__(self, '_closeaftersendbytes')
+
+        # No byte limit on this operation. Call original function.
+        if not remaining:
+            self._writelog('write(%d) -> %s' % (len(data), data))
+            result = object.__getattribute__(self, '_orig').write(data)
+            return result
+
+        if len(data) > remaining:
+            newdata = data[0:remaining]
+        else:
+            newdata = data
+
+        remaining -= len(newdata)
+
+        self._writelog('write(%d from %d) -> (%d) %s' % (
+            len(newdata), len(data), remaining, newdata))
+
+        result = object.__getattribute__(self, '_orig').write(newdata)
+
+        object.__setattr__(self, '_closeaftersendbytes', remaining)
+
+        if remaining <= 0:
+            self._writelog('write limit reached; closing socket')
+            self._sock.close()
+            raise Exception('connection closed after sending N bytes')
+
+        return result
+
+def extsetup(ui):
+    # Change the base HTTP server class so various events can be performed.
+    # See SocketServer.BaseServer for how the specially named methods work.
+    class badserver(server.MercurialHTTPServer):
+        def __init__(self, ui, *args, **kwargs):
+            self._ui = ui
+            super(badserver, self).__init__(ui, *args, **kwargs)
+
+            # Need to inherit object so super() works.
+            class badrequesthandler(self.RequestHandlerClass, object):
+                def send_header(self, name, value):
+                    # Make headers deterministic to facilitate testing.
+                    if name.lower() == 'date':
+                        value = 'Fri, 14 Apr 2017 00:00:00 GMT'
+                    elif name.lower() == 'server':
+                        value = 'badhttpserver'
+
+                    return super(badrequesthandler, self).send_header(name,
+                                                                      value)
+
+            self.RequestHandlerClass = badrequesthandler
+
+        # Called to accept() a pending socket.
+        def get_request(self):
+            if self._ui.configbool('badserver', 'closebeforeaccept'):
+                self.socket.close()
+
+                # Tells the server to stop processing more requests.
+                self.__shutdown_request = True
+
+                # Simulate a socket failure so that processing of this
+                # request stops here.
+                raise socket.error('close before accept')
+
+            if self._ui.configbool('badserver', 'closeafteraccept'):
+                request, client_address = super(badserver, self).get_request()
+                request.close()
+                raise socket.error('close after accept')
+
+            return super(badserver, self).get_request()
+
+        # Does heavy lifting of processing a request. Invokes
+        # self.finish_request() which calls self.RequestHandlerClass() which
+        # is a hgweb.server._httprequesthandler.
+        def process_request(self, socket, address):
+            # Wrap socket in a proxy if we need to count bytes.
+            closeafterrecvbytes = self._ui.configint('badserver',
+                                                     'closeafterrecvbytes', 0)
+            closeaftersendbytes = self._ui.configint('badserver',
+                                                     'closeaftersendbytes', 0)
+
+            if closeafterrecvbytes or closeaftersendbytes:
+                socket = socketproxy(socket, self.errorlog,
+                                     closeafterrecvbytes=closeafterrecvbytes,
+                                     closeaftersendbytes=closeaftersendbytes)
+
+            return super(badserver, self).process_request(socket, address)
+
+    server.MercurialHTTPServer = badserver
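The heart of the extension is the byte-budget proxy: every read/readline/write decrements a counter, and once the budget is spent the socket is closed and an exception aborts the request. In miniature (a sketch only; the real proxy must also route its own attribute access through object.__getattribute__ and object.__setattr__, as above, to avoid recursing into the delegation):

    class sendlimiter(object):
        # wrap a writable file object; close it after `limit` bytes
        def __init__(self, fp, limit):
            self._fp = fp
            self._remaining = limit

        def write(self, data):
            data = data[:self._remaining]
            self._remaining -= len(data)
            self._fp.write(data)
            if self._remaining <= 0:
                self._fp.close()
                # raising is the easiest way to abort the request
                raise Exception('connection closed after sending N bytes')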
--- a/tests/drawdag.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/drawdag.py	Tue Apr 18 12:24:34 2017 -0400
@@ -85,6 +85,7 @@
     error,
     node,
     scmutil,
+    tags as tagsmod,
 )
 
 cmdtable = {}
@@ -308,4 +309,5 @@
         ctx = simplecommitctx(repo, name, pctxs, [name])
         n = ctx.commit()
         committed[name] = n
-        repo.tag(name, n, message=None, user=None, date=None, local=True)
+        tagsmod.tag(repo, name, n, message=None, user=None, date=None,
+                    local=True)
--- a/tests/dumbhttp.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/dumbhttp.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,7 +7,9 @@
 """
 
 import optparse
+import os
 import signal
+import socket
 import sys
 
 from mercurial import (
@@ -18,11 +20,17 @@
 httpserver = util.httpserver
 OptionParser = optparse.OptionParser
 
+if os.environ.get('HGIPV6', '0') == '1':
+    class simplehttpserver(httpserver.httpserver):
+        address_family = socket.AF_INET6
+else:
+    simplehttpserver = httpserver.httpserver
+
 class simplehttpservice(object):
     def __init__(self, host, port):
         self.address = (host, port)
     def init(self):
-        self.httpd = httpserver.httpserver(
+        self.httpd = simplehttpserver(
             self.address, httpserver.simplehttprequesthandler)
     def run(self):
         self.httpd.serve_forever()
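The subclass trick in dumbhttp.py works because address_family is a class attribute that TCPServer.__init__ consults when it creates the listening socket, so switching to IPv6 needs no method override. The same pattern using only stdlib names:

    import socket
    try:
        from http.server import HTTPServer  # Python 3
    except ImportError:
        from BaseHTTPServer import HTTPServer  # Python 2

    class v6httpserver(HTTPServer):
        # consulted by TCPServer.__init__ when binding the socket
        address_family = socket.AF_INET6

    # e.g. v6httpserver(('localhost', 8000), somehandler).serve_forever()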
--- a/tests/dummyssh	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/dummyssh	Tue Apr 18 12:24:34 2017 -0400
@@ -10,7 +10,7 @@
 if sys.argv[1] != "user@dummy":
     sys.exit(-1)
 
-os.environ["SSH_CLIENT"] = "127.0.0.1 1 2"
+os.environ["SSH_CLIENT"] = "%s 1 2" % os.environ.get('LOCALIP', '127.0.0.1')
 
 log = open("dummylog", "ab")
 log.write("Got arguments")
--- a/tests/flagprocessorext.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/flagprocessorext.py	Tue Apr 18 12:24:34 2017 -0400
@@ -7,6 +7,7 @@
 
 from mercurial import (
     changegroup,
+    exchange,
     extensions,
     filelog,
     revlog,
@@ -103,6 +104,10 @@
     revlog.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
     revlog.REVIDX_FLAGS_ORDER.extend(flags)
 
+    # Teach exchange to use changegroup 3
+    for k in exchange._bundlespeccgversions.keys():
+        exchange._bundlespeccgversions[k] = '03'
+
     # Add wrappers for addrevision, responsible to set flags depending on the
     # revision data contents.
     wrapfunction(filelog.filelog, 'addrevision', noopaddrevision)
--- a/tests/get-with-headers.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/get-with-headers.py	Tue Apr 18 12:24:34 2017 -0400
@@ -78,8 +78,8 @@
         else:
             sys.stdout.write(data)
 
-        if twice and response.getheader('ETag', None):
-            tag = response.getheader('ETag')
+    if twice and response.getheader('ETag', None):
+        tag = response.getheader('ETag')
 
     return response.status
 
--- a/tests/hghave.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/hghave.py	Tue Apr 18 12:24:34 2017 -0400
@@ -346,6 +346,15 @@
     finally:
         os.unlink(fn)
 
+@check("hardlink-whitelisted", "hardlinks on whitelisted filesystems")
+def has_hardlink_whitelisted():
+    from mercurial import util
+    try:
+        fstype = util.getfstype('.')
+    except OSError:
+        return False
+    return fstype in util._hardlinkfswhitelist
+
 @check("rmcwd", "can remove current working directory")
 def has_rmcwd():
     ocwd = os.getcwd()
@@ -413,6 +422,12 @@
                        br"<stdin>:1: 're' imported but unused",
                        True)
 
+@check("pylint", "Pylint python linter")
+def has_pylint():
+    return matchoutput("pylint --help",
+                       br"Usage:  pylint",
+                       True)
+
 @check("pygments", "Pygments source highlighting library")
 def has_pygments():
     try:
@@ -619,3 +634,7 @@
         return True
     except ImportError:
         return False
+
+@check("devfull", "/dev/full special file")
+def has_dev_full():
+    return os.path.exists('/dev/full')
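Probes like the two above pair the @check decorator with a predicate: the first argument is the feature name that tests reference in #if/#require blocks, the second a human-readable description. A hypothetical probe in the same style (the feature name and command are invented for illustration):

    @check("frobnicator", "frobnicator command line tool (hypothetical)")
    def has_frobnicator():
        return matchoutput('frobnicator --version', br'frobnicator \d')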
--- a/tests/run-tests.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/run-tests.py	Tue Apr 18 12:24:34 2017 -0400
@@ -112,18 +112,51 @@
 # For Windows support
 wifexited = getattr(os, "WIFEXITED", lambda x: False)
 
-def checkportisavailable(port):
-    """return true if a port seems free to bind on localhost"""
+# Whether to use IPv6
+def checksocketfamily(name, port=20058):
+    """return true if we can listen on localhost using family=name
+
+    name should be either 'AF_INET', or 'AF_INET6'.
+    A port already in use is okay - EADDRINUSE is treated as success.
+    """
+    family = getattr(socket, name, None)
+    if family is None:
+        return False
     try:
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s = socket.socket(family, socket.SOCK_STREAM)
         s.bind(('localhost', port))
         s.close()
         return True
     except socket.error as exc:
-        if not exc.errno == errno.EADDRINUSE:
+        if exc.errno == errno.EADDRINUSE:
+            return True
+        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
+            return False
+        else:
             raise
+    else:
         return False
 
+# useipv6 will be set by parseargs
+useipv6 = None
+
+def checkportisavailable(port):
+    """return true if a port seems free to bind on localhost"""
+    if useipv6:
+        family = socket.AF_INET6
+    else:
+        family = socket.AF_INET
+    try:
+        s = socket.socket(family, socket.SOCK_STREAM)
+        s.bind(('localhost', port))
+        s.close()
+        return True
+    except socket.error as exc:
+        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL,
+                             errno.EPROTONOSUPPORT):
+            raise
+    return False
+
 closefds = os.name == 'posix'
 def Popen4(cmd, wd, timeout, env=None):
     processlock.acquire()
@@ -269,6 +302,8 @@
                       help="install and use chg wrapper in place of hg")
     parser.add_option("--with-chg", metavar="CHG",
                       help="use specified chg wrapper in place of hg")
+    parser.add_option("--ipv6", action="store_true",
+                      help="prefer IPv6 to IPv4 for network related tests")
     parser.add_option("-3", "--py3k-warnings", action="store_true",
         help="enable Py3k warnings on Python 2.6+")
     # This option should be deleted once test-check-py3-compat.t and other
@@ -338,6 +373,14 @@
         parser.error('--chg does not work when --with-hg is specified '
                      '(use --with-chg instead)')
 
+    global useipv6
+    if options.ipv6:
+        useipv6 = checksocketfamily('AF_INET6')
+    else:
+        # only use IPv6 if IPv4 is unavailable and IPv6 is available
+        useipv6 = ((not checksocketfamily('AF_INET'))
+                   and checksocketfamily('AF_INET6'))
+
     options.anycoverage = options.cover or options.annotate or options.htmlcov
     if options.anycoverage:
         try:
@@ -454,6 +497,12 @@
 # sans \t, \n and \r
 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
 
+# Match feature-conditionalized output lines of the following form,
+# capturing the feature list in group 2 and the preceding output in group 1:
+#
+#   output..output (feature !)\n
+optline = re.compile(b'(.+) \((.+?) !\)\n$')
+
 def cdatasafe(data):
     """Make a string safe to include in a CDATA block.
 
@@ -506,7 +555,8 @@
                  timeout=defaults['timeout'],
                  startport=defaults['port'], extraconfigopts=None,
                  py3kwarnings=False, shell=None, hgcommand=None,
-                 slowtimeout=defaults['slowtimeout'], usechg=False):
+                 slowtimeout=defaults['slowtimeout'], usechg=False,
+                 useipv6=False):
         """Create a test from parameters.
 
         path is the full path to the file defining the test.
@@ -554,6 +604,7 @@
         self._shell = _bytespath(shell)
         self._hgcommand = hgcommand or b'hg'
         self._usechg = usechg
+        self._useipv6 = useipv6
 
         self._aborted = False
         self._daemonpids = []
@@ -802,6 +853,8 @@
             self._portmap(2),
             (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
              br'\1 (glob)'),
+            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
+            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
             ]
         r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
 
@@ -817,6 +870,12 @@
         else:
             return re.escape(p)
 
+    def _localip(self):
+        if self._useipv6:
+            return b'::1'
+        else:
+            return b'127.0.0.1'
+
     def _getenv(self):
         """Obtain environment variables to use during test execution."""
         def defineport(i):
@@ -825,6 +884,7 @@
         env = os.environ.copy()
         if sysconfig is not None:
             env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase')
+        env['HGEMITWARNINGS'] = '1'
         env['TESTTMP'] = self._testtmp
         env['HOME'] = self._testtmp
         # This number should match portneeded in _getport
@@ -839,6 +899,11 @@
         env["HGUSER"]   = "test"
         env["HGENCODING"] = "ascii"
         env["HGENCODINGMODE"] = "strict"
+        env['HGIPV6'] = str(int(self._useipv6))
+
+        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
+        # IP addresses.
+        env['LOCALIP'] = self._localip()
 
         # Reset some environment variables to well-known values so that
         # the tests produce repeatable output.
@@ -849,6 +914,7 @@
         env['TERM'] = 'xterm'
 
         for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
+                  'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
                   'NO_PROXY CHGDEBUG').split():
             if k in env:
                 del env[k]
@@ -881,6 +947,9 @@
         hgrc.write(b'[largefiles]\n')
         hgrc.write(b'usercache = %s\n' %
                    (os.path.join(self._testtmp, b'.cache/largefiles')))
+        hgrc.write(b'[web]\n')
+        hgrc.write(b'address = localhost\n')
+        hgrc.write(b'ipv6 = %s\n' % str(self._useipv6).encode('ascii'))
 
         for opt in self._extraconfigopts:
             section, key = opt.split('.', 1)
@@ -973,7 +1042,7 @@
     re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
     # Not all platforms have 127.0.0.1 as loopback (though most do),
     # so we always glob that too.
-    re.compile(br'.*127.0.0.1.*$'),
+    re.compile(br'.*\$LOCALIP.*$'),
 ]
 
 bchr = chr
@@ -1209,8 +1278,19 @@
                     if r:
                         els.pop(i)
                         break
-                    if el and el.endswith(b" (?)\n"):
-                        optional.append(i)
+                    if el:
+                        if el.endswith(b" (?)\n"):
+                            optional.append(i)
+                        else:
+                            m = optline.match(el)
+                            if m:
+                                conditions = m.group(2).split(' ')
+
+                                if self._hghave(conditions)[0]:
+                                    lout = el
+                                else:
+                                    optional.append(i)
+
                     i += 1
 
                 if r:
@@ -1236,8 +1316,10 @@
                 # clean up any optional leftovers
                 while expected.get(pos, None):
                     el = expected[pos].pop(0)
-                    if el and not el.endswith(b" (?)\n"):
-                        break
+                    if el:
+                        if (not optline.match(el)
+                            and not el.endswith(b" (?)\n")):
+                            break
                     postout.append(b'  ' + el)
 
             if lcmd:
@@ -1281,7 +1363,7 @@
                         return True
                 return b'-glob'
             return True
-        el = el.replace(b'127.0.0.1', b'*')
+        el = el.replace(b'$LOCALIP', b'*')
         i, n = 0, len(el)
         res = b''
         while i < n:
@@ -1309,6 +1391,12 @@
             if el.endswith(b" (?)\n"):
                 retry = "retry"
                 el = el[:-5] + b"\n"
+            else:
+                m = optline.match(el)
+                if m:
+                    el = m.group(1) + b"\n"
+                    retry = "retry"
+
             if el.endswith(b" (esc)\n"):
                 if PYTHON3:
                     el = el[:-7].decode('unicode_escape') + '\n'
@@ -1323,7 +1411,7 @@
                 # ignore '(glob)' added to l by 'replacements'
                 if l.endswith(b" (glob)\n"):
                     l = l[:-8] + b"\n"
-                return TTest.globmatch(el[:-8], l)
+                return TTest.globmatch(el[:-8], l) or retry
             if os.altsep and l.replace(b'\\', b'/') == el:
                 return b'+glob'
         return retry
@@ -2288,7 +2376,8 @@
                     py3kwarnings=self.options.py3k_warnings,
                     shell=self.options.shell,
                     hgcommand=self._hgcommand,
-                    usechg=bool(self.options.with_chg or self.options.chg))
+                    usechg=bool(self.options.with_chg or self.options.chg),
+                    useipv6=useipv6)
         t.should_reload = True
         return t
 
--- a/tests/test-addremove-similar.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-addremove-similar.t	Tue Apr 18 12:24:34 2017 -0400
@@ -55,6 +55,78 @@
 
   $ hg commit -m B
 
+should be sorted by path for a stable result
+
+  $ for i in `python $TESTDIR/seq.py 0 9`; do
+  >     cp small-file $i
+  > done
+  $ rm small-file
+  $ hg addremove
+  adding 0
+  adding 1
+  adding 2
+  adding 3
+  adding 4
+  adding 5
+  adding 6
+  adding 7
+  adding 8
+  adding 9
+  removing small-file
+  recording removal of small-file as rename to 0 (100% similar)
+  recording removal of small-file as rename to 1 (100% similar)
+  recording removal of small-file as rename to 2 (100% similar)
+  recording removal of small-file as rename to 3 (100% similar)
+  recording removal of small-file as rename to 4 (100% similar)
+  recording removal of small-file as rename to 5 (100% similar)
+  recording removal of small-file as rename to 6 (100% similar)
+  recording removal of small-file as rename to 7 (100% similar)
+  recording removal of small-file as rename to 8 (100% similar)
+  recording removal of small-file as rename to 9 (100% similar)
+  $ hg commit -m '10 same files'
+
+pick one from many identical files
+
+  $ cp 0 a
+  $ rm `python $TESTDIR/seq.py 0 9`
+  $ hg addremove
+  removing 0
+  removing 1
+  removing 2
+  removing 3
+  removing 4
+  removing 5
+  removing 6
+  removing 7
+  removing 8
+  removing 9
+  adding a
+  recording removal of 0 as rename to a (100% similar)
+  $ hg revert -aq
+
+pick one from many similar files
+
+  $ cp 0 a
+  $ for i in `python $TESTDIR/seq.py 0 9`; do
+  >     echo $i >> $i
+  > done
+  $ hg commit -m 'make them slightly different'
+  $ rm `python $TESTDIR/seq.py 0 9`
+  $ hg addremove -s50
+  removing 0
+  removing 1
+  removing 2
+  removing 3
+  removing 4
+  removing 5
+  removing 6
+  removing 7
+  removing 8
+  removing 9
+  adding a
+  recording removal of 0 as rename to a (99% similar)
+  $ hg commit -m 'always the same file should be selected'
+
 should all fail
 
   $ hg addremove -s foo
--- a/tests/test-annotate.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-annotate.t	Tue Apr 18 12:24:34 2017 -0400
@@ -484,7 +484,9 @@
   $ hg id -n
   20
 
-Test followlines() revset
+Test followlines() revset; we usually check both followlines(pat, range) and
+followlines(pat, range, descend=True) to make sure both give the same result
+when they should.
 
   $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
   16: baz:0
@@ -494,9 +496,12 @@
   16: baz:0
   19: baz:3
   20: baz:4
-  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=.^)'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
   16: baz:0
   19: baz:3
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
+  19: baz:3
+  20: baz:4
   $ printf "0\n0\n" | cat - baz > baz1
   $ mv baz1 baz
   $ hg ci -m 'added two lines with 0'
@@ -504,21 +509,31 @@
   16: baz:0
   19: baz:3
   20: baz:4
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
+  19: baz:3
+  20: baz:4
   $ echo 6 >> baz
   $ hg ci -m 'added line 8'
   $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
   16: baz:0
   19: baz:3
   20: baz:4
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
+  19: baz:3
+  20: baz:4
   $ sed 's/3/3+/' baz > baz.new
   $ mv baz.new baz
   $ hg ci -m 'baz:3->3+'
-  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
   16: baz:0
   19: baz:3
   20: baz:4
   23: baz:3->3+
-  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2)'
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
+  19: baz:3
+  20: baz:4
+  23: baz:3->3+
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
   21: added two lines with 0
 
 file patterns are okay
@@ -536,9 +551,13 @@
   20: baz:4
   23: baz:3->3+
   24: qux:4->4+
-  $ hg up 23 --quiet
+
+but are missed when following children
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
+  23: baz:3->3+
 
 merge
+  $ hg up 23 --quiet
   $ echo 7 >> baz
   $ hg ci -m 'one more line, out of line range'
   created new head
@@ -581,7 +600,35 @@
   28: merge from other side
   $ hg up 23 --quiet
 
+we are missing the branch with the rename when following children
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=25, descend=True)'
+  26: baz:3+->3-
+
+we follow all branches in descending direction
+  $ hg up 22 --quiet
+  $ sed 's/3/+3/' baz > baz.new
+  $ mv baz.new baz
+  $ hg ci -m 'baz:3->+3'
+  created new head
+  $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
+  @  29: baz:3->+3
+  :
+  : o  26: baz:3+->3-
+  : :
+  : o  23: baz:3->3+
+  :/
+  o    20: baz:4
+  |\
+  | o  19: baz:3
+  |/
+  o  18: baz:2
+  :
+  o  16: baz:0
+  |
+  ~
+
 check error cases
+  $ hg up 23 --quiet
   $ hg log -r 'followlines()'
   hg: parse error: followlines takes at least 1 positional arguments
   [255]
@@ -615,6 +662,12 @@
   $ hg log -r 'followlines(baz, 2:40)'
   abort: line range exceeds file size
   [255]
+  $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
+  hg: parse error at 43: syntax error in revset 'followlines(baz, 2:4, startrev=20, descend=[1])'
+  [255]
+  $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
+  hg: parse error: 'descend' argument must be a boolean
+  [255]
 
 Test annotate with whitespace options
 
--- a/tests/test-archive.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-archive.t	Tue Apr 18 12:24:34 2017 -0400
@@ -99,7 +99,7 @@
   > except AttributeError:
   >     stdout = sys.stdout
   > try:
-  >     f = util.urlreq.urlopen('http://127.0.0.1:%s/?%s'
+  >     f = util.urlreq.urlopen('http://$LOCALIP:%s/?%s'
   >                     % (os.environ['HGPORT'], requeststr))
   >     stdout.write(f.read())
   > except util.urlerr.httperror as e:
--- a/tests/test-bad-extension.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bad-extension.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,3 +1,31 @@
+ensure that failing ui.atexit handlers report sensibly
+
+  $ cat > $TESTTMP/bailatexit.py <<EOF
+  > from mercurial import util
+  > def bail():
+  >     raise RuntimeError('ui.atexit handler exception')
+  > 
+  > def extsetup(ui):
+  >     ui.atexit(bail)
+  > EOF
+  $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
+  >  help help
+  hg help [-ecks] [TOPIC]
+  
+  show help for a given topic or a help overview
+  error in exit handlers:
+  Traceback (most recent call last):
+    File "*/mercurial/dispatch.py", line *, in _runexithandlers (glob)
+      func(*args, **kwargs)
+    File "$TESTTMP/bailatexit.py", line *, in bail (glob)
+      raise RuntimeError('ui.atexit handler exception')
+  RuntimeError: ui.atexit handler exception
+  [255]
+
+  $ rm $TESTTMP/bailatexit.py
+
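For contrast with the deliberately failing handler above, a well-behaved exit handler is registered the same way from extsetup; a sketch (the handler body is invented):

    def extsetup(ui):
        def goodbye():
            ui.write('exit handler ran\n')
        ui.atexit(goodbye)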
+another bad extension
+
   $ echo 'raise Exception("bit bucket overflow")' > badext.py
   $ abspathexc=`pwd`/badext.py
 
--- a/tests/test-basic.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-basic.t	Tue Apr 18 12:24:34 2017 -0400
@@ -11,13 +11,36 @@
   ui.interactive=False
   ui.mergemarkers=detailed
   ui.promptecho=True
+  web.address=localhost
+  web\.ipv6=(?:True|False) (re)
   $ hg init t
   $ cd t
 
-Make a changeset:
+Prepare a changeset:
 
   $ echo a > a
   $ hg add a
+
+  $ hg status
+  A a
+
+Writes to stdio succeed and fail appropriately
+
+#if devfull
+  $ hg status 2>/dev/full
+  A a
+
+  $ hg status >/dev/full
+  abort: No space left on device
+  [255]
+
+  $ hg status >/dev/full 2>&1
+  [1]
+
+  $ hg status ENOENT 2>/dev/full
+  [1]
+#endif
+
   $ hg commit -m test
 
 This command is ancient:
--- a/tests/test-bdiff.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bdiff.py	Tue Apr 18 12:24:34 2017 -0400
@@ -3,8 +3,6 @@
 import struct
 import unittest
 
-import silenttestrunner
-
 from mercurial import (
     bdiff,
     mpatch,
@@ -148,4 +146,5 @@
                          ['a\n', diffreplace(2, 10, 'a\na\na\na\n', '')])
 
 if __name__ == '__main__':
+    import silenttestrunner
     silenttestrunner.main(__name__)
--- a/tests/test-blackbox.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-blackbox.t	Tue Apr 18 12:24:34 2017 -0400
@@ -25,7 +25,7 @@
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a exited 0 after * seconds (glob)
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000+ (5000)> blackbox
-  1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000+ (5000)> blackbox --config blackbox.dirty=True exited 0 after * seconds (glob)
+  1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000+ (5000)> blackbox --config *blackbox.dirty=True* exited 0 after * seconds (glob)
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> confuse
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> alias 'confuse' expands to 'log --limit 3'
   1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> confuse exited 0 after * seconds (glob)
--- a/tests/test-bookmarks-pushpull.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bookmarks-pushpull.t	Tue Apr 18 12:24:34 2017 -0400
@@ -581,12 +581,12 @@
 be exchanged)
 
   $ hg -R repo1 incoming -B
-  comparing with $TESTTMP/bmcomparison/source
+  comparing with $TESTTMP/bmcomparison/source (glob)
   searching for changed bookmarks
   no changed bookmarks found
   [1]
   $ hg -R repo1 outgoing -B
-  comparing with $TESTTMP/bmcomparison/source
+  comparing with $TESTTMP/bmcomparison/source (glob)
   searching for changed bookmarks
   no changed bookmarks found
   [1]
--- a/tests/test-bookmarks.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bookmarks.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,4 +1,5 @@
-  $ hg init
+  $ hg init repo
+  $ cd repo
 
 no bookmarks
 
@@ -630,7 +631,7 @@
      Z                         2:db815d6d32e6
      x  y                      2:db815d6d32e6
   $ hg -R ../cloned-bookmarks-manual-update-with-divergence pull
-  pulling from $TESTTMP
+  pulling from $TESTTMP/repo (glob)
   searching for changes
   adding changesets
   adding manifests
@@ -895,3 +896,58 @@
   $ touch $TESTTMP/unpause
 
   $ cd ..
+
+check that HG_PENDING makes pending changes visible to an external
+hook only in related repositories.
+
+(emulate a concurrently running transaction by saving a copy of
+.hg/bookmarks.pending and restoring it in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/bookmarks.pending .hg/bookmarks.pending.saved
+  > exit 1 # to avoid adding a new bookmark for subsequent tests
+  > EOF
+
+  $ hg init unrelated
+  $ cd unrelated
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m '#0'
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" bookmarks INVISIBLE
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
+
+(check visible bookmarks while transaction running in repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo "@repo"
+  > hg -R "$TESTTMP/repo" bookmarks
+  > echo "@unrelated"
+  > hg -R "$TESTTMP/unrelated" bookmarks
+  > exit 1 # to avoid adding a new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" bookmarks NEW
+  @repo
+   * NEW                       6:81dcce76aa0b
+     X2                        1:925d80f479bb
+     Y                         4:125c9a1d6df6
+     Z                         5:5fb12f0f2d51
+     Z@1                       1:925d80f479bb
+     Z@2                       4:125c9a1d6df6
+     foo                       3:9ba5f110a0b3
+     foo@1                     0:f7b1eb17ad24
+     foo@2                     2:db815d6d32e6
+     four                      3:9ba5f110a0b3
+     should-end-on-two         2:db815d6d32e6
+     x  y                      2:db815d6d32e6
+  @unrelated
+  no bookmarks set
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
--- a/tests/test-branches.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-branches.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,5 +1,10 @@
   $ hg init a
   $ cd a
+
+Verify checking branch of nullrev before the cache is created doesn't crash
+  $ hg log -r 'branch(.)' -T '{branch}\n'
+
+Basic test
   $ echo 'root' >root
   $ hg add root
   $ hg commit -d '0 0' -m "Adding root node"
@@ -519,6 +524,12 @@
   $ hg branches --closed -T '{if(closed, "{branch}\n")}'
   c
 
+  $ hg branches -T '{word(0, branch)}: {desc|firstline}\n'
+  b: reopen branch with a change
+  a: Adding d branch
+  a: Adding b branch head 2
+  default: Adding root node
+
 Tests of revision branch name caching
 
 The rev branch cache is updated automatically. In these tests we use a trick to
--- a/tests/test-bundle-type.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bundle-type.t	Tue Apr 18 12:24:34 2017 -0400
@@ -40,12 +40,12 @@
   $ hg -q pull ../b1
   $ hg bundle -a -t unknown out.hg
   abort: unknown is not a recognized bundle specification
-  (see 'hg help bundle' for supported values for --type)
+  (see 'hg help bundlespec' for supported values for --type)
   [255]
 
   $ hg bundle -a -t unknown-v2 out.hg
   abort: unknown compression is not supported
-  (see 'hg help bundle' for supported values for --type)
+  (see 'hg help bundlespec' for supported values for --type)
   [255]
 
   $ cd ..
@@ -193,7 +193,7 @@
 
   $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
   abort: compression engine zstd is not supported on v1 bundles
-  (see 'hg help bundle' for supported values for --type)
+  (see 'hg help bundlespec' for supported values for --type)
   [255]
 
 #else
@@ -222,6 +222,6 @@
   $ cd t1
   $ hg bundle -a -t garbage ../bgarbage
   abort: garbage is not a recognized bundle specification
-  (see 'hg help bundle' for supported values for --type)
+  (see 'hg help bundlespec' for supported values for --type)
   [255]
   $ cd ..
--- a/tests/test-bundle.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bundle.t	Tue Apr 18 12:24:34 2017 -0400
@@ -232,7 +232,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle?../full.hg (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle*../full.hg (glob)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 Rollback empty
@@ -255,7 +255,7 @@
   adding manifests
   adding file changes
   added 9 changesets with 7 changes to 4 files (+1 heads)
-  changegroup hook: HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=bundle:empty+full.hg (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle:empty+full.hg
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
 Cannot produce streaming clone bundles with "hg bundle"
--- a/tests/test-bundle2-exchange.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bundle2-exchange.t	Tue Apr 18 12:24:34 2017 -0400
@@ -41,7 +41,7 @@
   $ hg commit -m 'a'
   pre-close-tip:3903775176ed draft 
   postclose-tip:3903775176ed draft 
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
 
   $ hg unbundle $TESTDIR/bundles/rebase.hg
   adding changesets
@@ -50,7 +50,7 @@
   added 8 changesets with 7 changes to 7 files (+3 heads)
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:* HG_TXNNAME=unbundle (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
   bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
@@ -64,18 +64,18 @@
   $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
 
 clone --pull
 
   $ hg -R main phase --public cd010b8cd998
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
   $ hg clone main other --pull --rev 9520eea781bc
   adding changesets
   adding manifests
@@ -84,7 +84,7 @@
   1 new obsolescence markers
   pre-close-tip:9520eea781bc draft 
   postclose-tip:9520eea781bc draft 
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -101,7 +101,7 @@
   $ hg -R main phase --public 9520eea781bc
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
   $ hg -R other pull -r 24b6387c8c8c
   pulling from $TESTTMP/main (glob)
   searching for changes
@@ -112,7 +112,7 @@
   1 new obsolescence markers
   pre-close-tip:24b6387c8c8c draft 
   postclose-tip:24b6387c8c8c draft 
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R other log -G
@@ -131,13 +131,13 @@
   $ hg -R main phase --public 24b6387c8c8c
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
   $ hg -R other pull -r 24b6387c8c8c
   pulling from $TESTTMP/main (glob)
   no changes found
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
   $ hg -R other log -G
   o  2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com>  F
@@ -157,7 +157,7 @@
   no changes found
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
   $ hg -R other log -G
   o  2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com>  F
@@ -175,69 +175,69 @@
   $ hg -R main bookmark --rev eea13746799a book_eea1
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
   pre-close-tip:02de42196ebe draft 
   postclose-tip:02de42196ebe draft 
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 02de42196ebe book_02de
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
   $ hg -R main bookmark --rev 32af7686d403 book_32af
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
 
   $ hg -R other bookmark --rev cd010b8cd998 book_eea1
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R other bookmark --rev cd010b8cd998 book_02de
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R other bookmark --rev cd010b8cd998 book_42cc
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ hg -R other bookmark --rev cd010b8cd998 book_32af
   pre-close-tip:24b6387c8c8c public 
   postclose-tip:24b6387c8c8c public 
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
 
   $ hg -R main phase --public eea13746799a
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
 
 push
   $ hg -R main push other --rev eea13746799a --bookmark book_eea1
@@ -256,11 +256,11 @@
   remote: lock:  free
   remote: wlock: free
   remote: postclose-tip:eea13746799a public book_eea1
-  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=file:$TESTTMP/other (glob)
+  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/other
   updating bookmark book_eea1
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
   file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
   $ hg -R other log -G
   o    3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
@@ -289,7 +289,7 @@
   updating bookmark book_02de
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   ssh://user@dummy/main HG_URL=ssh://user@dummy/main
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg -R other debugobsolete
@@ -314,7 +314,7 @@
   updating bookmark book_42cc
   pre-close-tip:42ccdea3bb16 draft book_42cc
   postclose-tip:42ccdea3bb16 draft book_42cc
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
   http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
   (run 'hg heads .' to see heads, 'hg merge' to merge)
   $ cat main-error.log
@@ -340,11 +340,11 @@
   remote: lock:  free
   remote: wlock: free
   remote: postclose-tip:5fddd98957c8 draft book_5fdd
-  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
+  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:ssh:$LOCALIP
   updating bookmark book_5fdd
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
   ssh://user@dummy/other HG_URL=ssh://user@dummy/other
   $ hg -R other log -G
   o  6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
@@ -377,7 +377,7 @@
   $ hg -R main phase --public 32af7686d403
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
   $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
   pushing to http://localhost:$HGPORT2/
   searching for changes
@@ -394,11 +394,11 @@
   remote: lock:  free
   remote: wlock: free
   remote: postclose-tip:32af7686d403 public book_32af
-  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:http:127.0.0.1: (glob)
+  remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:http:$LOCALIP: (glob)
   updating bookmark book_32af
   pre-close-tip:02de42196ebe draft book_02de
   postclose-tip:02de42196ebe draft book_02de
-  txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
   http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
   $ cat other-error.log
 
@@ -486,7 +486,7 @@
   $ hg ci -m 'I'
   pre-close-tip:e7ec4e813ba6 draft 
   postclose-tip:e7ec4e813ba6 draft 
-  txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
+  txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
   $ hg id
   e7ec4e813ba6 tip
   $ cd ..
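
The hunks above all follow the same pattern: hook environments now carry
HG_HOOKNAME (the name the hook was registered under, e.g. txnclose.env)
and HG_HOOKTYPE (the kind of hook), transaction ids are normalized to the
stable TXN:$ID$ placeholder, and literal loopback addresses become
$LOCALIP. A minimal sketch of a helper that consumes the new variables,
assuming it is wired up as a shell hook:

  # hookinfo.py, a hypothetical helper invoked as a shell hook; the new
  # variables arrive in the environment next to the existing HG_* ones
  import os
  print('fired as %s (type %s) for txn %s' % (
      os.environ.get('HG_HOOKNAME'),
      os.environ.get('HG_HOOKTYPE'),
      os.environ.get('HG_TXNNAME')))
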
--- a/tests/test-bundle2-multiple-changegroups.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bundle2-multiple-changegroups.t	Tue Apr 18 12:24:34 2017 -0400
@@ -81,17 +81,17 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  pretxnchangegroup hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   remote: changegroup2
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  pretxnchangegroup hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_NODE_LAST=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=27547f69f25460a52fff66ad004e58da7ad3fb56 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_NODE_LAST=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=f838bfaca5c7226600ebcfd84f3c3c13a28d3757 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   pullop.cgresult is 1
   (run 'hg update' to get a working copy)
   $ hg update
@@ -151,20 +151,20 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files (+1 heads)
-  pretxnchangegroup hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   remote: changegroup2
   adding changesets
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 3 files (+1 heads)
-  pretxnchangegroup hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=1d14c3ce6ac0582d2809220d33e8cd7a696e0156 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_NODE_LAST=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=8a5212ebc8527f9fb821601504794e3eb11a1ed3 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_NODE_LAST=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=7f219660301fe4c8a116f714df5e769695cc2b46 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=1d14c3ce6ac0582d2809220d33e8cd7a696e0156 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=5cd59d311f6508b8e0ed28a266756c859419c9f1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   pullop.cgresult is 3
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg log -G
@@ -224,17 +224,17 @@
   adding manifests
   adding file changes
   added 1 changesets with 0 changes to 0 files (-1 heads)
-  pretxnchangegroup hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_PENDING=$TESTTMP/clone HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   remote: changegroup2
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  pretxnchangegroup hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  changegroup hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
-  incoming hook: HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/repo (glob)
+  pretxnchangegroup hook: HG_HOOKNAME=pretxnchangegroup HG_HOOKTYPE=pretxnchangegroup HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PENDING=$TESTTMP/clone HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_NODE_LAST=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=71bd7b46de72e69a32455bf88d04757d542e6cf4 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_NODE_LAST=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=9d18e5bd9ab09337802595d49f1dad0c98df4d84 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/repo
   pullop.cgresult is -2
   (run 'hg update' to get a working copy)
   $ hg log -G
--- a/tests/test-bundle2-remote-changegroup.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-bundle2-remote-changegroup.t	Tue Apr 18 12:24:34 2017 -0400
@@ -39,7 +39,7 @@
   >         part = bundler.newpart(name, data=data)
   >         return part
   > 
-  >     for line in open(repo.join('bundle2maker'), 'r'):
+  >     for line in open(repo.vfs.join('bundle2maker'), 'r'):
   >         line = line.strip()
   >         try:
   >             verb, args = line.split(None, 1)
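
This one-line change tracks the move of path helpers onto the repository's
vfs object: repo.join() is deprecated in favor of repo.vfs.join(). A
minimal sketch of the new call, assuming a loaded repository:

  # resolve a path inside .hg via the repository vfs (4.2 layout)
  from mercurial import hg, ui as uimod
  repo = hg.repository(uimod.ui.load(), '.')
  print(repo.vfs.join('bundle2maker'))  # <repo>/.hg/bundle2maker
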
--- a/tests/test-check-code.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-check-code.t	Tue Apr 18 12:24:34 2017 -0400
@@ -8,9 +8,12 @@
 (The spelling "no-che?k-code" is intentional, so that this file itself is not skipped when checking.)
 
   $ hg locate -X contrib/python-zstandard -X hgext/fsmonitor/pywatchman |
-  > sed 's-\\-/-g' | xargs "$check_code" --warnings --per-file=0 || false
+  > sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
+  contrib/perf.py:869:
+   >             r.revision(r.node(x))
+   don't convert rev to node before passing to revision(nodeorrev)
   Skipping i18n/polib.py it has no-che?k-code (glob)
-  mercurial/demandimport.py:312:
+  mercurial/demandimport.py:313:
    >     if os.environ.get('HGDEMANDIMPORT') != 'disable':
    use encoding.environ instead (py3)
   mercurial/encoding.py:54:
@@ -22,15 +25,22 @@
   mercurial/encoding.py:61:
    >                    for k, v in os.environ.items())
    use encoding.environ instead (py3)
-  mercurial/encoding.py:203:
+  mercurial/encoding.py:221:
    >                    for k, v in os.environ.items())
    use encoding.environ instead (py3)
   Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
   Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
-  mercurial/policy.py:45:
-   > policy = os.environ.get('HGMODULEPOLICY', policy)
+  mercurial/policy.py:46:
+   >     if 'HGMODULEPOLICY' in os.environ:
+   use encoding.environ instead (py3)
+  mercurial/policy.py:47:
+   >         policy = os.environ['HGMODULEPOLICY'].encode('utf-8')
+   use encoding.environ instead (py3)
+  mercurial/policy.py:49:
+   >     policy = os.environ.get('HGMODULEPOLICY', policy)
    use encoding.environ instead (py3)
   Skipping mercurial/statprof.py it has no-che?k-code (glob)
+  Skipping tests/badserverext.py it has no-che?k-code (glob)
   [1]
 
 @commands in debugcommands.py should be in alphabetical order.
@@ -48,3 +58,20 @@
   ...         print('commands in debugcommands.py not sorted; first differing '
   ...               'command is %s; expected %s' % (commands[i], command))
   ...         break
+
+Prevent accidentally adding new files in the root directory.
+
+  $ hg files 'glob:*'
+  .editorconfig
+  .hgignore
+  .hgsigs
+  .hgtags
+  CONTRIBUTING
+  CONTRIBUTORS
+  COPYING
+  Makefile
+  README
+  hg
+  hgeditor
+  hgweb.cgi
+  setup.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-help.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,28 @@
+#require test-repo
+
+  $ . "$TESTDIR/helpers-testrepo.sh"
+
+  $ cat <<'EOF' > scanhelptopics.py
+  > from __future__ import absolute_import, print_function
+  > import re
+  > import sys
+  > if sys.platform == "win32":
+  >     import os, msvcrt
+  >     msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+  > topics = set()
+  > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
+  > for fname in sys.argv:
+  >     with open(fname) as f:
+  >         topics.update(m.group(1) for m in topicre.finditer(f.read()))
+  > for s in sorted(topics):
+  >     print(s)
+  > EOF
+
+  $ cd "$TESTDIR"/..
+
+Check if ":hg:`help TOPIC`" is valid:
+(use "xargs -n1 -t" to see which help commands are executed)
+
+  $ hg files 'glob:{hgext,mercurial}/**/*.py' | sed 's|\\|/|g' \
+  > | xargs python "$TESTTMP/scanhelptopics.py" \
+  > | xargs -n1 hg help > /dev/null
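
The scanner's regex does all the work here: it harvests every
":hg:`help TOPIC`" cross-reference from the Python sources, and the test
then asks `hg help` to resolve each collected topic. A quick illustration
of what the pattern matches:

  import re
  topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
  sample = 'see :hg:`help revsets` and :hg:`help config.paths` for details'
  print(sorted(m.group(1) for m in topicre.finditer(sample)))
  # prints: ['config.paths', 'revsets']
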
--- a/tests/test-check-module-imports.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-check-module-imports.t	Tue Apr 18 12:24:34 2017 -0400
@@ -3,148 +3,6 @@
   $ . "$TESTDIR/helpers-testrepo.sh"
   $ import_checker="$TESTDIR"/../contrib/import-checker.py
 
-Run the doctests from the import checker, and make sure
-it's working correctly.
-  $ TERM=dumb
-  $ export TERM
-  $ python -m doctest $import_checker
-
-Run additional tests for the import checker
-
-  $ mkdir testpackage
-  $ touch testpackage/__init__.py
-
-  $ cat > testpackage/multiple.py << EOF
-  > from __future__ import absolute_import
-  > import os, sys
-  > EOF
-
-  $ cat > testpackage/unsorted.py << EOF
-  > from __future__ import absolute_import
-  > import sys
-  > import os
-  > EOF
-
-  $ cat > testpackage/stdafterlocal.py << EOF
-  > from __future__ import absolute_import
-  > from . import unsorted
-  > import os
-  > EOF
-
-  $ cat > testpackage/requirerelative.py << EOF
-  > from __future__ import absolute_import
-  > import testpackage.unsorted
-  > EOF
-
-  $ cat > testpackage/importalias.py << EOF
-  > from __future__ import absolute_import
-  > import ui
-  > EOF
-
-  $ cat > testpackage/relativestdlib.py << EOF
-  > from __future__ import absolute_import
-  > from .. import os
-  > EOF
-
-  $ cat > testpackage/symbolimport.py << EOF
-  > from __future__ import absolute_import
-  > from .unsorted import foo
-  > EOF
-
-  $ cat > testpackage/latesymbolimport.py << EOF
-  > from __future__ import absolute_import
-  > from . import unsorted
-  > from mercurial.node import hex
-  > EOF
-
-  $ cat > testpackage/multiplegroups.py << EOF
-  > from __future__ import absolute_import
-  > from . import unsorted
-  > from . import more
-  > EOF
-
-  $ mkdir testpackage/subpackage
-  $ cat > testpackage/subpackage/levelpriority.py << EOF
-  > from __future__ import absolute_import
-  > from . import foo
-  > from .. import parent
-  > EOF
-
-  $ touch testpackage/subpackage/foo.py
-  $ cat > testpackage/subpackage/__init__.py << EOF
-  > from __future__ import absolute_import
-  > from . import levelpriority  # should not cause cycle
-  > EOF
-
-  $ cat > testpackage/subpackage/localimport.py << EOF
-  > from __future__ import absolute_import
-  > from . import foo
-  > def bar():
-  >     # should not cause "higher-level import should come first"
-  >     from .. import unsorted
-  >     # but other errors should be detected
-  >     from .. import more
-  >     import testpackage.subpackage.levelpriority
-  > EOF
-
-  $ cat > testpackage/importmodulefromsub.py << EOF
-  > from __future__ import absolute_import
-  > from .subpackage import foo  # not a "direct symbol import"
-  > EOF
-
-  $ cat > testpackage/importsymbolfromsub.py << EOF
-  > from __future__ import absolute_import
-  > from .subpackage import foo, nonmodule
-  > EOF
-
-  $ cat > testpackage/sortedentries.py << EOF
-  > from __future__ import absolute_import
-  > from . import (
-  >     foo,
-  >     bar,
-  > )
-  > EOF
-
-  $ cat > testpackage/importfromalias.py << EOF
-  > from __future__ import absolute_import
-  > from . import ui
-  > EOF
-
-  $ cat > testpackage/importfromrelative.py << EOF
-  > from __future__ import absolute_import
-  > from testpackage.unsorted import foo
-  > EOF
-
-  $ mkdir testpackage2
-  $ touch testpackage2/__init__.py
-
-  $ cat > testpackage2/latesymbolimport.py << EOF
-  > from __future__ import absolute_import
-  > from testpackage import unsorted
-  > from mercurial.node import hex
-  > EOF
-
-  $ python "$import_checker" testpackage*/*.py testpackage/subpackage/*.py
-  testpackage/importalias.py:2: ui module must be "as" aliased to uimod
-  testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod
-  testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted
-  testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted
-  testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage
-  testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
-  testpackage/multiple.py:2: multiple imported names: os, sys
-  testpackage/multiplegroups.py:3: multiple "from . import" statements
-  testpackage/relativestdlib.py:2: relative import of stdlib module
-  testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted
-  testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo
-  testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage
-  testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage
-  testpackage/subpackage/localimport.py:7: multiple "from .. import" statements
-  testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority
-  testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted
-  testpackage/unsorted.py:3: imports not lexically sorted: os < sys
-  testpackage2/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
-  [1]
-
   $ cd "$TESTDIR"/..
 
 There are a handful of cases here that require renaming a module so it
@@ -171,7 +29,7 @@
   > -X tests/test-verify-repo-operations.py \
   > -X tests/test-hook.t \
   > -X tests/test-import.t \
-  > -X tests/test-check-module-imports.t \
+  > -X tests/test-imports-checker.t \
   > -X tests/test-commit-interactive.t \
   > -X tests/test-contrib-check-code.t \
   > -X tests/test-extension.t \
--- a/tests/test-check-py3-commands.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-check-py3-commands.t	Tue Apr 18 12:24:34 2017 -0400
@@ -3,12 +3,159 @@
 This test helps keep track of which commands we can run on Python 3
 and shows what kinds of errors come up.
 The full traceback is hidden to have a stable output.
+  $ HGBIN=`which hg`
 
   $ for cmd in version debuginstall ; do
   >   echo $cmd
-  >   $PYTHON3 `which hg` $cmd 2>&1 2>&1 | tail -1
+  >   $PYTHON3 $HGBIN $cmd 2>&1 | tail -1
   > done
   version
   warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   debuginstall
-  TypeError: Can't convert 'bytes' object to str implicitly
+  no problems detected
+
+#if test-repo
+Make a clone so that any features in the developer's .hg/hgrc that
+might confuse Python 3 don't break this test. When we can do commit in
+Python 3, we'll stop doing this. We use e76ed1e480ef for the clone
+because it has different files than 273ce12ad8f1, so we can test both
+`files` from dirstate and `files` loaded from a specific revision.
+
+  $ hg clone -r e76ed1e480ef "`dirname "$TESTDIR"`" testrepo 2>&1 | tail -1
+  15 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Test using -R, which exercises some URL code:
+  $ $PYTHON3 $HGBIN -R testrepo files -r 273ce12ad8f1 | tail -1
+  testrepo/tkmerge
+
+Now prove `hg files` is reading the whole manifest. For now we have to
+grep out some potential warnings that come from hgrc.
+  $ cd testrepo
+  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1
+  .hgignore
+  PKG-INFO
+  README
+  hg
+  mercurial/__init__.py
+  mercurial/byterange.py
+  mercurial/fancyopts.py
+  mercurial/hg.py
+  mercurial/mdiff.py
+  mercurial/revlog.py
+  mercurial/transaction.py
+  notes.txt
+  setup.py
+  tkmerge
+
+  $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 | wc -l
+  \s*14 (re)
+  $ $PYTHON3 $HGBIN files | wc -l
+  \s*15 (re)
+
+Test if log-like commands work:
+
+  $ $PYTHON3 $HGBIN tip
+  changeset:   10:e76ed1e480ef
+  tag:         tip
+  user:        oxymoron@cinder.waste.org
+  date:        Tue May 03 23:37:43 2005 -0800
+  summary:     Fix linking of changeset revs when merging
+  
+
+  $ $PYTHON3 $HGBIN log -r0
+  changeset:   0:9117c6561b0b
+  user:        mpm@selenic.com
+  date:        Tue May 03 13:16:10 2005 -0800
+  summary:     Add back links from file revisions to changeset revisions
+  
+
+  $ cd ..
+#endif
+
+Test if `hg config` works:
+
+  $ $PYTHON3 $HGBIN config
+  defaults.backout=-d "0 0"
+  defaults.commit=-d "0 0"
+  defaults.shelve=--date "0 0"
+  defaults.tag=-d "0 0"
+  devel.all-warnings=true
+  largefiles.usercache=$TESTTMP/.cache/largefiles
+  ui.slash=True
+  ui.interactive=False
+  ui.mergemarkers=detailed
+  ui.promptecho=True
+  web.address=localhost
+  web.ipv6=False
+
+  $ cat > included-hgrc <<EOF
+  > [extensions]
+  > babar = imaginary_elephant
+  > EOF
+  $ cat >> $HGRCPATH <<EOF
+  > %include $TESTTMP/included-hgrc
+  > EOF
+  $ $PYTHON3 $HGBIN version | tail -1
+  *** failed to import extension babar from imaginary_elephant: *: 'imaginary_elephant' (glob)
+  warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+  $ rm included-hgrc
+  $ touch included-hgrc
+
+Test bytes-ness of policy.policy with HGMODULEPOLICY
+
+  $ HGMODULEPOLICY=py
+  $ export HGMODULEPOLICY
+  $ $PYTHON3 `which hg` debuginstall 2>&1 | tail -1
+  no problems detected
+
+`hg init` can create empty repos
+`hg status` works fine
+`hg summary` also works!
+
+  $ $PYTHON3 `which hg` init py3repo
+  $ cd py3repo
+  $ echo "This is the file 'iota'." > iota
+  $ $PYTHON3 $HGBIN status
+  ? iota
+  $ $PYTHON3 $HGBIN add iota
+  $ $PYTHON3 $HGBIN status
+  A iota
+  $ $PYTHON3 $HGBIN commit --message 'commit performed in Python 3'
+  $ $PYTHON3 $HGBIN status
+
+  $ mkdir A
+  $ echo "This is the file 'mu'." > A/mu
+  $ $PYTHON3 $HGBIN addremove
+  adding A/mu
+  $ $PYTHON3 $HGBIN status
+  A A/mu
+  $ HGEDITOR='echo message > ' $PYTHON3 $HGBIN commit
+  $ $PYTHON3 $HGBIN status
+  $ $PYTHON3 $HGBIN summary
+  parent: 1:e1e9167203d4 tip
+   message
+  branch: default
+  commit: (clean)
+  update: (current)
+  phases: 2 draft
+
+Prove the repo is valid using the Python 2 `hg`:
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  2 files, 2 changesets, 2 total revisions
+  $ hg log
+  changeset:   1:e1e9167203d4
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     message
+  
+  changeset:   0:71c96e924262
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     commit performed in Python 3
+  
--- a/tests/test-check-py3-compat.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-check-py3-compat.t	Tue Apr 18 12:24:34 2017 -0400
@@ -7,13 +7,15 @@
   contrib/python-zstandard/setup.py not using absolute_import
   contrib/python-zstandard/setup_zstd.py not using absolute_import
   contrib/python-zstandard/tests/common.py not using absolute_import
-  contrib/python-zstandard/tests/test_cffi.py not using absolute_import
+  contrib/python-zstandard/tests/test_buffer_util.py not using absolute_import
   contrib/python-zstandard/tests/test_compressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_compressor_fuzzing.py not using absolute_import
   contrib/python-zstandard/tests/test_data_structures.py not using absolute_import
+  contrib/python-zstandard/tests/test_data_structures_fuzzing.py not using absolute_import
   contrib/python-zstandard/tests/test_decompressor.py not using absolute_import
+  contrib/python-zstandard/tests/test_decompressor_fuzzing.py not using absolute_import
   contrib/python-zstandard/tests/test_estimate_sizes.py not using absolute_import
   contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import
-  contrib/python-zstandard/tests/test_roundtrip.py not using absolute_import
   contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import
   i18n/check-translation.py not using absolute_import
   setup.py not using absolute_import
@@ -23,15 +25,15 @@
   $ hg files 'set:(**.py) - grep(pygments)' -X hgext/fsmonitor/pywatchman \
   > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
-  hgext/convert/transport.py: error importing: <ImportError> No module named 'svn.client' (error at transport.py:*)
-  hgext/fsmonitor/state.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at watchmanclient.py:*)
-  hgext/fsmonitor/watchmanclient.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at watchmanclient.py:*)
-  mercurial/cffi/bdiff.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
-  mercurial/cffi/mpatch.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
-  mercurial/cffi/osutil.py: error importing: <ImportError> No module named 'mercurial.cffi' (error at check-py3-compat.py:*)
-  mercurial/scmwindows.py: error importing: <ImportError> No module named 'msvcrt' (error at win32.py:*)
-  mercurial/win32.py: error importing: <ImportError> No module named 'msvcrt' (error at win32.py:*)
-  mercurial/windows.py: error importing: <ImportError> No module named 'msvcrt' (error at windows.py:*)
+  hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob)
+  hgext/fsmonitor/state.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at __init__.py:*)
+  hgext/fsmonitor/watchmanclient.py: error importing: <SyntaxError> from __future__ imports must occur at the beginning of the file (__init__.py, line 30) (error at __init__.py:*)
+  mercurial/cffi/bdiff.py: error importing: <*Error> No module named 'mercurial.cffi' (error at check-py3-compat.py:*) (glob)
+  mercurial/cffi/mpatch.py: error importing: <*Error> No module named 'mercurial.cffi' (error at check-py3-compat.py:*) (glob)
+  mercurial/cffi/osutil.py: error importing: <*Error> No module named 'mercurial.cffi' (error at check-py3-compat.py:*) (glob)
+  mercurial/scmwindows.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
+  mercurial/win32.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob)
+  mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob)
 
 #endif
 
--- a/tests/test-check-pyflakes.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-check-pyflakes.t	Tue Apr 18 12:24:34 2017 -0400
@@ -7,9 +7,8 @@
 (skipping binary file random-seed)
 
   $ hg locate 'set:**.py or grep("^#!.*python")' -X hgext/fsmonitor/pywatchman \
-  > -X mercurial/pycompat.py \
+  > -X mercurial/pycompat.py -X contrib/python-zstandard \
   > 2>/dev/null \
   > | xargs pyflakes 2>/dev/null | "$TESTDIR/filterpyflakes.py"
-  contrib/python-zstandard/tests/test_data_structures.py:107: local variable 'size' is assigned to but never used
   tests/filterpyflakes.py:39: undefined name 'undefinedname'
   
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-check-pylint.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,19 @@
+#require test-repo pylint hg10
+
+Run pylint for known rules we care about.
+-----------------------------------------
+
+There should be no recorded failures; fix the codebase before introducing a
+new check.
+
+Current checks:
+- W0102: no mutable default argument
+
+  $ touch $TESTTMP/fakerc
+  $ pylint --rcfile=$TESTTMP/fakerc --disable=all \
+  >   --enable=W0102 --reports=no \
+  >   mercurial hgext hgext3rd
+   (?)
+  ------------------------------------ (?)
+  Your code has been rated at 10.00/10 (?)
+   (?)
--- a/tests/test-chg.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-chg.t	Tue Apr 18 12:24:34 2017 -0400
@@ -32,6 +32,46 @@
 
   $ cd ..
 
+editor
+------
+
+  $ cat >> pushbuffer.py <<EOF
+  > def reposetup(ui, repo):
+  >     repo.ui.pushbuffer(subproc=True)
+  > EOF
+
+  $ chg init editor
+  $ cd editor
+
+by default, system() should be redirected to the client:
+
+  $ touch foo
+  $ CHGDEBUG= HGEDITOR=cat chg ci -Am channeled --edit 2>&1 \
+  > | egrep "HG:|run 'cat"
+  chg: debug: run 'cat "*"' at '$TESTTMP/editor' (glob)
+  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
+  HG: Leave message empty to abort commit.
+  HG: --
+  HG: user: test
+  HG: branch 'default'
+  HG: added foo
+
+but no redirection should be made if output is captured:
+
+  $ touch bar
+  $ CHGDEBUG= HGEDITOR=cat chg ci -Am buffered --edit \
+  > --config extensions.pushbuffer="$TESTTMP/pushbuffer.py" 2>&1 \
+  > | egrep "HG:|run 'cat"
+  [1]
+
+check that commit commands succeeded:
+
+  $ hg log -T '{rev}:{desc}\n'
+  1:buffered
+  0:channeled
+
+  $ cd ..
+
 pager
 -----
 
@@ -62,6 +102,37 @@
   $ chg log -l1 -q --config ui.formatted=False
   0:1f7b0de80e11
 
+chg waits for pager if runcommand raises
+
+  $ cat > $TESTTMP/crash.py <<EOF
+  > from mercurial import cmdutil
+  > cmdtable = {}
+  > command = cmdutil.command(cmdtable)
+  > @command('crash')
+  > def pagercrash(ui, repo, *pats, **opts):
+  >     ui.write('going to crash\n')
+  >     raise Exception('.')
+  > EOF
+
+  $ cat > $TESTTMP/fakepager.py <<EOF
+  > import sys, time
+  > for line in iter(sys.stdin.readline, ''):
+  >     if 'crash' in line: # only interested in lines containing 'crash'
+  >         # if chg exits when pager is sleeping (incorrectly), the output
+  >         # will be captured by the next test case
+  >         time.sleep(1)
+  >         sys.stdout.write('crash-pager: %s' % line)
+  > EOF
+
+  $ cat >> .hg/hgrc <<EOF
+  > [extensions]
+  > crash = $TESTTMP/crash.py
+  > EOF
+
+  $ chg crash --pager=on --config ui.formatted=True 2>/dev/null
+  crash-pager: going to crash
+  [255]
+
   $ cd ..
 
 server lifecycle
--- a/tests/test-clone-uncompressed.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-clone-uncompressed.t	Tue Apr 18 12:24:34 2017 -0400
@@ -60,12 +60,12 @@
 
   $ cat > delayer.py <<EOF
   > import time
-  > from mercurial import extensions, scmutil
+  > from mercurial import extensions, vfs
   > def __call__(orig, self, path, *args, **kwargs):
   >     if path == 'data/f1.i':
   >         time.sleep(2)
   >     return orig(self, path, *args, **kwargs)
-  > extensions.wrapfunction(scmutil.vfs, '__call__', __call__)
+  > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
   > EOF
 
 prepare repo with small and big file to cover both code paths in emitrevlogdata
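
The import changes because the vfs classes were split out of scmutil into
their own mercurial.vfs module in this release. An extension that needs to
run on both layouts could fall back at import time; a compatibility sketch,
assuming only the module move shown above:

  try:
      from mercurial import vfs as vfsmod  # 4.2 and later
      vfsclass = vfsmod.vfs
  except ImportError:
      from mercurial import scmutil  # older releases
      vfsclass = scmutil.vfs
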
--- a/tests/test-clone.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-clone.t	Tue Apr 18 12:24:34 2017 -0400
@@ -31,10 +31,10 @@
   default                       10:a7949464abda
   $ ls .hg/cache
   branch2-served
-  checkisexec
-  checklink
-  checklink-target
-  checknoexec
+  checkisexec (execbit !)
+  checklink (symlink !)
+  checklink-target (symlink !)
+  checknoexec (execbit !)
   rbc-names-v1
   rbc-revs-v1
 
@@ -49,9 +49,9 @@
 
   $ ls .hg/cache
   branch2-served
-  checkisexec
-  checklink
-  checklink-target
+  checkisexec (execbit !)
+  checklink (symlink !)
+  checklink-target (symlink !)
 
   $ cat a
   a
@@ -579,11 +579,11 @@
 No remote source
 
 #if windows
-  $ hg clone http://127.0.0.1:3121/a b
+  $ hg clone http://$LOCALIP:3121/a b
   abort: error: * (glob)
   [255]
 #else
-  $ hg clone http://127.0.0.1:3121/a b
+  $ hg clone http://$LOCALIP:3121/a b
   abort: error: *refused* (glob)
   [255]
 #endif
--- a/tests/test-command-template.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-command-template.t	Tue Apr 18 12:24:34 2017 -0400
@@ -134,6 +134,28 @@
     ('string', '\n'))
   -3
 
+Keyword arguments:
+
+  $ hg debugtemplate -r0 -v '{foo=bar|baz}'
+  (template
+    (keyvalue
+      ('symbol', 'foo')
+      (|
+        ('symbol', 'bar')
+        ('symbol', 'baz'))))
+  hg: parse error: can't use a key-value pair in this context
+  [255]
+
+  $ hg debugtemplate '{pad("foo", width=10, left=true)}\n'
+         foo
+
+Call function which takes named arguments by filter syntax:
+
+  $ hg debugtemplate '{" "|separate}'
+  $ hg debugtemplate '{("not", "an", "argument", "list")|separate}'
+  hg: parse error: unknown method 'list'
+  [255]
+
 Second branch starting at nullrev:
 
   $ hg update null
@@ -1108,11 +1130,11 @@
 
   $ hg log --style notexist
   abort: style 'notexist' not found
-  (available styles: bisect, changelog, compact, default, phases, status, xml)
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
   [255]
 
   $ hg log -T list
-  available styles: bisect, changelog, compact, default, phases, status, xml
+  available styles: bisect, changelog, compact, default, phases, show, status, xml
   abort: specify a template
   [255]
 
@@ -2662,13 +2684,21 @@
   hg: parse error: date expects a date information
   [255]
 
+  $ hg tip -T '{author|email|shortdate}\n'
+  abort: template filter 'shortdate' is not compatible with keyword 'author'
+  [255]
+
+  $ hg tip -T '{get(extras, "branch")|shortdate}\n'
+  abort: incompatible use of template filter 'shortdate'
+  [255]
+
 Error in nested template:
 
   $ hg log -T '{"date'
   hg: parse error at 2: unterminated string
   [255]
 
-  $ hg log -T '{"foo{date|=}"}'
+  $ hg log -T '{"foo{date|?}"}'
   hg: parse error at 11: syntax error
   [255]
 
@@ -2683,6 +2713,16 @@
   $ hg log -l 1 --template '{if(author, author)|user}\n'
   test
 
+Test index keyword:
+
+  $ hg log -l 2 -T '{index + 10}{files % " {index}:{file}"}\n'
+  10 0:a 1:b 2:fifth 3:fourth 4:third
+  11 0:a
+
+  $ hg branches -T '{index} {branch}\n'
+  0 default
+  1 foo
+
 Test diff function:
 
   $ hg diff -c 8
@@ -3348,6 +3388,18 @@
   $ hg log --color=always -l 1 --template '{label(red, "text\n")}'
   \x1b[0;31mtext\x1b[0m (esc)
 
+color effects can be nested (issue5413)
+
+  $ hg debugtemplate --color=always \
+  > '{label(red, "red{label(magenta, "ma{label(cyan, "cyan")}{label(yellow, "yellow")}genta")}")}\n'
+  \x1b[0;31mred\x1b[0;35mma\x1b[0;36mcyan\x1b[0m\x1b[0;31m\x1b[0;35m\x1b[0;33myellow\x1b[0m\x1b[0;31m\x1b[0;35mgenta\x1b[0m (esc)
+
+pad() should interact well with color codes (issue5416)
+
+  $ hg debugtemplate --color=always \
+  > '{pad(label(red, "red"), 5, label(cyan, "-"))}\n'
+  \x1b[0;31mred\x1b[0m\x1b[0;36m-\x1b[0m\x1b[0;36m-\x1b[0m (esc)
+
 label should be no-op if color is disabled:
 
   $ hg log --color=never -l 1 --template '{label(red, "text\n")}'
@@ -3360,6 +3412,37 @@
   $ hg log -r 0 --template '{if(branches, "yes", "no")}\n'
   no
 
+Test dict constructor:
+
+  $ hg log -r 0 -T '{dict(y=node|short, x=rev)}\n'
+  y=f7769ec2ab97 x=0
+  $ hg log -r 0 -T '{dict(x=rev, y=node|short) % "{key}={value}\n"}'
+  x=0
+  y=f7769ec2ab97
+  $ hg log -r 0 -T '{dict(x=rev, y=node|short)|json}\n'
+  {"x": 0, "y": "f7769ec2ab97"}
+  $ hg log -r 0 -T '{dict()|json}\n'
+  {}
+
+  $ hg log -r 0 -T '{dict(rev, node=node|short)}\n'
+  rev=0 node=f7769ec2ab97
+  $ hg log -r 0 -T '{dict(rev, node|short)}\n'
+  rev=0 node=f7769ec2ab97
+
+  $ hg log -r 0 -T '{dict(rev, rev=rev)}\n'
+  hg: parse error: duplicated dict key 'rev' inferred
+  [255]
+  $ hg log -r 0 -T '{dict(node, node|short)}\n'
+  hg: parse error: duplicated dict key 'node' inferred
+  [255]
+  $ hg log -r 0 -T '{dict(1 + 2)}'
+  hg: parse error: dict key cannot be inferred
+  [255]
+
+  $ hg log -r 0 -T '{dict(x=rev, x=node)}'
+  hg: parse error: dict got multiple values for keyword argument 'x'
+  [255]
+
 Test get function:
 
   $ hg log -r 0 --template '{get(extras, "branch")}\n'
@@ -3370,6 +3453,13 @@
   hg: parse error: get() expects a dict as first argument
   [255]
 
+Test json filter applied to hybrid object:
+
+  $ hg log -r0 -T '{files|json}\n'
+  ["a"]
+  $ hg log -r0 -T '{extras|json}\n'
+  {"branch": "default"}
+
 Test localdate(date, tz) function:
 
   $ TZ=JST-09 hg log -r0 -T '{date|localdate|isodate}\n'
@@ -3515,6 +3605,15 @@
   hg: parse error: pad() expects an integer width
   [255]
 
+Test invalid fillchar passed to pad function
+
+  $ hg log -r 0 -T '{pad(rev, 10, "")}\n'
+  hg: parse error: pad() expects a single fill character
+  [255]
+  $ hg log -r 0 -T '{pad(rev, 10, "--")}\n'
+  hg: parse error: pad() expects a single fill character
+  [255]
+
 Test boolean argument passed to pad function
 
  no crash
@@ -3795,6 +3894,11 @@
   o  foo line 1
      foo line 2
 
+  $ hg log -R a -r0 -T '{desc|splitlines}\n'
+  line 1 line 2
+  $ hg log -R a -r0 -T '{join(desc|splitlines, "|")}\n'
+  line 1|line 2
+
 Test startswith
   $ hg log -Gv -R a --template "{startswith(desc)}"
   hg: parse error: startswith expects two arguments
@@ -4100,6 +4204,11 @@
   abort: template filter 'utf8' is not compatible with keyword 'rev'
   [255]
 
+pad width:
+
+  $ HGENCODING=utf-8 hg debugtemplate "{pad('`cat utf-8`', 2, '-')}\n"
+  \xc3\xa9- (esc)
+
   $ cd ..
 
 Test that template function in extension is registered as expected
--- a/tests/test-commandserver.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-commandserver.t	Tue Apr 18 12:24:34 2017 -0400
@@ -199,6 +199,8 @@
   ui.usehttp2=true (?)
   ui.foo=bar
   ui.nontty=true
+  web.address=localhost
+  web\.ipv6=(?:True|False) (re)
   *** runcommand init foo
   *** runcommand -R foo showconfig ui defaults
   defaults.backout=-d "0 0"
--- a/tests/test-commit.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-commit.t	Tue Apr 18 12:24:34 2017 -0400
@@ -387,12 +387,12 @@
 
   $ cat >> .hg/hgrc <<EOF
   > [committemplate]
-  > changeset.commit.normal = HG: this is "commit.normal" template
+  > changeset.commit.normal = 'HG: this is "commit.normal" template
   >     HG: {extramsg}
   >     {if(activebookmark,
   >    "HG: bookmark '{activebookmark}' is activated\n",
   >    "HG: no bookmark is activated\n")}{subrepos %
-  >    "HG: subrepo '{subrepo}' is changed\n"}
+  >    "HG: subrepo '{subrepo}' is changed\n"}'
   > 
   > changeset.commit = HG: this is "commit" template
   >     HG: {extramsg}
--- a/tests/test-completion.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-completion.t	Tue Apr 18 12:24:34 2017 -0400
@@ -73,6 +73,7 @@
   debugbuilddag
   debugbundle
   debugcheckstate
+  debugcolor
   debugcommands
   debugcomplete
   debugconfig
@@ -129,6 +130,7 @@
 
 Show the global options
   $ hg debugcomplete --options | sort
+  --color
   --config
   --cwd
   --debug
@@ -138,6 +140,7 @@
   --help
   --hidden
   --noninteractive
+  --pager
   --profile
   --quiet
   --repository
@@ -157,6 +160,7 @@
   --address
   --certificate
   --cmdserver
+  --color
   --config
   --cwd
   --daemon
@@ -171,6 +175,7 @@
   --ipv6
   --name
   --noninteractive
+  --pager
   --pid-file
   --port
   --prefix
@@ -179,6 +184,7 @@
   --repository
   --stdio
   --style
+  --subrepos
   --templates
   --time
   --traceback
@@ -189,6 +195,7 @@
   -A
   -E
   -R
+  -S
   -a
   -d
   -h
@@ -211,8 +218,8 @@
   annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, ignore-all-space, ignore-space-change, ignore-blank-lines, include, exclude, template
   clone: noupdate, updaterev, rev, branch, pull, uncompressed, ssh, remotecmd, insecure
   commit: addremove, close-branch, amend, secret, edit, interactive, include, exclude, message, logfile, date, user, subrepos
-  diff: rev, change, text, git, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, unified, stat, root, include, exclude, subrepos
-  export: output, switch-parent, rev, text, git, nodates
+  diff: rev, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, unified, stat, root, include, exclude, subrepos
+  export: output, switch-parent, rev, text, git, binary, nodates
   forget: include, exclude
   init: ssh, remotecmd, insecure
   log: follow, follow-first, date, copies, keyword, rev, removed, only-merges, user, only-branch, branch, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
@@ -220,10 +227,10 @@
   pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
   push: force, rev, bookmark, branch, new-branch, ssh, remotecmd, insecure
   remove: after, force, subrepos, include, exclude
-  serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate
+  serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, subrepos
   status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, copies, print0, rev, change, include, exclude, subrepos, template
   summary: remote
-  update: clean, check, date, rev, tool
+  update: clean, check, merge, date, rev, tool
   addremove: similarity, subrepos, include, exclude, dry-run
   archive: no-decode, prefix, rev, type, subrepos, include, exclude
   backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
@@ -240,6 +247,7 @@
   debugbuilddag: mergeable-file, overwritten-file, new-file
   debugbundle: all, spec
   debugcheckstate: 
+  debugcolor: style
   debugcommands: 
   debugcomplete: options
   debugcreatestreamclonebundle: 
@@ -352,3 +360,18 @@
   fee
   fie
   fo
+
+Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
+used for completions in some shells.
+
+  $ hg debuglabelcomplete
+  Fum
+  default
+  fee
+  fie
+  fo
+  tip
+  $ hg debuglabelcomplete f
+  fee
+  fie
+  fo
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-config-env.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,49 @@
+# Test the config layer generated by environment variables
+
+from __future__ import absolute_import, print_function
+
+import os
+
+from mercurial import (
+    encoding,
+    rcutil,
+    ui as uimod,
+    util,
+)
+
+testtmp = encoding.environ['TESTTMP']
+
+# prepare hgrc files
+def join(name):
+    return os.path.join(testtmp, name)
+
+with open(join('sysrc'), 'w') as f:
+    f.write('[ui]\neditor=e0\n[pager]\npager=p0\n')
+
+with open(join('userrc'), 'w') as f:
+    f.write('[ui]\neditor=e1')
+
+# replace rcpath functions so they point to the files above
+def systemrcpath():
+    return [join('sysrc')]
+
+def userrcpath():
+    return [join('userrc')]
+
+rcutil.systemrcpath = systemrcpath
+rcutil.userrcpath = userrcpath
+os.path.isdir = lambda x: False # hack: do not load default.d/*.rc
+
+# utility to print configs
+def printconfigs(env):
+    encoding.environ = env
+    rcutil._rccomponents = None # reset cache
+    ui = uimod.ui.load()
+    for section, name, value in ui.walkconfig():
+        source = ui.configsource(section, name)
+        print('%s.%s=%s # %s' % (section, name, value, util.pconvert(source)))
+    print('')
+
+# environment variable overrides
+printconfigs({})
+printconfigs({'EDITOR': 'e2', 'PAGER': 'p2'})
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-config-env.py.out	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,6 @@
+pager.pager=p0 # $TESTTMP/sysrc:4
+ui.editor=e1 # $TESTTMP/userrc:2
+
+pager.pager=p2 # $PAGER
+ui.editor=e1 # $TESTTMP/userrc:2
+
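
Read together, the two stanzas pin down the precedence this test verifies:
the user rc outranks the environment (ui.editor stays e1 despite
EDITOR=e2), while the environment outranks the system rc (pager.pager goes
from p0 to p2 under PAGER=p2). Annotating the test's second call with the
expected outcome:

  printconfigs({'EDITOR': 'e2', 'PAGER': 'p2'})
  # ui.editor=e1    ($TESTTMP/userrc wins over $EDITOR)
  # pager.pager=p2  ($PAGER wins over the system rc)
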
--- a/tests/test-config.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-config.t	Tue Apr 18 12:24:34 2017 -0400
@@ -58,12 +58,12 @@
   [
    {
     "name": "Section.KeY",
-    "source": "*.hgrc:16", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    },
    {
     "name": "Section.key",
-    "source": "*.hgrc:17", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "lower case"
    }
   ]
@@ -71,7 +71,7 @@
   [
    {
     "name": "Section.KeY",
-    "source": "*.hgrc:16", (glob)
+    "source": "*.hgrc:*", (glob)
     "value": "Case Sensitive"
    }
   ]
@@ -158,3 +158,23 @@
   $ hg showconfig paths
   paths.foo:suboption=~/foo
   paths.foo=$TESTTMP/foo
+
+edit failure
+
+  $ HGEDITOR=false hg config --edit
+  abort: edit failed: false exited with status 1
+  [255]
+
+config affected by environment variables
+
+  $ EDITOR=e1 VISUAL=e2 hg config --debug | grep 'ui\.editor'
+  $VISUAL: ui.editor=e2
+
+  $ VISUAL=e2 hg config --debug --config ui.editor=e3 | grep 'ui\.editor'
+  --config: ui.editor=e3
+
+  $ PAGER=p1 hg config --debug | grep 'pager\.pager'
+  $PAGER: pager.pager=p1
+
+  $ PAGER=p1 hg config --debug --config pager.pager=p2 | grep 'pager\.pager'
+  --config: pager.pager=p2
--- a/tests/test-conflict.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-conflict.t	Tue Apr 18 12:24:34 2017 -0400
@@ -65,7 +65,10 @@
 Verify custom conflict markers
 
   $ hg up -q --clean .
-  $ printf "\n[ui]\nmergemarkertemplate={author} {rev}\n" >> .hg/hgrc
+  $ cat <<EOF >> .hg/hgrc
+  > [ui]
+  > mergemarkertemplate = '{author} {rev}'
+  > EOF
 
   $ hg merge 1
   merging a
--- a/tests/test-context.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-context.py	Tue Apr 18 12:24:34 2017 -0400
@@ -59,7 +59,7 @@
 # test performing a diff on a memctx
 
 for d in ctxb.diff(ctxa, git=True):
-    print(d)
+    print(d, end='')
 
 # test safeness and correctness of "ctx.status()"
 print('= checking context.status():')
--- a/tests/test-context.py.out	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-context.py.out	Tue Apr 18 12:24:34 2017 -0400
@@ -4,13 +4,11 @@
 UTF-8   : Grüezi!
 <status modified=['foo'], added=[], removed=[], deleted=[], unknown=[], ignored=[], clean=[]>
 diff --git a/foo b/foo
-
 --- a/foo
 +++ b/foo
 @@ -1,1 +1,2 @@
  foo
 +bar
-
 = checking context.status():
 == checking workingctx.status:
 wctx._status=<status modified=['bar-m'], added=['bar-a'], removed=['bar-r'], deleted=[], unknown=[], ignored=[], clean=[]>
--- a/tests/test-contrib-perf.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-contrib-perf.t	Tue Apr 18 12:24:34 2017 -0400
@@ -109,6 +109,7 @@
    perfvolatilesets
                  benchmark the computation of various volatile set
    perfwalk      (no help text available)
+   perfwrite     microbenchmark ui.write
   
   (use 'hg help -v perfstatusext' to show built-in aliases and global options)
   $ hg perfaddremove
@@ -164,3 +165,7 @@
   $ (hg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
   >  hg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
   > "$TESTDIR"/check-perf-code.py contrib/perf.py
+  contrib/perf.py:869:
+   >             r.revision(r.node(x))
+   don't convert rev to node before passing to revision(nodeorrev)
+  [1]
--- a/tests/test-convert-git.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-convert-git.t	Tue Apr 18 12:24:34 2017 -0400
@@ -330,7 +330,7 @@
 
 input validation
   $ hg convert --config convert.git.similarity=foo --datesort git-repo2 fullrepo
-  abort: convert.git.similarity is not an integer ('foo')
+  abort: convert.git.similarity is not a valid integer ('foo')
   [255]
   $ hg convert --config convert.git.similarity=-1 --datesort git-repo2 fullrepo
   abort: similarity must be between 0 and 100
--- a/tests/test-convert-p4.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-convert-p4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -141,5 +141,23 @@
   rev=1 desc="change a" tags="" files="a"
   rev=0 desc="initial" tags="" files="a b/c"
 
+empty commit message
+  $ p4 edit a
+  //depot/test-mercurial-import/a#3 - opened for edit
+  $ echo aaaaa >> a
+  $ p4 submit -d ""
+  Submitting change 6.
+  Locking 1 files ...
+  edit //depot/test-mercurial-import/a#4
+  Change 6 submitted.
+  $ hg convert -s p4 $DEPOTPATH dst
+  scanning source...
+  reading p4 views
+  collecting p4 changelists
+  6 **empty changelist description**
+  sorting...
+  converting...
+  0 
+
 exit trap:
   stopping the p4 server
--- a/tests/test-debugcommands.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-debugcommands.t	Tue Apr 18 12:24:34 2017 -0400
@@ -116,18 +116,23 @@
   $ cat > debugstacktrace.py << EOF
   > from mercurial.util import debugstacktrace, dst, sys
   > def f():
-  >     dst('hello world')
+  >     debugstacktrace(f=sys.stdout)
+  >     g()
   > def g():
-  >     f()
-  >     debugstacktrace(skip=-5, f=sys.stdout)
-  > g()
+  >     dst('hello from g\\n', skip=1)
+  >     h()
+  > def h():
+  >     dst('hi ...\\nfrom h hidden in g', 1, depth=2)
+  > f()
   > EOF
   $ python debugstacktrace.py
-  hello world at:
-   debugstacktrace.py:7 in * (glob)
-   debugstacktrace.py:5 in g
-   debugstacktrace.py:3 in f
   stacktrace at:
-   debugstacktrace.py:7 *in * (glob)
-   debugstacktrace.py:6 *in g (glob)
-   */util.py:* in debugstacktrace (glob)
+   debugstacktrace.py:10 in * (glob)
+   debugstacktrace.py:3  in f
+  hello from g at:
+   debugstacktrace.py:10 in * (glob)
+   debugstacktrace.py:4  in f
+  hi ...
+  from h hidden in g at:
+   debugstacktrace.py:4 in f
+   debugstacktrace.py:7 in g
--- a/tests/test-devel-warnings.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-devel-warnings.t	Tue Apr 18 12:24:34 2017 -0400
@@ -3,7 +3,7 @@
   > """A small extension that tests our developer warnings
   > """
   > 
-  > from mercurial import cmdutil, repair, revset
+  > from mercurial import cmdutil, repair, util
   > 
   > cmdtable = {}
   > command = cmdutil.command(cmdtable)
@@ -58,11 +58,9 @@
   >     def foobar(ui):
   >         ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
   >     foobar(ui)
-  > 
-  > def oldstylerevset(repo, subset, x):
-  >     return list(subset)
-  > 
-  > revset.symbols['oldstyle'] = oldstylerevset
+  > @command('nouiwarning', [], '')
+  > def nouiwarning(ui, repo):
+  >     util.nouideprecwarn('this is a test', '13.37')
   > EOF
 
   $ cat << EOF >> $HGRCPATH
@@ -91,7 +89,7 @@
    */mercurial/dispatch.py:* in run (glob)
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
-   */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/dispatch.py:* in _callcatch (glob)
    */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
@@ -106,16 +104,11 @@
   $ echo a > a
   $ hg add a
   $ hg commit -m a
-  $ hg stripintr
+  $ hg stripintr 2>&1 | egrep -v '^(\*\*|  )'
   saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
-  abort: programming error: cannot strip from inside a transaction
-  (contact your extension maintainer)
-  [255]
+  Traceback (most recent call last):
+  mercurial.error.ProgrammingError: cannot strip from inside a transaction
 
-  $ hg log -r "oldstyle()" -T '{rev}\n'
-  devel-warn: revset "oldstyle" uses list instead of smartset
-  (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
-  0
   $ hg oldanddeprecated
   devel-warn: foorbar is deprecated, go shopping
   (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
@@ -127,7 +120,7 @@
    */mercurial/dispatch.py:* in run (glob)
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
-   */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/dispatch.py:* in _callcatch (glob)
    */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
@@ -136,10 +129,7 @@
    */mercurial/dispatch.py:* in <lambda> (glob)
    */mercurial/util.py:* in check (glob)
    $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
-  $ hg blackbox -l 9
-  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" uses list instead of smartset
-  (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
-  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r oldstyle() -T {rev}\n exited 0 after * seconds (glob)
+  $ hg blackbox -l 7
   1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
   1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
   (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
@@ -151,7 +141,7 @@
    */mercurial/dispatch.py:* in run (glob)
    */mercurial/dispatch.py:* in dispatch (glob)
    */mercurial/dispatch.py:* in _runcatch (glob)
-   */mercurial/dispatch.py:* in callcatch (glob)
+   */mercurial/dispatch.py:* in _callcatch (glob)
    */mercurial/scmutil.py* in callcatch (glob)
    */mercurial/dispatch.py:* in _runcatchfunc (glob)
    */mercurial/dispatch.py:* in _dispatch (glob)
@@ -161,7 +151,7 @@
    */mercurial/util.py:* in check (glob)
    $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
   1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
-  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9
+  1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 7
 
 Test programming error failure:
 
@@ -176,4 +166,15 @@
   Traceback (most recent call last):
   mercurial.error.ProgrammingError: transaction requires locking
 
+Old style deprecation warning
+
+  $ hg nouiwarning
+  $TESTTMP/buggylocking.py:61: DeprecationWarning: this is a test
+  (compatibility will be dropped after Mercurial-13.37, update your code.)
+    util.nouideprecwarn('this is a test', '13.37')
+
+(disabled outside of test run)
+
+  $ HGEMITWARNINGS= hg nouiwarning
+
   $ cd ..
--- a/tests/test-diff-binary-file.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-diff-binary-file.t	Tue Apr 18 12:24:34 2017 -0400
@@ -73,5 +73,72 @@
   literal 5
   Mc$_OqttjCF00uV!&;S4c
   
+  $ cd ..
+
+Test text mode with extended git-style diff format
+  $ hg init b
+  $ cd b
+  $ cat > writebin.py <<EOF
+  > import sys
+  > path = sys.argv[1]
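  > # raw NUL bytes make Mercurial detect the file as binary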
+  > open(path, 'wb').write('\x00\x01\x02\x03')
+  > EOF
+  $ python writebin.py binfile.bin
+  $ hg add binfile.bin
+  $ hg ci -m 'add binfile.bin'
+
+  $ echo >> binfile.bin
+  $ hg ci -m 'change binfile.bin'
+
+  $ hg diff --git -a -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  --- a/binfile.bin
+  +++ b/binfile.bin
+  @@ -1,1 +1,1 @@
+  -\x00\x01\x02\x03 (esc)
+  \ No newline at end of file
+  +\x00\x01\x02\x03 (esc)
+
+  $ HGPLAIN=1 hg diff --git -a -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  --- a/binfile.bin
+  +++ b/binfile.bin
+  @@ -1,1 +1,1 @@
+  -\x00\x01\x02\x03 (esc)
+  \ No newline at end of file
+  +\x00\x01\x02\x03 (esc)
+
+Test binary mode with extended git-style diff format
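+
+(with --no-binary the hunk is replaced by a "binary file has changed" note;
+--binary instead emits the delta as a base85-encoded "GIT binary patch" and,
+as shown below, takes precedence over diff.nobinary=True)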
+  $ hg diff --no-binary -r 0 -r 1
+  diff -r fb45f71337ad -r 9ca112d1a3c1 binfile.bin
+  Binary file binfile.bin has changed
+
+  $ hg diff --git --no-binary -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  Binary file binfile.bin has changed
+
+  $ hg diff --git --binary -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  index eaf36c1daccfdf325514461cd1a2ffbc139b5464..ba71a782e93f3fb63a428383706065e3ec2828e9
+  GIT binary patch
+  literal 5
+  Mc${NkWMbw50018V5dZ)H
+  
+  $ hg diff --git --binary --config diff.nobinary=True -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  index eaf36c1daccfdf325514461cd1a2ffbc139b5464..ba71a782e93f3fb63a428383706065e3ec2828e9
+  GIT binary patch
+  literal 5
+  Mc${NkWMbw50018V5dZ)H
+  
+
+  $ hg diff --git --binary --text -r 0 -r 1
+  diff --git a/binfile.bin b/binfile.bin
+  --- a/binfile.bin
+  +++ b/binfile.bin
+  @@ -1,1 +1,1 @@
+  -\x00\x01\x02\x03 (esc)
+  \ No newline at end of file
+  +\x00\x01\x02\x03 (esc)
 
   $ cd ..
--- a/tests/test-diff-color.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-diff-color.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,10 +1,10 @@
 Setup
 
   $ cat <<EOF >> $HGRCPATH
+  > [ui]
+  > color = always
   > [color]
   > mode = ansi
-  > [extensions]
-  > color =
   > EOF
   $ hg init repo
   $ cd repo
@@ -35,7 +35,7 @@
 
 default context
 
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -51,7 +51,7 @@
 
 --unified=2
 
-  $ hg diff --nodates -U 2  --color=always
+  $ hg diff --nodates -U 2
   \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -65,10 +65,11 @@
 
 diffstat
 
-  $ hg diff --stat --color=always
+  $ hg diff --stat
    a |  2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc)
    1 files changed, 1 insertions(+), 1 deletions(-)
   $ cat <<EOF >> $HGRCPATH
+  > [extensions]
   > record =
   > [ui]
   > interactive = true
@@ -81,7 +82,7 @@
 record
 
   $ chmod +x a
-  $ hg record --color=always -m moda a <<EOF
+  $ hg record -m moda a <<EOF
   > y
   > y
   > EOF
@@ -111,7 +112,7 @@
 
 qrecord
 
-  $ hg qrecord --color=always -m moda patch <<EOF
+  $ hg qrecord -m moda patch <<EOF
   > y
   > y
   > EOF
@@ -151,7 +152,7 @@
   $ echo aa >> a
   $ echo bb >> sub/b
 
-  $ hg diff --color=always -S
+  $ hg diff -S
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -176,7 +177,7 @@
   > mid	tab
   > 	all		tabs	
   > EOF
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
@@ -192,7 +193,7 @@
   \x1b[0;32m+\x1b[0m	\x1b[0;32mall\x1b[0m		\x1b[0;32mtabs\x1b[0m\x1b[0;1;41m	\x1b[0m (esc)
   $ echo "[color]" >> $HGRCPATH
   $ echo "diff.tab = bold magenta" >> $HGRCPATH
-  $ hg diff --nodates --color=always
+  $ hg diff --nodates
   \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
   \x1b[0;31;1m--- a/a\x1b[0m (esc)
   \x1b[0;32;1m+++ b/a\x1b[0m (esc)
--- a/tests/test-doctest.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-doctest.py	Tue Apr 18 12:24:34 2017 -0400
@@ -5,10 +5,16 @@
 import doctest
 import os
 import sys
+
+ispy3 = (sys.version_info[0] >= 3)
+
 if 'TERM' in os.environ:
     del os.environ['TERM']
 
-def testmod(name, optionflags=0, testtarget=None):
+# TODO: migrate doctests to py3 and enable them on both versions
+def testmod(name, optionflags=0, testtarget=None, py2=True, py3=False):
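+    # run the doctests only when the module is flagged for the running
+    # Python major version: py2=True covers Python 2, py3=True Python 3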
+    if not (not ispy3 and py2 or ispy3 and py3):
+        return
     __import__(name)
     mod = sys.modules[name]
     if testtarget is not None:
@@ -17,6 +23,8 @@
 
 testmod('mercurial.changegroup')
 testmod('mercurial.changelog')
+testmod('mercurial.color')
+testmod('mercurial.config')
 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
 testmod('mercurial.dispatch')
 testmod('mercurial.encoding')
@@ -24,11 +32,14 @@
 testmod('mercurial.hg')
 testmod('mercurial.hgweb.hgwebdir_mod')
 testmod('mercurial.match')
+testmod('mercurial.mdiff')
 testmod('mercurial.minirst')
 testmod('mercurial.patch')
 testmod('mercurial.pathutil')
 testmod('mercurial.parser')
-testmod('mercurial.revset')
+testmod('mercurial.pycompat', py3=True)
+testmod('mercurial.revsetlang')
+testmod('mercurial.smartset')
 testmod('mercurial.store')
 testmod('mercurial.subrepo')
 testmod('mercurial.templatefilters')
--- a/tests/test-eol.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-eol.t	Tue Apr 18 12:24:34 2017 -0400
@@ -470,6 +470,22 @@
   > EOF
   $ hg commit -m 'consistent'
 
+  $ hg init subrepo
+  $ hg -R subrepo pull -qu .
+  $ echo "subrepo = subrepo" > .hgsub
+  $ hg ci -Am "add subrepo"
+  adding .hgeol
+  adding .hgsub
+  $ hg archive -S ../archive
+  $ find ../archive/* | sort
+  ../archive/a.txt
+  ../archive/subrepo
+  ../archive/subrepo/a.txt
+  $ cat ../archive/a.txt ../archive/subrepo/a.txt
+  first\r (esc)
+  second\r (esc)
+  first\r (esc)
+  second\r (esc)
 
 Test trailing newline
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,295 @@
+============================================
+Testing obsolescence markers push: Cases A.1
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
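+For example, `hg push -r B` is expected to also exchange the marker chains
+relevant to B's ancestors, while markers attached only to other heads stay
+local.
+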
+Category A: simple cases
+TestCase 1: pushing a single head
+Subcases:
+# A.1.1 pushing a single head (2 variants)
+# A.1.2 pushing multiple changesets into a single head (2 variants)
+
+Case: A.1.1 pushing a single head
+=================================
+..
+.. {{{
+..     ⇠◔ A
+..      |
+..      ● O
+.. }}}
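+..
+.. (assumed legend for these diagrams: ● a changeset common to both sides,
+.. ◔ a draft changeset being pushed, ○ a draft changeset left out of the
+.. push, ø an obsolete changeset, ⊗ a pruned changeset, and ⇠ an
+.. obsolescence marker pointing at the predecessor)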
+..
+.. Marker exists from:
+..
+..  * A
+..
+.. Commands run:
+..
+..  * hg push -r A
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * chain from A
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
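+
+(the helper script is assumed to provide `setuprepos`, creating the `main`,
+`pushdest` and `pulldest` repositories around a shared root changeset O, and
+`dotest`, which pushes and pulls the given revision and dumps each obsstore)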
+
+initial
+
+  $ setuprepos A.1.1
+  creating test repo for test case A.1.1
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
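+
+(this records a marker rewriting the fake all-'a' node into A, so that A
+carries an obsolescence chain without hiding any local changeset)
+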
+  $ hg log -G
+  @  f5bc6836db60 (draft): A
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+setup both variants
+
+  $ cp -R A.1.1 A.1.1.a
+  $ cp -R A.1.1 A.1.1.b
+
+Variant a: push -r A
+--------------------
+
+  $ dotest A.1.1.a A
+  ## Running testcase A.1.1.a
+  # testing echange of "A" (f5bc6836db60)
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "f5bc6836db60" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Variant b: push
+---------------
+
+  $ dotest A.1.1.b
+  ## Running testcase A.1.1.b
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+A.1.2 pushing multiple changesets into a single head
+====================================================
+
+.. {{{
+..      ◔ B
+..      |
+..     ⇠◔ A
+..      |
+..      ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * A
+..
+.. Commands run:
+..
+..  * hg push -r B
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * chain from A
+
+Setup
+-----
+
+initial
+
+  $ setuprepos A.1.2
+  creating test repo for test case A.1.2
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ mkcommit B
+  $ hg log -G
+  @  f6fbb35d8ac9 (draft): B
+  |
+  o  f5bc6836db60 (draft): A
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+setup both variants
+
+  $ cp -R A.1.2 A.1.2.a
+  $ cp -R A.1.2 A.1.2.b
+
+Variant a: push -r B
+--------------------
+
+  $ dotest A.1.2.a B
+  ## Running testcase A.1.2.a
+  # testing echange of "B" (f6fbb35d8ac9)
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "B" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 2 changesets with 2 changes to 2 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "f6fbb35d8ac9" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Variant b: push
+---------------
+
+  $ dotest A.1.2.b
+  ## Running testcase A.1.2.b
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 2 changesets with 2 changes to 2 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,124 @@
+============================================
+Testing obsolescence markers push: Cases A.2
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 2: Two heads, only one of them pushed
+
+A.2 Two heads, only one of them pushed
+======================================
+
+.. {{{
+..     ⇠○ B
+..   ⇠◔ | A
+..    |/
+..    ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * A
+..  * B
+..
+..
+.. Command run:
+..
+..  * hg push -r A
+..
+.. Expected exchange:
+..
+..  * chain from A
+..
+.. Expected Exclude:
+..
+..  * chain from B
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos A.2
+  creating test repo for test case A.2
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  $ hg up '.~1'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B
+  created new head
+  $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B)'`
+  $ hg log -G
+  @  35b183996678 (draft): B
+  |
+  | o  f5bc6836db60 (draft): A
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest A.2 A
+  ## Running testcase A.2
+  # testing echange of "A" (f5bc6836db60)
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "f5bc6836db60" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,238 @@
+============================================
+Testing obsolescence markers push: Cases A.3
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 3: old branch split in two, only one of the new ones pushed
+Variants:
+# a: changesets are unknown on remote
+# b: changesets are known on remote (push needs -f)
+
+A.3 new branches created, one pushed.
+=====================================
+
+.. {{{
+..   B' ○⇢ø B
+..      | |
+..      \Aø⇠◔ A'
+..       \|/
+..        ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠○ A'`
+..  * `B ø⇠○ B'`
+..
+.. Command run:
+..
+..  * hg push -r A
+..
+.. Expected exchange:
+..
+..  * chain from A
+..
+.. Expected exclude:
+..
+..  * chain from B
+..
+.. Extra note:
+..
+.. If A and B are remotely known, we should expect:
+..
+..  * `hg push` will complain about the new head
+..  * `hg push` should complain about unstable history creation
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos A.3.a
+  creating test repo for test case A.3.a
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B0
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+  $ hg update -q 0
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  $ hg log -G --hidden
+  @  f6298a8ac3a4 (draft): B1
+  |
+  | o  e5ea8f9c7314 (draft): A1
+  |/
+  | x  6e72f0a95b5e (draft): B0
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test for first version (changeset unknown in remote)
+-----------------------------------------------------------
+
+  $ dotest A.3.a A1
+  ## Running testcase A.3.a
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+other variant: changeset known in remote
+----------------------------------------
+
+  $ setuprepos A.3.b
+  creating test repo for test case A.3.b
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B0
+  $ hg push -q ../pushdest
+  $ hg push -q ../pulldest
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+  $ hg update -q 0
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  $ hg log -G --hidden
+  @  f6298a8ac3a4 (draft): B1
+  |
+  | o  e5ea8f9c7314 (draft): A1
+  |/
+  | x  6e72f0a95b5e (draft): B0
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test for second version (changeset known in remote)
+-----------------------------------------------------------
+
+check it complains about multiple heads
+
+  $ cd A.3.b
+  $ hg push -R main -r 'desc(A1)' pushdest
+  pushing to pushdest
+  searching for changes
+  abort: push creates new remote head e5ea8f9c7314!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+  $ cd ..
+
+test obsmarkers exchange.
+
+  $ dotest A.3.b A1 -f
+  ## Running testcase A.3.b
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files (+1 heads)
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  1 new obsolescence markers
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,127 @@
+============================================
+Testing obsolescence markers push: Cases A.4
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 4: Push in the middle of the obsolescence chain
+
+A.4 Push in the middle of the obsolescence chain
+================================================
+
+.. (Where we show that we should not push a marker without its successors)
+..
+.. {{{
+..   B ◔
+..     |
+..   A⇠ø⇠○ A'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠○ A'`
+..  * chain from A
+..
+.. Command run:
+..
+..  * hg push -r B
+..
+.. Expected exchange:
+..
+..  * Chain from A
+..
+.. Expected Exclude:
+..
+..  * `A ø⇠○ A'`
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos A.4
+  creating test repo for test case A.4
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B
+  $ hg update 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'`
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg log -G --hidden
+  @  e5ea8f9c7314 (draft): A1
+  |
+  | o  06055a7959d4 (draft): B
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test for first version
+-----------------------------
+
+  $ dotest A.4 B -f
+  ## Running testcase A.4
+  # testing echange of "B" (06055a7959d4)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "B" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 2 changesets with 2 changes to 2 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "06055a7959d4" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A5.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,136 @@
+============================================
+Testing obsolescence markers push: Cases A.5
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 5: partial reordering
+
+A.5 partial reordering
+======================
+
+..
+.. {{{
+..   B ø⇠⇠
+..     | ⇡
+..   A ø⇠⇠⇠○ A'
+..     | ⇡/
+..     | ○ B'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `Aø⇠○ A'`
+..  * `Bø⇠○ B'`
+..
+.. Command run:
+..
+..  * hg push -r B
+..
+.. Expected exchange:
+..
+..  * `Bø⇠○ B'`
+..
+.. Expected Exclude:
+..
+..  * `Aø⇠○ A'`
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos A.5
+  creating test repo for test case A.5
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B0
+  $ hg update 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ mkcommit A1
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'`
+  $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg log -G --hidden
+  @  8c0a98c83722 (draft): A1
+  |
+  o  f6298a8ac3a4 (draft): B1
+  |
+  | x  6e72f0a95b5e (draft): B0
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 8c0a98c8372212c6efde4bfdcef006f27ff759d3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest A.5 B1
+  ## Running testcase A.5
+  # testing echange of "B1" (f6298a8ac3a4)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 8c0a98c8372212c6efde4bfdcef006f27ff759d3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "B1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 8c0a98c8372212c6efde4bfdcef006f27ff759d3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "f6298a8ac3a4" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 8c0a98c8372212c6efde4bfdcef006f27ff759d3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 28b51eb45704506b5c603decd6bf7ac5e0f6a52f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A6.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,150 @@
+============================================
+Testing obsolescence markers push: Cases A.6
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 6: new markers between changesets already known on both sides
+Variants:
+# a: explicit push
+# b: bare push
+
+A.6 new markers between changesets already known on both sides
+==============================================================
+
+.. {{{
+..   A ◕⇠● B
+..     |/
+..     ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * `A◕⇠● B`
+..
+.. Commands run:
+..
+..  * hg push -r B
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * `A◕⇠● B`
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos A.6
+  creating test repo for test case A.6
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+
+make both changeset known in remote
+
+  $ hg push -qf ../pushdest
+  $ hg push -qf ../pulldest
+
+create a marker after this
+
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg log -G --hidden
+  @  e5ea8f9c7314 (draft): A1
+  |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R A.6 A.6.a
+  $ cp -R A.6 A.6.b
+
+Actual Test (explicit push version)
+-----------------------------------
+
+  $ dotest A.6.a A1
+  ## Running testcase A.6.a
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push version)
+-------------------------------
+
+  $ dotest A.6.b
+  ## Running testcase A.6.b
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-A7.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,93 @@
+============================================
+Testing obsolescence markers push: Cases A.7
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category A: simple cases
+TestCase 7: markers on a non-targeted common changeset
+
+A.7 non-targeted common changeset
+=================================
+
+.. {{{
+..    ⇠◕ A
+..     |
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * Chain from A
+..
+.. Command run:
+..
+..  * hg push -r O
+..
+.. Expected exchange:
+..
+..  * ø
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos A.7
+  creating test repo for test case A.7
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg push -q ../pushdest
+  $ hg push -q ../pulldest
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'`
+  $ hg log -G --hidden
+  @  f5bc6836db60 (draft): A
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------------------------------
+
+  $ dotest A.7 O
+  ## Running testcase A.7
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  ## post push state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  ## post pull state
+  # obstore: main
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,104 @@
+============================================
+Testing obsolescence markers push: Cases B.1
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 1: Prune on non-targeted common changeset
+
+B.1 Prune on non-targeted common changeset
+==========================================
+
+.. {{{
+..     ⊗ B
+..     |
+..     ◕ A
+..     |
+..     ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * B (prune)
+..
+.. Command run:
+..
+..  * hg push -r O
+..
+.. Expected exclude:
+..
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos B.1
+  creating test repo for test case B.1
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ mkcommit B
+
+make both changeset known in remote
+
+  $ hg push -qf ../pushdest
+  $ hg push -qf ../pulldest
+
+create prune marker
+
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  f6fbb35d8ac9 (draft): B
+  |
+  @  f5bc6836db60 (draft): A
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
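+
+(a prune marker has no successor: the line above lists the pruned node, a
+successor count of 0, and the parent of the pruned changeset between braces)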
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest B.1 O
+  ## Running testcase B.1
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  ## post push state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  ## post pull state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,137 @@
+============================================
+Testing obsolescence markers push: Cases B.2
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 2: Prune on targeted common changeset
+Variants:
+# a: explicit push
+# b: bare push
+
+B.2 Pruned changeset on head: nothing pushed
+============================================
+
+.. {{{
+..     ⊗ A
+..     |
+..     ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * A (prune)
+..
+.. Commands run:
+..
+..  * hg push -r O
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * prune marker for A
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos B.2
+  creating test repo for test case B.2
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  f5bc6836db60 (draft): A
+  |
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R B.2 B.2.a
+  $ cp -R B.2 B.2.b
+
+Actual Test (explicit push version)
+-----------------------------------
+
+  $ dotest B.2.a O
+  ## Running testcase B.2.a
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push version)
+-----------------------------------
+
+  $ dotest B.2.b
+  ## Running testcase B.2.b
+  ## initial state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,115 @@
+============================================
+Testing obsolescence markers push: Cases B.3
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets requested to be "in sync" after the push (even if they are
+already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 3: Pruned changeset on non-pushed part of the history
+
+B.3 Pruned changeset on non-pushed part of the history
+======================================================
+
+.. {{{
+..   ⊗ C
+..   |
+..   ○ B
+..   | ◔ A
+..   |/
+..   ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * C (prune)
+..
+.. Command run:
+..
+..  * hg push -r A
+..
+.. Expected exchange:
+..
+..  * ø
+..
+.. Expected exclude:
+..
+..  * chain from B
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos B.3
+  creating test repo for test case B.3
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg up --quiet 0
+  $ mkcommit B
+  created new head
+  $ mkcommit C
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  e56289ab6378 (draft): C
+  |
+  @  35b183996678 (draft): B
+  |
+  | o  f5bc6836db60 (draft): A
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  e56289ab6378dc752fd7965f8bf66b58bda740bd 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------------------------------
+
+  $ dotest B.3 A
+  ## Running testcase B.3
+  # testing echange of "A" (f5bc6836db60)
+  ## initial state
+  # obstore: main
+  e56289ab6378dc752fd7965f8bf66b58bda740bd 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  ## post push state
+  # obstore: main
+  e56289ab6378dc752fd7965f8bf66b58bda740bd 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pulling "f5bc6836db60" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  e56289ab6378dc752fd7965f8bf66b58bda740bd 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,163 @@
+============================================
+Testing obsolescence markers push: Cases B.4
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 4: Pruned changeset on common part of the history
+Variants:
+# a: explicit push
+# b: bare push
+
+B.4 Pruned changeset on common part of the history
+==================================================
+
+.. {{{
+..   ⊗ C
+..   | ● B
+..   | |
+..   | ● A
+..   |/
+..   ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * C (prune)
+..
+.. Commands run:
+..
+..  * hg push -r B
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * prune for C
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos B.4
+  creating test repo for test case B.4
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ mkcommit B
+  $ hg phase --public .
+  $ hg push ../pushdest
+  pushing to ../pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 2 changesets with 2 changes to 2 files
+  $ hg push ../pulldest
+  pushing to ../pulldest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 2 changesets with 2 changes to 2 files
+  $ hg update -q 0
+  $ mkcommit C
+  created new head
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  7f7f229b13a6 (draft): C
+  |
+  | o  f6fbb35d8ac9 (public): B
+  | |
+  | o  f5bc6836db60 (public): A
+  |/
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R B.4 B.4.a
+  $ cp -R B.4 B.4.b
+
+Actual Test (explicit push version)
+-----------------------------------
+
+  $ dotest B.4.a O
+  ## Running testcase B.4.a
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push version)
+-----------------------------------
+
+  $ dotest B.4.b
+  ## Running testcase B.4.b
+  ## initial state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  1 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B5.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,148 @@
+============================================
+Testing obsolescence markers push: Cases B.5
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 5: Push of a child of a changeset whose successor is pruned
+
+B.5 Push of a child of a changeset whose successor is pruned
+==============================================================
+
+.. This case mirrors A.4, but with the changeset's successors pruned.
+..
+.. {{{
+..   C ◔
+..     |
+..   B⇠ø⇠⊗ B'
+..     | |
+..   A ø⇠○ A'
+..     |/
+..     ●
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠○ A'`
+..  * `B ø⇠○ B'`
+..  * chain from B
+..  * `B' is pruned`
+..
+.. Command run:
+..
+..  * hg push -r C
+..
+.. Expected exchange:
+..
+..  * chain from B
+..
+.. Expected exclude:
+..
+..  * `A ø⇠○ A'`
+..  * `B ø⇠○ B'`
+..  * `B' prune`
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos B.5
+  creating test repo for test case B.5
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B0
+  $ mkcommit C
+  $ hg up --quiet 0
+  $ mkcommit A1
+  created new head
+  $ mkcommit B1
+  $ hg debugobsolete --hidden `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg debugobsolete --hidden aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(B0)'`
+  $ hg debugobsolete --hidden `getid 'desc(B0)'` `getid 'desc(B1)'`
+  $ hg prune -qd '0 0' 'desc(B1)'
+  $ hg log -G --hidden
+  x  069b05c3876d (draft): B1
+  |
+  @  e5ea8f9c7314 (draft): A1
+  |
+  | o  1d0f3cd25300 (draft): C
+  | |
+  | x  6e72f0a95b5e (draft): B0
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  069b05c3876d56f62895e853a501ea58ea85f68d 0 {e5ea8f9c73143125d36658e90ef70c6d2027a5b7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test (explicit push version)
+-----------------------------------
+
+  $ dotest B.5 C -f
+  ## Running testcase B.5
+  # testing echange of "C" (1d0f3cd25300)
+  ## initial state
+  # obstore: main
+  069b05c3876d56f62895e853a501ea58ea85f68d 0 {e5ea8f9c73143125d36658e90ef70c6d2027a5b7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "C" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 3 changesets with 3 changes to 3 files
+  remote: 1 new obsolescence markers
+  ## post push state
+  # obstore: main
+  069b05c3876d56f62895e853a501ea58ea85f68d 0 {e5ea8f9c73143125d36658e90ef70c6d2027a5b7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "1d0f3cd25300" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 3 files
+  1 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  069b05c3876d56f62895e853a501ea58ea85f68d 0 {e5ea8f9c73143125d36658e90ef70c6d2027a5b7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B6.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,117 @@
+============================================
+Testing obsolescence markers push: Cases B.6
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 6: Pruned changeset with precursors not in pushed set
+
+B.6 Pruned changeset with precursors not in pushed set
+======================================================
+
+.. {{{
+..   B ø⇠⊗ B'
+..     | |
+..   A ○ |
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `B ø⇠⊗ B'`
+..  * B' prune
+..
+.. Command run:
+..
+..  * hg push -r O
+..
+.. Expected exchange:
+..
+..  * `B ø⇠⊗ B'`
+..  * B' prune
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos B.6
+  creating test repo for test case B.6
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ mkcommit B0
+  $ hg up --quiet 0
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'`
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  f6298a8ac3a4 (draft): B1
+  |
+  | x  962ecf6b1afc (draft): B0
+  | |
+  | o  f5bc6836db60 (draft): A
+  |/
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-------------------------------------
+
+  $ dotest B.6 O
+  ## Running testcase B.6
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  2 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  962ecf6b1afc94e15c7e48fdfb76ef8abd11372b f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6298a8ac3a4b78bbeae5f1d3dc5bc3c3812f0f3 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-B7.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,100 @@
+============================================
+Testing obsolescence markers push: Cases B.7
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category B: pruning case
+TestCase 7: Prune on non-targeted common changeset
+
+B.7 Prune above non-targeted common changeset
+=============================================
+
+.. (very similar to B.1, but the pruned changeset is unknown on the remote)
+..
+.. {{{
+..     ⊗ B
+..     |
+..     ◕ A
+..     |
+..     ● O
+.. }}}
+..
+.. Marker exists from:
+..
+..  * B (prune)
+..
+.. Command run:
+..
+..  * hg push -r O
+..
+.. Expected exclude:
+..
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos B.7
+  creating test repo for test case B.7
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg push -q ../pushdest
+  $ hg push -q ../pulldest
+  $ mkcommit B
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  f6fbb35d8ac9 (draft): B
+  |
+  @  f5bc6836db60 (draft): A
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-------------------------------------
+
+  $ dotest B.7 O
+  ## Running testcase B.7
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  ## post push state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  ## post pull state
+  # obstore: main
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-C1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,158 @@
+============================================
+Testing obsolescence markers push: Cases C.1
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category C: advanced case
+TestCase 1: Multiple pruned changesets atop each other
+Variants:
+# a: explicit push
+# b: bare push
+
+C.1 Multiple pruned changesets atop each other
+==============================================
+
+.. {{{
+..   ⊗ B
+..   |
+..   ⊗ A
+..   |
+..   ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * A (prune)
+..  * B (prune)
+..
+.. Commands run:
+..
+..  * hg push -r O
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * A (prune)
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos C.1
+  creating test repo for test case C.1
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ mkcommit B
+  $ hg prune -qd '0 0' '.~1'
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  f6fbb35d8ac9 (draft): B
+  |
+  x  f5bc6836db60 (draft): A
+  |
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R C.1 C.1.a
+  $ cp -R C.1 C.1.b
+
+Actual Test (explicit push)
+---------------------------
+
+  $ dotest C.1.a O
+  ## Running testcase C.1.a
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  2 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push)
+-------------------------------------
+
+  $ dotest C.1.b
+  ## Running testcase C.1.b
+  ## initial state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  2 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f6fbb35d8ac958bbe70035e4c789c18471cdc0af 0 {f5bc6836db60e308a17ba08bf050154ba9c4fad7} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-C2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,178 @@
+============================================
+Testing obsolescence markers push: Cases C.2
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category C: advanced case
+TestCase 2: Pruned changeset on precursors
+Variants:
+# a: explicit push
+# b: bare push
+
+C.2 Pruned changeset on precursors
+==================================
+
+.. {{{
+..   B ⊗
+..     |
+..   A ø⇠◔ A'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * A' succeed to A
+..  * B (prune)
+..
+.. Commands run:
+..
+..  * hg push -r A'
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * `A ø⇠o A'`
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos C.2
+  creating test repo for test case C.2
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B
+  $ hg prune -qd '0 0' .
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg log -G --hidden
+  @  e5ea8f9c7314 (draft): A1
+  |
+  | x  06055a7959d4 (draft): B
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R C.2 C.2.a
+  $ cp -R C.2 C.2.b
+
+Actual Test (explicit push)
+---------------------------
+
+  $ dotest C.2.a A1
+  ## Running testcase C.2.a
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  2 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push)
+-------------------------------------
+
+  $ dotest C.2.b
+  ## Running testcase C.2.b
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  2 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-C3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,179 @@
+============================================
+Testing obsolescence markers push: Cases C.3
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category C: advanced case
+TestCase 3: Pruned changeset on precursors of another pruned one
+Variants:
+# a: explicit push
+# b: bare push
+
+C.3 Pruned changeset on precursors of another pruned one
+========================================================
+
+.. {{{
+..   B ⊗
+..     |
+..   A ø⇠⊗ A'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * A' succeed to A
+..  * A' (prune)
+..  * B (prune)
+..
+.. Commands run:
+..
+..  * hg push -r A'
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * `A ø⇠⊗ A'`
+..  * A (prune)
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos C.3
+  creating test repo for test case C.3
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B
+  $ hg prune -qd '0 0' .
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  e5ea8f9c7314 (draft): A1
+  |
+  | x  06055a7959d4 (draft): B
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R C.3 C.3.a
+  $ cp -R C.3 C.3.b
+
+Actual Test (explicit push)
+---------------------------
+
+  $ dotest C.3.a O
+  ## Running testcase C.3.a
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 3 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  3 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push)
+-------------------------------------
+
+  $ dotest C.3.b
+  ## Running testcase C.3.b
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 3 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  3 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-C4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,129 @@
+============================================
+Testing obsolescence markers push: Cases C.4
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category C: advanced case
+TestCase 4: multiple successors, one is pruned
+
+C.4 multiple successors, one is pruned
+======================================
+
+.. (A similar situation can happen with split markers; see the Z section)
+..
+.. {{{
+..        A
+..    B ○⇢ø⇠⊗ C
+..       \|/
+..        ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠○ B`
+..  * `A ø⇠○ C`
+..  * C (prune)
+..
+.. Command run:
+..
+..  * hg push -r O
+..
+.. Expected exchange:
+..
+..  * `A ø⇠○ C`
+..  * C (prune)
+..
+.. Expected exclude:
+..
+..  * `A ø⇠○ B`
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Implemented as the non-split version
+
+  $ setuprepos C.4
+  creating test repo for test case C.4
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A
+  $ hg update -q 0
+  $ mkcommit B
+  created new head
+  $ hg update -q 0
+  $ mkcommit C
+  created new head
+  $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(B)'`
+  $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(C)'`
+  $ hg prune -qd '0 0' .
+  $ hg log -G --hidden
+  x  7f7f229b13a6 (draft): C
+  |
+  | o  35b183996678 (draft): B
+  |/
+  | x  f5bc6836db60 (draft): A
+  |/
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest C.4 O
+  ## Running testcase C.4
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  2 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 35b1839966785d5703a01607229eea932db42f87 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  f5bc6836db60e308a17ba08bf050154ba9c4fad7 7f7f229b13a629a5b20581c6cb723f4e2ca54bed 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-D1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,176 @@
+============================================
+Testing obsolescence markers push: Cases D.1
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category D: Partial Information Case
+TestCase 1: Pruned changeset based on missing precursor of something not present
+Variants:
+# a: explicit push
+# b: bare push
+
+D.1 Pruned changeset based on missing precursor of something not present
+========================================================================
+
+.. {{{
+..   B ⊗
+..     |
+..   A ◌⇠◔ A'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠o A'`
+..  * B (prune)
+..
+.. Commands run:
+..
+..  * hg push -r A'
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * `A ø⇠o A'`
+..  * B (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos D.1
+  creating test repo for test case D.1
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B
+  $ hg up -q 0
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg prune -d '0 0' 'desc(B)'
+  $ hg strip --hidden -q 'desc(A0)'
+  $ hg log -G --hidden
+  @  e5ea8f9c7314 (draft): A1
+  |
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+  $ cp -R D.1 D.1.a
+  $ cp -R D.1 D.1.b
+
+Actual Test (explicit push)
+---------------------------
+
+  $ dotest D.1.a A1
+  ## Running testcase D.1.a
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  2 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
+Actual Test (bare push)
+---------------------------
+
+  $ dotest D.1.b
+  ## Running testcase D.1.b
+  ## initial state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  2 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  06055a7959d4128e6e3bccfd01482e83a2db8a3a 0 {28b51eb45704506b5c603decd6bf7ac5e0f6a52f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-D2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,111 @@
+============================================
+Testing obsolescence markers push: Cases D.2
+============================================
+
+Mercurial pushes obsolescence markers relevant to the "pushed-set", the set of
+all changesets that are requested to be "in sync" after the push (even if they
+are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category D: Partial Information Case
+TestCase 2: missing prune target (prune in "pushed set")
+
+D.2 missing prune target (prune in "pushed set")
+================================================
+
+.. {{{
+..   A ø⇠✕ A'
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * A' succeed to A
+..  * A' (prune)
+..
+.. Command run:
+..
+..  * hg push
+..
+.. Expected exchange:
+..
+..  * `A ø⇠o A'`
+..  * A' (prune)
+
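+.. Note that exchanging markers does not require the changesets they mention:
+.. the obsstore behaves as an append-only set keyed by the whole marker, so
+.. the markers about the stripped A' still transfer. A small sketch of that
+.. union, with names chosen for illustration only:
+..
+..   def addmarkers(obsstore, incoming):
+..       # the "N new obsolescence markers" lines below report len(new)
+..       new = [m for m in incoming if m not in obsstore]
+..       obsstore.update(new)
+..       return len(new)
+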
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+Initial
+
+  $ setuprepos D.2
+  creating test repo for test case D.2
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ hg up -q 0
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg prune --date '0 0' .
+  $ hg strip --hidden -q 'desc(A1)'
+  $ hg log -G --hidden
+  x  28b51eb45704 (draft): A0
+  |
+  @  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest D.2
+  ## Running testcase D.2
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling from main into pulldest
+  pulling from main
+  searching for changes
+  no changes found
+  2 new obsolescence markers
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 {a9bdc8b26820b1b87d585b82eb0ceb4a2ecdbc04} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+
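The selection rule that these D-series cases exercise can be modelled
compactly. Reading the diagrams (my gloss; the patch itself does not spell the
notation out): ø marks an obsolete changeset, ✕ one that was pruned and
stripped, ◌ a node unknown to either side, ○/◔ known changesets (◔ being the
pushed head), and ● the public base O. Below is a minimal Python sketch,
assuming markers are (precursor, successors, parents) triples with empty
successors denoting a prune; the real logic lives in obsstore.relevantmarkers()
in mercurial/obsolete.py:

    def relevant_markers(markers, pushed_nodes):
        """Collect the obsolescence markers relevant to ``pushed_nodes``.

        A marker is relevant when one of its successors is in the set, or
        when it prunes a direct child of a node in the set; both rules then
        apply recursively to the precursors of every collected marker.
        """
        by_successor, prunes_by_parent = {}, {}
        for m in markers:
            precursor, successors, parents = m
            if successors:
                for s in successors:
                    by_successor.setdefault(s, []).append(m)
            else:  # prune marker: index it under the pruned node's parents
                for p in parents:
                    prunes_by_parent.setdefault(p, []).append(m)

        result, pending = set(), list(pushed_nodes)
        seen = set(pending)
        while pending:
            node = pending.pop()
            for m in by_successor.get(node, []) + prunes_by_parent.get(node, []):
                if m not in result:
                    result.add(m)
                    if m[0] not in seen:  # walk the precursor chain
                        seen.add(m[0])
                        pending.append(m[0])
        return result

    # Case D.2: A1 was pruned on top of O, so pushing O carries both markers,
    # matching the "2 new obsolescence markers" lines above.
    O, A0, A1 = b'O', b'A0', b'A1'
    d2 = {(A0, (A1,), ()), (A1, (), (O,))}
    assert relevant_markers(d2, {O}) == d2
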
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-D3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,108 @@
+============================================
+Testing obsolescence markers push: Cases D.3
+============================================
+
+Mercurial pushes the obsolescence markers relevant to the "pushed-set", the set
+of all changesets that are requested to be "in sync" after the push (even if
+they are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category D: Partial Information Case
+TestCase 3: missing prune target (prune not in "pushed set")
+
+D.3 missing prune target (prune not in "pushed set")
+====================================================
+
+.. {{{
+..   A ø⇠✕ A'
+..     | |
+..     | ○ B
+..     |/
+..     ● O
+.. }}}
+..
+.. Markers exist from:
+..
+..  * `A ø⇠o A'`
+..  * A' (prune)
+..
+.. Command run:
+..
+..  * hg push
+..
+.. Expected exclude:
+..
+..  * `A ø⇠o A'`
+..  * A' (prune)
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos D.3
+  creating test repo for test case D.3
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ hg up -q 0
+  $ mkcommit B
+  created new head
+  $ mkcommit A1
+  $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'`
+  $ hg prune -d '0 0' .
+  $ hg strip --hidden -q 'desc(A1)'
+  $ hg log -G --hidden
+  @  35b183996678 (draft): B
+  |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest D.3 O
+  ## Running testcase D.3
+  # testing echange of "O" (a9bdc8b26820)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "O" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  no changes found
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pulling "a9bdc8b26820" from main into pulldest
+  pulling from main
+  no changes found
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f 6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6aa67a7b4baa6fb41b06aed38d5b1201436546e2 0 {35b1839966785d5703a01607229eea932db42f87} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+
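By contrast, D.3's prune sits on top of B, which is not in the pushed set when
only O is pushed, so nothing is selected; continuing the relevant_markers()
sketch given after case D.2 (node names are illustrative):

    # the prune's parent is B, not O, so pushing O finds no relevant marker
    O, A0, A1, B = b'O', b'A0', b'A1', b'B'
    d3 = {(A0, (A1,), ()), (A1, (), (B,))}
    assert relevant_markers(d3, {O}) == set()
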
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-exchange-obsmarkers-case-D4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,138 @@
+============================================
+Testing obsolescence markers push: Cases D.4
+============================================
+
+Mercurial pushes the obsolescence markers relevant to the "pushed-set", the set
+of all changesets that are requested to be "in sync" after the push (even if
+they are already on both sides).
+
+This test belongs to a series of tests checking that this set is properly
+computed and applied. It does not test "obsmarkers" discovery capabilities.
+
+Category D: Partial Information Case
+TestCase 4: Unknown changeset in between known changesets
+
+D.4 Unknown changeset in between known ones
+===========================================
+
+.. Mostly a clarification case
+..
+.. {{{
+..   B ø⇠◌⇠○ B''
+..     |   |
+..   A ø⇠◌⇠◔ A'
+..      \ /
+..       ● O
+..
+.. }}}
+..
+.. Should be treated as A.3 case:
+..
+.. {{{
+..
+..   B ø⇠○ B''
+..     | |
+..   A ø⇠◔ A'
+..     |/
+..     ● O
+..
+.. }}}
+
+Setup
+-----
+
+  $ . $TESTDIR/testlib/exchange-obsmarker-util.sh
+
+initial
+
+  $ setuprepos D.4
+  creating test repo for test case D.4
+  - pulldest
+  - main
+  - pushdest
+  cd into `main` and proceed with env setup
+  $ cd main
+  $ mkcommit A0
+  $ mkcommit B0
+  $ hg update -q 0
+  $ mkcommit A1
+  created new head
+  $ mkcommit B1
+  $ hg debugobsolete `getid 'desc(A0)'` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+  $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A1)'`
+  $ hg debugobsolete `getid 'desc(B0)'` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
+  $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B1)'`
+  $ hg log -G --hidden
+  @  069b05c3876d (draft): B1
+  |
+  o  e5ea8f9c7314 (draft): A1
+  |
+  | x  6e72f0a95b5e (draft): B0
+  | |
+  | x  28b51eb45704 (draft): A0
+  |/
+  o  a9bdc8b26820 (public): O
+  
+  $ inspect_obsmarkers
+  obsstore content
+  ================
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  $ cd ..
+  $ cd ..
+
+Actual Test
+-----------
+
+  $ dotest D.4 A1
+  ## Running testcase D.4
+  # testing echange of "A1" (e5ea8f9c7314)
+  ## initial state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  # obstore: pulldest
+  ## pushing "A1" from main to pushdest
+  pushing to pushdest
+  searching for changes
+  remote: adding changesets
+  remote: adding manifests
+  remote: adding file changes
+  remote: added 1 changesets with 1 changes to 1 files
+  remote: 2 new obsolescence markers
+  ## post push state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  ## pulling "e5ea8f9c7314" from main into pulldest
+  pulling from main
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  2 new obsolescence markers
+  (run 'hg update' to get a working copy)
+  ## post pull state
+  # obstore: main
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  6e72f0a95b5e01a7504743aa941f69cb1fbef8b0 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 069b05c3876d56f62895e853a501ea58ea85f68d 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pushdest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  # obstore: pulldest
+  28b51eb45704506b5c603decd6bf7ac5e0f6a52f aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
+  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa e5ea8f9c73143125d36658e90ef70c6d2027a5b7 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
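
D.4's marker chains pass through the fake aaaa…/bbbb… nodes that neither side
knows; walking precursors in the relevant_markers() sketch after case D.2
still collects the whole A-chain once A1 is pushed, which is why exactly the
two A markers (and neither B marker) arrive on pushdest and pulldest:

    # X stands in for the unknown aaaa... placeholder node
    A0, A1, X = b'A0', b'A1', b'aaaa'
    d4 = {(A0, (X,), ()), (X, (A1,), ())}
    assert relevant_markers(d4, {A1}) == d4
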
--- a/tests/test-extension.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-extension.t	Tue Apr 18 12:24:34 2017 -0400
@@ -532,6 +532,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -543,6 +545,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 
 
@@ -567,6 +571,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -578,6 +584,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 
 
@@ -845,6 +853,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -856,6 +866,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Make sure that single '-v' option shows help and built-ins only for 'dodo' command
   $ hg help -v dodo
@@ -878,6 +890,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -889,6 +903,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 In case when extension name doesn't match any of its commands,
 help message should ask for '-v' to get list of built-in aliases
@@ -949,6 +965,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -960,6 +978,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
   $ hg help -v -e dudu
   dudu extension -
@@ -981,6 +1001,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -992,6 +1014,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Disabled extension commands:
 
@@ -1089,6 +1113,14 @@
     intro=never  # never include an introduction message
     intro=always # always include an introduction message
   
+  You can specify a template for flags to be added in subject prefixes. Flags
+  specified by --flag option are exported as "{flags}" keyword:
+  
+    [patchbomb]
+    flagtemplate = "{separate(' ',
+                              ifeq(branch, 'default', '', branch|upper),
+                              flags)}"
+  
   You can set patchbomb to always ask for confirmation by setting
   "patchbomb.confirm" to true.
   
--- a/tests/test-filecache.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-filecache.py	Tue Apr 18 12:24:34 2017 -0400
@@ -10,24 +10,30 @@
 from mercurial import (
     extensions,
     hg,
-    scmutil,
+    localrepo,
     ui as uimod,
     util,
+    vfs as vfsmod,
 )
 
-filecache = scmutil.filecache
-
 class fakerepo(object):
     def __init__(self):
         self._filecache = {}
 
-    def join(self, p):
-        return p
+    class fakevfs(object):
+
+        def join(self, p):
+            return p
+
+    vfs = fakevfs()
+
+    def unfiltered(self):
+        return self
 
     def sjoin(self, p):
         return p
 
-    @filecache('x', 'y')
+    @localrepo.repofilecache('x', 'y')
     def cached(self):
         print('creating')
         return 'string from function'
@@ -73,7 +79,7 @@
     # atomic replace file, size doesn't change
     # hopefully st_mtime doesn't change as well so this doesn't use the cache
     # because of inode change
-    f = scmutil.opener('.')('x', 'w', atomictemp=True)
+    f = vfsmod.vfs('.')('x', 'w', atomictemp=True)
     f.write('b')
     f.close()
 
@@ -97,7 +103,7 @@
     # should recreate the object
     repo.cached
 
-    f = scmutil.opener('.')('y', 'w', atomictemp=True)
+    f = vfsmod.vfs('.')('y', 'w', atomictemp=True)
     f.write('B')
     f.close()
 
@@ -105,10 +111,10 @@
     print("* file y changed inode")
     repo.cached
 
-    f = scmutil.opener('.')('x', 'w', atomictemp=True)
+    f = vfsmod.vfs('.')('x', 'w', atomictemp=True)
     f.write('c')
     f.close()
-    f = scmutil.opener('.')('y', 'w', atomictemp=True)
+    f = vfsmod.vfs('.')('y', 'w', atomictemp=True)
     f.write('C')
     f.close()
 
@@ -200,12 +206,12 @@
         # st_mtime is advanced multiple times as expected
         for i in xrange(repetition):
             # explicit closing
-            fp = scmutil.checkambigatclosing(open(filename, 'a'))
+            fp = vfsmod.checkambigatclosing(open(filename, 'a'))
             fp.write('FOO')
             fp.close()
 
             # implicit closing by "with" statement
-            with scmutil.checkambigatclosing(open(filename, 'a')) as fp:
+            with vfsmod.checkambigatclosing(open(filename, 'a')) as fp:
                 fp.write('BAR')
 
         newstat = os.stat(filename)
--- a/tests/test-fileset.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-fileset.t	Tue Apr 18 12:24:34 2017 -0400
@@ -88,6 +88,35 @@
   $ fileset 'copied()'
   c1
 
+Test file status in different revisions
+
+  $ hg status -m
+  M b2
+  $ fileset -r0 'revs("wdir()", modified())' --traceback
+  b2
+  $ hg status -a
+  A c1
+  $ fileset -r0 'revs("wdir()", added())'
+  c1
+  $ hg status --change 0 -a
+  A a1
+  A a2
+  A b1
+  A b2
+  $ hg status -mru
+  M b2
+  R a2
+  ? c3
+  $ fileset -r0 'added() and revs("wdir()", modified() or removed() or unknown())'
+  b2
+  a2
+  $ fileset -r0 'added() or revs("wdir()", added())'
+  a1
+  a2
+  b1
+  b2
+  c1
+
 Test files properties
 
   >>> file('bin', 'wb').write('\0a')
@@ -319,7 +348,6 @@
 
 Test safety of 'encoding' on removed files
 
-#if symlink
   $ fileset 'encoding("ascii")'
   dos
   mac
@@ -330,23 +358,9 @@
   2k
   b1
   b2
-  b2link
+  b2link (symlink !)
   bin
   c1
-#else
-  $ fileset 'encoding("ascii")'
-  dos
-  mac
-  mixed
-  .hgsub
-  .hgsubstate
-  1k
-  2k
-  b1
-  b2
-  bin
-  c1
-#endif
 
 Test detection of unintentional 'matchctx.existing()' invocation
 
@@ -367,3 +381,226 @@
 
   $ fileset 'existingcaller()' 2>&1 | tail -1
   AssertionError: unexpected existing() invocation
+
+Test 'revs(...)'
+================
+
+small reminder of the repository state
+
+  $ hg log -G
+  @  changeset:   4:* (glob)
+  |  tag:         tip
+  |  user:        test
+  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  summary:     subrepo
+  |
+  o    changeset:   3:* (glob)
+  |\   parent:      2:55b05bdebf36
+  | |  parent:      1:* (glob)
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     merge
+  | |
+  | o  changeset:   2:55b05bdebf36
+  | |  parent:      0:8a9576c51c1f
+  | |  user:        test
+  | |  date:        Thu Jan 01 00:00:00 1970 +0000
+  | |  summary:     diverging
+  | |
+  o |  changeset:   1:* (glob)
+  |/   user:        test
+  |    date:        Thu Jan 01 00:00:00 1970 +0000
+  |    summary:     manychanges
+  |
+  o  changeset:   0:8a9576c51c1f
+     user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     addfiles
+  
+  $ hg status --change 0
+  A a1
+  A a2
+  A b1
+  A b2
+  $ hg status --change 1
+  M b2
+  A 1k
+  A 2k
+  A b2link (no-windows !)
+  A bin
+  A c1
+  A con.xml (no-windows !)
+  R a2
+  $ hg status --change 2
+  M b2
+  $ hg status --change 3
+  M b2
+  A 1k
+  A 2k
+  A b2link (no-windows !)
+  A bin
+  A c1
+  A con.xml (no-windows !)
+  R a2
+  $ hg status --change 4
+  A .hgsub
+  A .hgsubstate
+  $ hg status
+  A dos
+  A mac
+  A mixed
+  R con.xml (no-windows !)
+  ! a1
+  ? b2.orig
+  ? c3
+  ? unknown
+
+Test that files at -r0 are filtered by files at wdir
+----------------------------------------------------
+
+  $ fileset -r0 '* and revs("wdir()", *)'
+  a1
+  b1
+  b2
+
+Test that "revs()" work at all
+------------------------------
+
+  $ fileset "revs('2', modified())"
+  b2
+
+Test that "revs()" work for file missing in the working copy/current context
+----------------------------------------------------------------------------
+
+(a2 not in working copy)
+
+  $ fileset "revs('0', added())"
+  a1
+  a2
+  b1
+  b2
+
+(none of the files exist in "0")
+
+  $ fileset -r 0 "revs('4', added())"
+  .hgsub
+  .hgsubstate
+
+Call with empty revset
+--------------------------
+
+  $ fileset "revs('2-2', modified())"
+
+Call with revset matching multiple revs
+---------------------------------------
+
+  $ fileset "revs('0+4', added())"
+  a1
+  a2
+  b1
+  b2
+  .hgsub
+  .hgsubstate
+
+overlapping set
+
+  $ fileset "revs('1+2', modified())"
+  b2
+
+Test 'status(...)'
+==================
+
+Simple case
+-----------
+
+  $ fileset "status(3, 4, added())"
+  .hgsub
+  .hgsubstate
+
+use rev to restrict matched files
+-----------------------------------------
+
+  $ hg status --removed --rev 0 --rev 1
+  R a2
+  $ fileset "status(0, 1, removed())"
+  a2
+  $ fileset "* and status(0, 1, removed())"
+  $ fileset -r 4 "status(0, 1, removed())"
+  a2
+  $ fileset -r 4 "* and status(0, 1, removed())"
+  $ fileset "revs('4', * and status(0, 1, removed()))"
+  $ fileset "revs('0', * and status(0, 1, removed()))"
+  a2
+
+check wdir()
+------------
+
+  $ hg status --removed --rev 4
+  R con.xml (no-windows !)
+  $ fileset "status(4, 'wdir()', removed())"
+  con.xml (no-windows !)
+
+  $ hg status --removed --rev 2
+  R a2
+  $ fileset "status('2', 'wdir()', removed())"
+  a2
+
+test backward status
+--------------------
+
+  $ hg status --removed --rev 0 --rev 4
+  R a2
+  $ hg status --added --rev 4 --rev 0
+  A a2
+  $ fileset "status(4, 0, added())"
+  a2
+
+test cross branch status
+------------------------
+
+  $ hg status --added --rev 1 --rev 2
+  A a2
+  $ fileset "status(1, 2, added())"
+  a2
+
+test with multi-rev revsets
+---------------------------
+
+  $ hg status --added --rev 0:1 --rev 3:4
+  A .hgsub
+  A .hgsubstate
+  A 1k
+  A 2k
+  A b2link (no-windows !)
+  A bin
+  A c1
+  A con.xml (no-windows !)
+  $ fileset "status('0:1', '3:4', added())"
+  .hgsub
+  .hgsubstate
+  1k
+  2k
+  b2link (no-windows !)
+  bin
+  c1
+  con.xml (no-windows !)
+
+tests with empty values
+-----------------------
+
+Fully empty revset
+
+  $ fileset "status('', '4', added())"
+  hg: parse error: first argument to status must be a revision
+  [255]
+  $ fileset "status('2', '', added())"
+  hg: parse error: second argument to status must be a revision
+  [255]
+
+A whitespace-only revset errors at the revset layer
+
+  $ fileset "status(' ', '4', added())"
+  hg: parse error at 1: not a prefix: end
+  [255]
+  $ fileset "status('2', ' ', added())"
+  hg: parse error at 1: not a prefix: end
+  [255]
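
A note on the backward and cross-branch cases above: status() is
direction-sensitive, so swapping the revision operands turns removals into
additions. A toy model over manifests-as-dicts (the manifest contents below
are illustrative, not the real ones from this test):

    def added(man_old, man_new):
        """Files present in ``man_new`` but not in ``man_old``."""
        return sorted(set(man_new) - set(man_old))

    man_a = {'a1': 1, 'a2': 1, 'b1': 1, 'b2': 1}
    man_b = {'a1': 1, 'b1': 1, 'b2': 2, 'c1': 1}  # a2 dropped, c1 introduced
    assert added(man_a, man_b) == ['c1']  # forward:  like status(A, B, added())
    assert added(man_b, man_a) == ['a2']  # backward: like status(B, A, added())
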
--- a/tests/test-flagprocessor.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-flagprocessor.t	Tue Apr 18 12:24:34 2017 -0400
@@ -163,3 +163,85 @@
   $ hg commit -Aqm 'add file'
   abort: cannot register multiple processors on flag '0x8'.
   [255]
+
+  $ cd ..
+
+# TEST: bundle repo
+  $ hg init bundletest
+  $ cd bundletest
+
+  $ cat >> .hg/hgrc << EOF
+  > [extensions]
+  > flagprocessor=$TESTDIR/flagprocessorext.py
+  > EOF
+
+  $ for i in 0 single two three 4; do
+  >   echo '[BASE64]a-bit-longer-'$i > base64
+  >   hg commit -m base64-$i -A base64
+  > done
+
+  $ hg update 2 -q
+  $ echo '[BASE64]a-bit-longer-branching' > base64
+  $ hg commit -q -m branching
+
+  $ hg bundle --base 1 bundle.hg
+  4 changesets found
+  $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
+  $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
+  5 branching
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  4 base64-4
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  3 base64-three
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  2 base64-two
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  1 base64-single
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  0 base64-0
+   base64 |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  
+
+  $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
+  $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
+  5 branching
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  4 base64-4
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  3 base64-three
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  2 base64-two
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  1 base64-single
+   base64 |  2 +-
+   1 files changed, 1 insertions(+), 1 deletions(-)
+  
+  0 base64-0
+   base64 |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
+  
+  $ rm bundle.hg bundle-again.hg
+
+# TEST: hg status
+
+  $ hg status
+  $ hg diff
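
The flagprocessor extension used above base64-encodes revision data behind
revlog flag 0x8. A sketch of what such a registration looks like; the
addflagprocessor tuple shape and the REVIDX_KNOWN_FLAGS step are recalled from
this era's revlog API, so treat the exact names and signatures as assumptions:

    import base64

    from mercurial import revlog

    REVIDX_BASE64 = 1 << 3  # the '0x8' flag named in the abort message above

    def readb64(rl, text):
        # applied when revision data is read back out of the revlog
        return base64.b64decode(text), True

    def writeb64(rl, text):
        # applied before revision data is stored
        return base64.b64encode(text), False

    def rawb64(rl, text):
        # raw (stored) data needs no extra hash validation here
        return False

    # the flag must be declared known before registration; registering a
    # second processor on the same flag aborts, as the test demonstrates
    revlog.REVIDX_KNOWN_FLAGS |= REVIDX_BASE64
    revlog.addflagprocessor(REVIDX_BASE64, (readb64, writeb64, rawb64))
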
--- a/tests/test-gendoc-ro.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-gendoc-ro.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,4 +1,9 @@
 #require docutils gettext
 
+Error: the current ro localization has some rst defects exposed by
+moving pager to core. These two warnings about references are expected
+until the localization is corrected.
   $ $TESTDIR/check-gendoc ro
   checking for parse errors
+  gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
+  gendoc.txt:58: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
--- a/tests/test-globalopts.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-globalopts.t	Tue Apr 18 12:24:34 2017 -0400
@@ -296,7 +296,7 @@
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a changegroup file
+   bundle        create a bundle file
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
@@ -333,13 +333,15 @@
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
    tags          list repository tags
-   unbundle      apply one or more changegroup files
+   unbundle      apply one or more bundle files
    update        update working directory (or switch revisions)
    verify        verify the integrity of the repository
    version       output version and copyright information
   
   additional help topics:
   
+   bundlespec    Bundle File Formats
+   color         Colorizing Outputs
    config        Configuration Files
    dates         Date Formats
    diffs         Diff Formats
@@ -351,6 +353,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -377,7 +380,7 @@
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a changegroup file
+   bundle        create a bundle file
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
@@ -414,13 +417,15 @@
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
    tags          list repository tags
-   unbundle      apply one or more changegroup files
+   unbundle      apply one or more bundle files
    update        update working directory (or switch revisions)
    verify        verify the integrity of the repository
    version       output version and copyright information
   
   additional help topics:
   
+   bundlespec    Bundle File Formats
+   color         Colorizing Outputs
    config        Configuration Files
    dates         Date Formats
    diffs         Diff Formats
@@ -432,6 +437,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
--- a/tests/test-glog.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-glog.t	Tue Apr 18 12:24:34 2017 -0400
@@ -82,18 +82,18 @@
   > }
 
   $ cat > printrevset.py <<EOF
-  > from mercurial import extensions, revset, commands, cmdutil
+  > from mercurial import extensions, revsetlang, commands, cmdutil
   > 
   > def uisetup(ui):
   >     def printrevset(orig, ui, repo, *pats, **opts):
   >         if opts.get('print_revset'):
   >             expr = cmdutil.getgraphlogrevs(repo, pats, opts)[1]
   >             if expr:
-  >                 tree = revset.parse(expr)
+  >                 tree = revsetlang.parse(expr)
   >             else:
   >                 tree = []
   >             ui.write('%r\n' % (opts.get('rev', []),))
-  >             ui.write(revset.prettyformat(tree) + '\n')
+  >             ui.write(revsetlang.prettyformat(tree) + '\n')
   >             return 0
   >         return orig(ui, repo, *pats, **opts)
   >     entry = extensions.wrapcommand(commands.table, 'log', printrevset)
@@ -2398,7 +2398,7 @@
 
 node template with changeset_printer:
 
-  $ hg log -Gqr 5:7 --config ui.graphnodetemplate='{rev}'
+  $ hg log -Gqr 5:7 --config ui.graphnodetemplate='"{rev}"'
   7  7:02dbb8e276b8
   |
   6    6:fc281d8ff18d
@@ -3424,3 +3424,39 @@
      summary:     0
   
 
+  $ cd ..
+
+Multiple roots (issue5440):
+
+  $ hg init multiroots
+  $ cd multiroots
+  $ cat <<EOF > .hg/hgrc
+  > [ui]
+  > logtemplate = '{rev} {desc}\n\n'
+  > EOF
+
+  $ touch foo
+  $ hg ci -Aqm foo
+  $ hg co -q null
+  $ touch bar
+  $ hg ci -Aqm bar
+
+  $ hg log -Gr null:
+  @  1 bar
+  |
+  | o  0 foo
+  |/
+  o  -1
+  
+  $ hg log -Gr null+0
+  o  0 foo
+  |
+  o  -1
+  
+  $ hg log -Gr null+1
+  @  1 bar
+  |
+  o  -1
+  
+
+  $ cd ..
--- a/tests/test-graft.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-graft.t	Tue Apr 18 12:24:34 2017 -0400
@@ -582,8 +582,7 @@
   21: fbb6c5cc81002f2b4b49c9d731404688bcae5ade
   branch=dev
   convert_revision=7e61b508e709a11d28194a5359bc3532d910af21
-  transplant_source=z\xe8F\xe9\x11\x1f\xc8\xf5wEcBP\xc7\xb9\xac (esc)
-  `h\x9b (esc)
+  transplant_source=z\xe8F\xe9\x11\x1f\xc8\xf5wEcBP\xc7\xb9\xac\n`h\x9b
   $ hg -R ../converted log -r 'origin(tip)'
   changeset:   2:e0213322b2c1
   user:        test
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hardlinks-whitelisted.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,389 @@
+#require hardlink
+#require hardlink-whitelisted
+
+This test is similar to test-hardlinks.t, but only runs on filesystems that we
+are sure have known-good hardlink support (see issue4546 for an example where
+the filesystem claims hardlink support but is actually problematic).
+
+  $ cat > nlinks.py <<EOF
+  > # print the hardlink count of every file read from stdin, sorted by
+  > # name so the output is stable across runs
+  > import sys
+  > from mercurial import util
+  > for f in sorted(sys.stdin.readlines()):
+  >     f = f[:-1]
+  >     print util.nlinks(f), f
+  > EOF
+
+  $ nlinksdir()
+  > {
+  >     find $1 -type f | python $TESTTMP/nlinks.py
+  > }
+
+Some implementations of cp can't create hardlinks; this helper replaces
+'cp -al' on Linux:
+
+  $ cat > linkcp.py <<EOF
+  > from mercurial import util
+  > import sys
+  > util.copyfiles(sys.argv[1], sys.argv[2], hardlink=True)
+  > EOF
+
+  $ linkcp()
+  > {
+  >     python $TESTTMP/linkcp.py $1 $2
+  > }
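
For reference, the core of such a hardlinking copy needs nothing beyond the
standard library; a simplified stand-in for util.copyfiles(..., hardlink=True)
without the real helper's fallback-to-copy logic:

    import os

    def linktree(src, dst):
        """Recursively mirror ``src`` into ``dst``, hardlinking the files."""
        os.makedirs(dst)
        for name in os.listdir(src):
            s, d = os.path.join(src, name), os.path.join(dst, name)
            if os.path.isdir(s):
                linktree(s, d)
            else:
                os.link(s, d)  # both paths now share one inode, like 'cp -al'
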
+
+Prepare repo r1:
+
+  $ hg init r1
+  $ cd r1
+
+  $ echo c1 > f1
+  $ hg add f1
+  $ hg ci -m0
+
+  $ mkdir d1
+  $ cd d1
+  $ echo c2 > f2
+  $ hg add f2
+  $ hg ci -m1
+  $ cd ../..
+
+  $ nlinksdir r1/.hg/store
+  1 r1/.hg/store/00changelog.i
+  1 r1/.hg/store/00manifest.i
+  1 r1/.hg/store/data/d1/f2.i
+  1 r1/.hg/store/data/f1.i
+  1 r1/.hg/store/fncache
+  1 r1/.hg/store/phaseroots
+  1 r1/.hg/store/undo
+  1 r1/.hg/store/undo.backup.fncache
+  1 r1/.hg/store/undo.backupfiles
+  1 r1/.hg/store/undo.phaseroots
+
+
+Create hardlinked clone r2:
+
+  $ hg clone -U --debug r1 r2 --config progress.debug=true
+  linking: 1
+  linking: 2
+  linking: 3
+  linking: 4
+  linking: 5
+  linking: 6
+  linking: 7
+  linked 7 files
+
+Create non-hardlinked clone r3:
+
+  $ hg clone --pull r1 r3
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+
+Repos r1 and r2 should now contain hardlinked files:
+
+  $ nlinksdir r1/.hg/store
+  2 r1/.hg/store/00changelog.i
+  2 r1/.hg/store/00manifest.i
+  2 r1/.hg/store/data/d1/f2.i
+  2 r1/.hg/store/data/f1.i
+  2 r1/.hg/store/fncache
+  1 r1/.hg/store/phaseroots
+  1 r1/.hg/store/undo
+  1 r1/.hg/store/undo.backup.fncache
+  1 r1/.hg/store/undo.backupfiles
+  1 r1/.hg/store/undo.phaseroots
+
+  $ nlinksdir r2/.hg/store
+  2 r2/.hg/store/00changelog.i
+  2 r2/.hg/store/00manifest.i
+  2 r2/.hg/store/data/d1/f2.i
+  2 r2/.hg/store/data/f1.i
+  2 r2/.hg/store/fncache
+
+Repo r3 should not be hardlinked:
+
+  $ nlinksdir r3/.hg/store
+  1 r3/.hg/store/00changelog.i
+  1 r3/.hg/store/00manifest.i
+  1 r3/.hg/store/data/d1/f2.i
+  1 r3/.hg/store/data/f1.i
+  1 r3/.hg/store/fncache
+  1 r3/.hg/store/phaseroots
+  1 r3/.hg/store/undo
+  1 r3/.hg/store/undo.backupfiles
+  1 r3/.hg/store/undo.phaseroots
+
+
+Create a non-inlined filelog in r3:
+
+  $ cd r3/d1
+  >>> f = open('data1', 'wb')
+  >>> for x in range(10000):
+  ...     f.write("%s\n" % str(x))
+  >>> f.close()
+  $ for j in 0 1 2 3 4 5 6 7 8 9; do
+  >   cat data1 >> f2
+  >   hg commit -m$j
+  > done
+  $ cd ../..
+
+  $ nlinksdir r3/.hg/store
+  1 r3/.hg/store/00changelog.i
+  1 r3/.hg/store/00manifest.i
+  1 r3/.hg/store/data/d1/f2.d
+  1 r3/.hg/store/data/d1/f2.i
+  1 r3/.hg/store/data/f1.i
+  1 r3/.hg/store/fncache
+  1 r3/.hg/store/phaseroots
+  1 r3/.hg/store/undo
+  1 r3/.hg/store/undo.backup.fncache
+  1 r3/.hg/store/undo.backup.phaseroots
+  1 r3/.hg/store/undo.backupfiles
+  1 r3/.hg/store/undo.phaseroots
+
+Push to repo r1 should break up most hardlinks in r2:
+
+  $ hg -R r2 verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  2 files, 2 changesets, 2 total revisions
+
+  $ cd r3
+  $ hg push
+  pushing to $TESTTMP/r1 (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 10 changesets with 10 changes to 1 files
+
+  $ cd ..
+
+  $ nlinksdir r2/.hg/store
+  1 r2/.hg/store/00changelog.i
+  1 r2/.hg/store/00manifest.i
+  1 r2/.hg/store/data/d1/f2.i
+  2 r2/.hg/store/data/f1.i
+  2 r2/.hg/store/fncache
+
+  $ hg -R r2 verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  2 files, 2 changesets, 2 total revisions
+
+
+  $ cd r1
+  $ hg up
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Committing a change to f1 in r1 must break up hardlink f1.i in r2:
+
+  $ echo c1c1 >> f1
+  $ hg ci -m00
+  $ cd ..
+
+  $ nlinksdir r2/.hg/store
+  1 r2/.hg/store/00changelog.i
+  1 r2/.hg/store/00manifest.i
+  1 r2/.hg/store/data/d1/f2.i
+  1 r2/.hg/store/data/f1.i
+  2 r2/.hg/store/fncache
+
+
+  $ cd r3
+  $ hg tip --template '{rev}:{node|short}\n'
+  11:a6451b6bc41f
+  $ echo bla > f1
+  $ hg ci -m1
+  $ cd ..
+
+Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
+
+  $ linkcp r3 r4
+
+r4 has hardlinks in the working dir (not just inside .hg):
+
+  $ nlinksdir r4
+  2 r4/.hg/00changelog.i
+  2 r4/.hg/branch
+  2 r4/.hg/cache/branch2-served
+  2 r4/.hg/cache/checkisexec
+  3 r4/.hg/cache/checklink (?)
+  ? r4/.hg/cache/checklink-target (glob)
+  2 r4/.hg/cache/checknoexec
+  2 r4/.hg/cache/rbc-names-v1
+  2 r4/.hg/cache/rbc-revs-v1
+  2 r4/.hg/dirstate
+  2 r4/.hg/hgrc
+  2 r4/.hg/last-message.txt
+  2 r4/.hg/requires
+  2 r4/.hg/store/00changelog.i
+  2 r4/.hg/store/00manifest.i
+  2 r4/.hg/store/data/d1/f2.d
+  2 r4/.hg/store/data/d1/f2.i
+  2 r4/.hg/store/data/f1.i
+  2 r4/.hg/store/fncache
+  2 r4/.hg/store/phaseroots
+  2 r4/.hg/store/undo
+  2 r4/.hg/store/undo.backup.fncache
+  2 r4/.hg/store/undo.backup.phaseroots
+  2 r4/.hg/store/undo.backupfiles
+  2 r4/.hg/store/undo.phaseroots
+  4 r4/.hg/undo.backup.dirstate
+  2 r4/.hg/undo.bookmarks
+  2 r4/.hg/undo.branch
+  2 r4/.hg/undo.desc
+  4 r4/.hg/undo.dirstate
+  2 r4/d1/data1
+  2 r4/d1/f2
+  2 r4/f1
+
+Update back to revision 11 in r4 should break hardlink of file f1:
+
+  $ hg -R r4 up 11
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ nlinksdir r4
+  2 r4/.hg/00changelog.i
+  1 r4/.hg/branch
+  2 r4/.hg/cache/branch2-served
+  2 r4/.hg/cache/checkisexec
+  2 r4/.hg/cache/checklink-target
+  2 r4/.hg/cache/checknoexec
+  2 r4/.hg/cache/rbc-names-v1
+  2 r4/.hg/cache/rbc-revs-v1
+  1 r4/.hg/dirstate
+  2 r4/.hg/hgrc
+  2 r4/.hg/last-message.txt
+  2 r4/.hg/requires
+  2 r4/.hg/store/00changelog.i
+  2 r4/.hg/store/00manifest.i
+  2 r4/.hg/store/data/d1/f2.d
+  2 r4/.hg/store/data/d1/f2.i
+  2 r4/.hg/store/data/f1.i
+  2 r4/.hg/store/fncache
+  2 r4/.hg/store/phaseroots
+  2 r4/.hg/store/undo
+  2 r4/.hg/store/undo.backup.fncache
+  2 r4/.hg/store/undo.backup.phaseroots
+  2 r4/.hg/store/undo.backupfiles
+  2 r4/.hg/store/undo.phaseroots
+  4 r4/.hg/undo.backup.dirstate
+  2 r4/.hg/undo.bookmarks
+  2 r4/.hg/undo.branch
+  2 r4/.hg/undo.desc
+  4 r4/.hg/undo.dirstate
+  2 r4/d1/data1
+  2 r4/d1/f2
+  1 r4/f1
+
+
+Test hardlinking outside hg:
+
+  $ mkdir x
+  $ echo foo > x/a
+
+  $ linkcp x y
+  $ echo bar >> y/a
+
+No diff if hardlink:
+
+  $ diff x/a y/a
+
+Test mq hardlinking:
+
+  $ echo "[extensions]" >> $HGRCPATH
+  $ echo "mq=" >> $HGRCPATH
+
+  $ hg init a
+  $ cd a
+
+  $ hg qimport -n foo - << EOF
+  > # HG changeset patch
+  > # Date 1 0
+  > diff -r 2588a8b53d66 a
+  > --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  > +++ b/a	Wed Jul 23 15:54:29 2008 +0200
+  > @@ -0,0 +1,1 @@
+  > +a
+  > EOF
+  adding foo to series file
+
+  $ hg qpush
+  applying foo
+  now at: foo
+
+  $ cd ..
+  $ linkcp a b
+  $ cd b
+
+  $ hg qimport -n bar - << EOF
+  > # HG changeset patch
+  > # Date 2 0
+  > diff -r 2588a8b53d66 a
+  > --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  > +++ b/b	Wed Jul 23 15:54:29 2008 +0200
+  > @@ -0,0 +1,1 @@
+  > +b
+  > EOF
+  adding bar to series file
+
+  $ hg qpush
+  applying bar
+  now at: bar
+
+  $ cat .hg/patches/status
+  430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
+  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
+
+  $ cat .hg/patches/series
+  foo
+  bar
+
+  $ cat ../a/.hg/patches/status
+  430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
+
+  $ cat ../a/.hg/patches/series
+  foo
+
+Test tags hardlinking:
+
+  $ hg qdel -r qbase:qtip
+  patch foo finalized without changeset message
+  patch bar finalized without changeset message
+
+  $ hg tag -l lfoo
+  $ hg tag foo
+
+  $ cd ..
+  $ linkcp b c
+  $ cd c
+
+  $ hg tag -l -r 0 lbar
+  $ hg tag -r 0 bar
+
+  $ cat .hgtags
+  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
+  430ed4828a74fa4047bc816a25500f7472ab4bfe bar
+
+  $ cat .hg/localtags
+  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
+  430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
+
+  $ cat ../b/.hgtags
+  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
+
+  $ cat ../b/.hg/localtags
+  4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
+
+  $ cd ..
--- a/tests/test-hardlinks.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hardlinks.t	Tue Apr 18 12:24:34 2017 -0400
@@ -166,7 +166,7 @@
   1 r2/.hg/store/00manifest.i
   1 r2/.hg/store/data/d1/f2.i
   2 r2/.hg/store/data/f1.i
-  1 r2/.hg/store/fncache
+  [12] r2/\.hg/store/fncache (re)
 
   $ hg -R r2 verify
   checking changesets
@@ -191,7 +191,7 @@
   1 r2/.hg/store/00manifest.i
   1 r2/.hg/store/data/d1/f2.i
   1 r2/.hg/store/data/f1.i
-  1 r2/.hg/store/fncache
+  [12] r2/\.hg/store/fncache (re)
 
 
   $ cd r3
@@ -211,10 +211,10 @@
   2 r4/.hg/00changelog.i
   2 r4/.hg/branch
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/checkisexec
+  2 r4/.hg/cache/checkisexec (execbit !)
   3 r4/.hg/cache/checklink (?)
-  ? r4/.hg/cache/checklink-target (glob)
-  2 r4/.hg/cache/checknoexec
+  ? r4/.hg/cache/checklink-target (glob) (symlink !)
+  2 r4/.hg/cache/checknoexec (execbit !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/dirstate
@@ -233,11 +233,11 @@
   2 r4/.hg/store/undo.backup.phaseroots
   2 r4/.hg/store/undo.backupfiles
   2 r4/.hg/store/undo.phaseroots
-  2 r4/.hg/undo.backup.dirstate
+  [24] r4/\.hg/undo\.backup\.dirstate (re)
   2 r4/.hg/undo.bookmarks
   2 r4/.hg/undo.branch
   2 r4/.hg/undo.desc
-  2 r4/.hg/undo.dirstate
+  [24] r4/\.hg/undo\.dirstate (re)
   2 r4/d1/data1
   2 r4/d1/f2
   2 r4/f1
@@ -251,9 +251,9 @@
   2 r4/.hg/00changelog.i
   1 r4/.hg/branch
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/checkisexec
-  2 r4/.hg/cache/checklink-target
-  2 r4/.hg/cache/checknoexec
+  2 r4/.hg/cache/checkisexec (execbit !)
+  2 r4/.hg/cache/checklink-target (symlink !)
+  2 r4/.hg/cache/checknoexec (execbit !)
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   1 r4/.hg/dirstate
@@ -272,11 +272,11 @@
   2 r4/.hg/store/undo.backup.phaseroots
   2 r4/.hg/store/undo.backupfiles
   2 r4/.hg/store/undo.phaseroots
-  2 r4/.hg/undo.backup.dirstate
+  [24] r4/\.hg/undo\.backup\.dirstate (re)
   2 r4/.hg/undo.bookmarks
   2 r4/.hg/undo.branch
   2 r4/.hg/undo.desc
-  2 r4/.hg/undo.dirstate
+  [24] r4/\.hg/undo\.dirstate (re)
   2 r4/d1/data1
   2 r4/d1/f2
   1 r4/f1
--- a/tests/test-help.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-help.t	Tue Apr 18 12:24:34 2017 -0400
@@ -58,7 +58,7 @@
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a changegroup file
+   bundle        create a bundle file
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
@@ -95,13 +95,15 @@
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
    tags          list repository tags
-   unbundle      apply one or more changegroup files
+   unbundle      apply one or more bundle files
    update        update working directory (or switch revisions)
    verify        verify the integrity of the repository
    version       output version and copyright information
   
   additional help topics:
   
+   bundlespec    Bundle File Formats
+   color         Colorizing Outputs
    config        Configuration Files
    dates         Date Formats
    diffs         Diff Formats
@@ -113,6 +115,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -133,7 +136,7 @@
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a changegroup file
+   bundle        create a bundle file
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
@@ -170,13 +173,15 @@
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
    tags          list repository tags
-   unbundle      apply one or more changegroup files
+   unbundle      apply one or more bundle files
    update        update working directory (or switch revisions)
    verify        verify the integrity of the repository
    version       output version and copyright information
   
   additional help topics:
   
+   bundlespec    Bundle File Formats
+   color         Colorizing Outputs
    config        Configuration Files
    dates         Date Formats
    diffs         Diff Formats
@@ -188,6 +193,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -248,7 +254,6 @@
        censor        erase file content at a given revision
        churn         command to display statistics about repository history
        clonebundles  advertise pre-generated bundles to seed clones
-       color         colorize output from some commands
        convert       import revisions from foreign VCS repositories into
                      Mercurial
        eol           automatically manage newlines in repository files
@@ -262,7 +267,6 @@
        largefiles    track large binary files
        mq            manage a stack of patches
        notify        hooks for sending email push notifications
-       pager         browse command output with an external pager
        patchbomb     command to send changesets as (a series of) patch emails
        purge         command to delete untracked files from the working
                      directory
@@ -315,6 +319,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -326,6 +332,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
   
   (use 'hg help' for the full list of commands)
 
@@ -411,6 +419,8 @@
                           all prompts
    -q --quiet             suppress output
    -v --verbose           enable additional output
+      --color TYPE        when to colorize (boolean, always, auto, never, or
+                          debug)
       --config CONFIG [+] set/override config option (use 'section.name=value')
       --debug             enable debugging output
       --debugger          start debugger
@@ -422,6 +432,8 @@
       --version           output version information and exit
    -h --help              display help and exit
       --hidden            consider hidden changesets
+      --pager TYPE        when to paginate (boolean, always, auto, or never)
+                          (default: auto)
 
 Test the textwidth config option
 
@@ -533,6 +545,7 @@
    -c --change REV          change made by revision
    -a --text                treat all files as text
    -g --git                 use git extended diff format
+      --binary              generate binary diffs in git mode (default)
       --nodates             omit dates from diff headers
       --noprefix            omit a/ and b/ prefixes from filenames
    -p --show-function       show which function each change is in
@@ -678,6 +691,7 @@
   >     ('', 'newline', '', 'line1\nline2')],
   >     'hg nohelp',
   >     norepo=True)
+  > @command('debugoptADV', [('', 'aopt', None, 'option is (ADVANCED)')])
   > @command('debugoptDEP', [('', 'dopt', None, 'option is (DEPRECATED)')])
   > @command('debugoptEXP', [('', 'eopt', None, 'option is (EXPERIMENTAL)')])
   > def nohelp(ui, *args, **kwargs):
@@ -768,7 +782,7 @@
    bookmarks     create a new bookmark or list existing bookmarks
    branch        set or show the current branch name
    branches      list repository named branches
-   bundle        create a changegroup file
+   bundle        create a bundle file
    cat           output the current or given revision of files
    clone         make a copy of an existing repository
    commit        commit the specified files or all outstanding changes
@@ -805,7 +819,7 @@
    summary       summarize working directory state
    tag           add one or more tags for the current or given revision
    tags          list repository tags
-   unbundle      apply one or more changegroup files
+   unbundle      apply one or more bundle files
    update        update working directory (or switch revisions)
    verify        verify the integrity of the repository
    version       output version and copyright information
@@ -816,6 +830,8 @@
   
   additional help topics:
   
+   bundlespec    Bundle File Formats
+   color         Colorizing Outputs
    config        Configuration Files
    dates         Date Formats
    diffs         Diff Formats
@@ -827,6 +843,7 @@
    hgweb         Configuring hgweb
    internals     Technical implementation topics
    merge-tools   Merge Tools
+   pager         Pager Support
    patterns      File Name Patterns
    phases        Working with Phases
    revisions     Specifying Revisions
@@ -853,6 +870,7 @@
    debugbundle   lists the contents of a bundle
    debugcheckstate
                  validate the correctness of the current dirstate
+   debugcolor    show available color, effects or style
    debugcommands
                  list all available commands and options
    debugcomplete
@@ -889,6 +907,7 @@
                  complete "names" - tags, open branch names, bookmark names
    debugobsolete
                  create arbitrary obsolete marker
+   debugoptADV   (no help text available)
    debugoptDEP   (no help text available)
    debugoptEXP   (no help text available)
    debugpathcomplete
@@ -925,6 +944,7 @@
   """""""""""""""""""""""""""""""
   
        bundles       Bundles
+       censor        Censor
        changegroups  Changegroups
        requirements  Repository Requirements
        revlogs       Revision Logs
@@ -937,37 +957,51 @@
   """"""""""""
   
       Changegroups are representations of repository revlog data, specifically
-      the changelog, manifest, and filelogs.
+      the changelog data, root/flat manifest data, treemanifest data, and
+      filelogs.
   
       There are 3 versions of changegroups: "1", "2", and "3". From a high-
       level, versions "1" and "2" are almost exactly the same, with the only
-      difference being a header on entries in the changeset segment. Version "3"
-      adds support for exchanging treemanifests and includes revlog flags in the
-      delta header.
-  
-      Changegroups consists of 3 logical segments:
+      difference being an additional item in the *delta header*.  Version "3"
+      adds support for revlog flags in the *delta header* and optionally
+      exchanging treemanifests (enabled by setting an option on the
+      "changegroup" part in the bundle2).
+  
+      Changegroups when not exchanging treemanifests consist of 3 logical
+      segments:
   
         +---------------------------------+
         |           |          |          |
         | changeset | manifest | filelogs |
         |           |          |          |
+        |           |          |          |
         +---------------------------------+
   
+      When exchanging treemanifests, there are 4 logical segments:
+  
+        +-------------------------------------------------+
+        |           |          |               |          |
+        | changeset |   root   | treemanifests | filelogs |
+        |           | manifest |               |          |
+        |           |          |               |          |
+        +-------------------------------------------------+
+  
       The principle building block of each segment is a *chunk*. A *chunk* is a
       framed piece of data:
   
         +---------------------------------------+
         |           |                           |
         |  length   |           data            |
-        | (32 bits) |       <length> bytes      |
+        | (4 bytes) |   (<length - 4> bytes)    |
         |           |                           |
         +---------------------------------------+
   
-      Each chunk starts with a 32-bit big-endian signed integer indicating the
-      length of the raw data that follows.
-  
-      There is a special case chunk that has 0 length ("0x00000000"). We call
-      this an *empty chunk*.
+      All integers are big-endian signed integers. Each chunk starts with a
+      32-bit integer indicating the length of the entire chunk (including the
+      length field itself).
+  
+      There is a special case chunk that has a value of 0 for the length
+      ("0x00000000"). We call this an *empty chunk*.
   
       Delta Groups
       ============
@@ -981,26 +1015,27 @@
         +------------------------------------------------------------------------+
         |                |             |               |             |           |
         | chunk0 length  | chunk0 data | chunk1 length | chunk1 data |    0x0    |
-        |   (32 bits)    |  (various)  |   (32 bits)   |  (various)  | (32 bits) |
+        |   (4 bytes)    |  (various)  |   (4 bytes)   |  (various)  | (4 bytes) |
         |                |             |               |             |           |
-        +------------------------------------------------------------+-----------+
+        +------------------------------------------------------------------------+
   
       Each *chunk*'s data consists of the following:
   
-        +-----------------------------------------+
-        |              |              |           |
-        | delta header | mdiff header |   delta   |
-        |  (various)   |  (12 bytes)  | (various) |
-        |              |              |           |
-        +-----------------------------------------+
-  
-      The *length* field is the byte length of the remaining 3 logical pieces of
-      data. The *delta* is a diff from an existing entry in the changelog.
+        +---------------------------------------+
+        |                        |              |
+        |     delta header       |  delta data  |
+        |  (various by version)  |  (various)   |
+        |                        |              |
+        +---------------------------------------+
+  
+      The *delta data* is a series of *delta*s that describe a diff from an
+      existing entry (either that the recipient already has, or previously
+      specified in the bundlei/changegroup).
   
       The *delta header* is different between versions "1", "2", and "3" of the
       changegroup format.
   
-      Version 1:
+      Version 1 (headerlen=80):
   
         +------------------------------------------------------+
         |            |             |             |             |
@@ -1009,7 +1044,7 @@
         |            |             |             |             |
         +------------------------------------------------------+
   
-      Version 2:
+      Version 2 (headerlen=100):
   
         +------------------------------------------------------------------+
         |            |             |             |            |            |
@@ -1018,30 +1053,36 @@
         |            |             |             |            |            |
         +------------------------------------------------------------------+
   
-      Version 3:
+      Version 3 (headerlen=102):
   
         +------------------------------------------------------------------------------+
         |            |             |             |            |            |           |
-        |    node    |   p1 node   |   p2 node   | base node  | link node  | flags     |
+        |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
         | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
         |            |             |             |            |            |           |
         +------------------------------------------------------------------------------+
   
-      The *mdiff header* consists of 3 32-bit big-endian signed integers
-      describing offsets at which to apply the following delta content:
-  
-        +-------------------------------------+
-        |           |            |            |
-        |  offset   | old length | new length |
-        | (32 bits) |  (32 bits) |  (32 bits) |
-        |           |            |            |
-        +-------------------------------------+
+      The *delta data* consists of "chunklen - 4 - headerlen" bytes, which
+      contain a series of *delta*s, densely packed (no separators). These
+      deltas describe a diff from an existing entry (either one that the
+      recipient already has, or one previously specified in the
+      bundle/changegroup). The format is
+      described more fully in "hg help internals.bdiff", but briefly:
+  
+        +---------------------------------------------------------------+
+        |              |            |            |                      |
+        | start offset | end offset | new length |        content       |
+        |  (4 bytes)   |  (4 bytes) |  (4 bytes) | (<new length> bytes) |
+        |              |            |            |                      |
+        +---------------------------------------------------------------+
+  
+      Please note that the length field in the delta data does *not* include
+      itself.
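+  
+      Continuing the sketch above, a hypothetical walker over the densely
+      packed deltas (again an illustration, not Mercurial's code):
+  
+        import struct
+  
+        def iter_deltas(deltadata):
+            # Yield (start offset, end offset, content) for each delta.
+            pos = 0
+            while pos < len(deltadata):
+                start, end, newlen = struct.unpack(
+                    '>iii', deltadata[pos:pos + 12])
+                yield start, end, deltadata[pos + 12:pos + 12 + newlen]
+                pos += 12 + newlen   # newlen excludes the 12 header bytes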
   
       In version 1, the delta is always applied against the previous node from
       the changegroup or the first parent if this is the first entry in the
       changegroup.
   
-      In version 2, the delta base node is encoded in the entry in the
+      In version 2 and up, the delta base node is encoded in the entry in the
       changegroup. This allows the delta to be expressed against any parent,
       which can result in smaller deltas and more efficient encoding of data.
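+  
+      Putting the header layouts together, a hypothetical splitter keyed on
+      the headerlen values above (a sketch, not Mercurial's implementation)
+      could look like:
+  
+        import struct
+  
+        def split_header(payload, version):
+            if version == '1':    # headerlen=80: four 20-byte nodes
+                node, p1, p2, link = struct.unpack(
+                    '>20s20s20s20s', payload[:80])
+                # The base is implicit in version 1: the previous node in
+                # the group, or p1 for the first entry.
+                base, flags, rest = None, 0, payload[80:]
+            elif version == '2':  # headerlen=100: explicit base node
+                node, p1, p2, base, link = struct.unpack(
+                    '>20s20s20s20s20s', payload[:100])
+                flags, rest = 0, payload[100:]
+            else:                 # '3', headerlen=102: adds 2 flag bytes
+                node, p1, p2, base, link, flags = struct.unpack(
+                    '>20s20s20s20s20sH', payload[:102])
+                rest = payload[102:]
+            return node, p1, p2, base, link, flags, rest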
   
@@ -1049,46 +1090,61 @@
       =================
   
       The *changeset segment* consists of a single *delta group* holding
-      changelog data. It is followed by an *empty chunk* to denote the boundary
-      to the *manifests segment*.
+      changelog data. The *empty chunk* at the end of the *delta group* denotes
+      the boundary to the *manifest segment*.
   
       Manifest Segment
       ================
   
       The *manifest segment* consists of a single *delta group* holding manifest
-      data. It is followed by an *empty chunk* to denote the boundary to the
-      *filelogs segment*.
+      data. If treemanifests are in use, it contains only the manifest for the
+      root directory of the repository. Otherwise, it contains the entire
+      manifest data. The *empty chunk* at the end of the *delta group* denotes
+      the boundary to the next segment (either the *treemanifests segment* or
+      the *filelogs segment*, depending on version and the request options).
+  
+      Treemanifests Segment
+      ---------------------
+  
+      The *treemanifests segment* only exists in changegroup version "3", and
+      only if the 'treemanifest' param is part of the bundle2 changegroup part
+      (it is not possible to use changegroup version 3 outside of bundle2).
+      Aside from the filenames in the *treemanifests segment* containing a
+      trailing "/" character, it behaves identically to the *filelogs segment*
+      (see below). The final sub-segment is followed by an *empty chunk*
+      (logically, a sub-segment with filename size 0). This denotes the boundary
+      to the *filelogs segment*.
   
       Filelogs Segment
       ================
   
-      The *filelogs* segment consists of multiple sub-segments, each
+      The *filelogs segment* consists of multiple sub-segments, each
       corresponding to an individual file whose data is being described:
   
-        +--------------------------------------+
-        |          |          |          |     |
-        | filelog0 | filelog1 | filelog2 | ... |
-        |          |          |          |     |
-        +--------------------------------------+
-  
-      In version "3" of the changegroup format, filelogs may include directory
-      logs when treemanifests are in use. directory logs are identified by
-      having a trailing '/' on their filename (see below).
-  
-      The final filelog sub-segment is followed by an *empty chunk* to denote
-      the end of the segment and the overall changegroup.
+        +--------------------------------------------------+
+        |          |          |          |     |           |
+        | filelog0 | filelog1 | filelog2 | ... |    0x0    |
+        |          |          |          |     | (4 bytes) |
+        |          |          |          |     |           |
+        +--------------------------------------------------+
+  
+      The final filelog sub-segment is followed by an *empty chunk* (logically,
+      a sub-segment with filename size 0). This denotes the end of the segment
+      and of the overall changegroup.
   
       Each filelog sub-segment consists of the following:
   
-        +------------------------------------------+
-        |               |            |             |
-        | filename size |  filename  | delta group |
-        |   (32 bits)   |  (various) |  (various)  |
-        |               |            |             |
-        +------------------------------------------+
+        +------------------------------------------------------+
+        |                 |                      |             |
+        | filename length |       filename       | delta group |
+        |    (4 bytes)    | (<length - 4> bytes) |  (various)  |
+        |                 |                      |             |
+        +------------------------------------------------------+
   
       That is, a *chunk* consisting of the filename (not terminated or padded)
-      followed by N chunks constituting the *delta group* for this file.
+      followed by N chunks constituting the *delta group* for this file. The
+      *empty chunk* at the end of each *delta group* denotes the boundary to the
+      next filelog sub-segment.
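+  
+      Combining the helpers sketched earlier, the segment (and, given the
+      trailing-"/" filenames, the *treemanifests segment* as well) can be
+      walked like this hypothetical example:
+  
+        def read_filelogs(stream):
+            # Yield (filename, deltas) per sub-segment until the final
+            # empty chunk that ends the changegroup.
+            while True:
+                filename = read_chunk(stream)   # bare filename bytes
+                if filename is None:
+                    return
+                yield filename, list(read_delta_group(stream))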
 
 Test list of commands with command with no help text
 
@@ -1102,7 +1158,15 @@
   (use 'hg help -v helpext' to show built-in aliases and global options)
 
 
-test deprecated and experimental options are hidden in command help
+test advanced, deprecated and experimental options are hidden in command help
+  $ hg help debugoptADV
+  hg debugoptADV
+  
+  (no help text available)
+  
+  options:
+  
+  (some details hidden, use --verbose to show complete help)
   $ hg help debugoptDEP
   hg debugoptDEP
   
@@ -1121,7 +1185,9 @@
   
   (some details hidden, use --verbose to show complete help)
 
-test deprecated and experimental options is shown with -v
+test advanced, deprecated and experimental options are shown with -v
+  $ hg help -v debugoptADV | grep aopt
+    --aopt option is (ADVANCED)
   $ hg help -v debugoptDEP | grep dopt
     --dopt option is (DEPRECATED)
   $ hg help -v debugoptEXP | grep eopt
@@ -1547,11 +1613,11 @@
          "default:pushurl" should be used instead.
   
   $ hg help glossary.mcguffin
-  abort: help section not found
+  abort: help section not found: glossary.mcguffin
   [255]
 
   $ hg help glossary.mc.guffin
-  abort: help section not found
+  abort: help section not found: glossary.mc.guffin
   [255]
 
   $ hg help template.files
@@ -1776,6 +1842,15 @@
       See the merge-tools and ui sections of hgrc(5) for details on the
       configuration of merge tools.
 
+Compression engines listed in `hg help bundlespec`
+
+  $ hg help bundlespec | grep gzip
+          "v1" bundles can only use the "gzip", "bzip2", and "none" compression
+        An algorithm that produces smaller bundles than "gzip".
+        This engine will likely produce smaller bundles than "gzip" but will be
+      "gzip"
+        better compression than "gzip". It also frequently yields better (?)
+
 Test usage of section marks in help documents
 
   $ cd "$TESTDIR"/../doc
@@ -1792,7 +1867,7 @@
   $ hg serve -R "$TESTTMP/test" -n test -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1837,6 +1912,20 @@
   <tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr>
   
   <tr><td>
+  <a href="/help/bundlespec">
+  bundlespec
+  </a>
+  </td><td>
+  Bundle File Formats
+  </td></tr>
+  <tr><td>
+  <a href="/help/color">
+  color
+  </a>
+  </td><td>
+  Colorizing Outputs
+  </td></tr>
+  <tr><td>
   <a href="/help/config">
   config
   </a>
@@ -1914,6 +2003,13 @@
   Merge Tools
   </td></tr>
   <tr><td>
+  <a href="/help/pager">
+  pager
+  </a>
+  </td><td>
+  Pager Support
+  </td></tr>
+  <tr><td>
   <a href="/help/patterns">
   patterns
   </a>
@@ -2151,7 +2247,7 @@
   bundle
   </a>
   </td><td>
-  create a changegroup file
+  create a bundle file
   </td></tr>
   <tr><td>
   <a href="/help/cat">
@@ -2333,7 +2429,7 @@
   unbundle
   </a>
   </td><td>
-  apply one or more changegroup files
+  apply one or more bundle files
   </td></tr>
   <tr><td>
   <a href="/help/verify">
@@ -2361,7 +2457,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/add"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/add"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2491,6 +2587,9 @@
   <td>--verbose</td>
   <td>enable additional output</td></tr>
   <tr><td></td>
+  <td>--color TYPE</td>
+  <td>when to colorize (boolean, always, auto, never, or debug)</td></tr>
+  <tr><td></td>
   <td>--config CONFIG [+]</td>
   <td>set/override config option (use 'section.name=value')</td></tr>
   <tr><td></td>
@@ -2523,6 +2622,9 @@
   <tr><td></td>
   <td>--hidden</td>
   <td>consider hidden changesets</td></tr>
+  <tr><td></td>
+  <td>--pager TYPE</td>
+  <td>when to paginate (boolean, always, auto, or never) (default: auto)</td></tr>
   </table>
   
   </div>
@@ -2535,7 +2637,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/remove"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/remove"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2686,6 +2788,9 @@
   <td>--verbose</td>
   <td>enable additional output</td></tr>
   <tr><td></td>
+  <td>--color TYPE</td>
+  <td>when to colorize (boolean, always, auto, never, or debug)</td></tr>
+  <tr><td></td>
   <td>--config CONFIG [+]</td>
   <td>set/override config option (use 'section.name=value')</td></tr>
   <tr><td></td>
@@ -2718,6 +2823,9 @@
   <tr><td></td>
   <td>--hidden</td>
   <td>consider hidden changesets</td></tr>
+  <tr><td></td>
+  <td>--pager TYPE</td>
+  <td>when to paginate (boolean, always, auto, or never) (default: auto)</td></tr>
   </table>
   
   </div>
@@ -2730,7 +2838,7 @@
   </html>
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/dates"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/dates"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2837,7 +2945,7 @@
 
 Sub-topic indexes rendered properly
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/internals"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2889,6 +2997,13 @@
   Bundles
   </td></tr>
   <tr><td>
+  <a href="/help/internals.censor">
+  censor
+  </a>
+  </td><td>
+  Censor
+  </td></tr>
+  <tr><td>
   <a href="/help/internals.changegroups">
   changegroups
   </a>
@@ -2933,7 +3048,7 @@
 
 Sub-topic topics rendered properly
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT "help/internals.changegroups"
+  $ get-with-headers.py $LOCALIP:$HGPORT "help/internals.changegroups"
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -2980,26 +3095,41 @@
   <h1>Changegroups</h1>
   <p>
   Changegroups are representations of repository revlog data, specifically
-  the changelog, manifest, and filelogs.
+  the changelog data, root/flat manifest data, treemanifest data, and
+  filelogs.
   </p>
   <p>
   There are 3 versions of changegroups: &quot;1&quot;, &quot;2&quot;, and &quot;3&quot;. From a
-  high-level, versions &quot;1&quot; and &quot;2&quot; are almost exactly the same, with
-  the only difference being a header on entries in the changeset
-  segment. Version &quot;3&quot; adds support for exchanging treemanifests and
-  includes revlog flags in the delta header.
+  high-level, versions &quot;1&quot; and &quot;2&quot; are almost exactly the same, with the
+  only difference being an additional item in the *delta header*.  Version
+  &quot;3&quot; adds support for revlog flags in the *delta header* and optionally
+  exchanging treemanifests (enabled by setting an option on the
+  &quot;changegroup&quot; part in the bundle2).
   </p>
   <p>
-  Changegroups consists of 3 logical segments:
+  When not exchanging treemanifests, changegroups consist of 3 logical
+  segments:
   </p>
   <pre>
   +---------------------------------+
   |           |          |          |
   | changeset | manifest | filelogs |
   |           |          |          |
+  |           |          |          |
   +---------------------------------+
   </pre>
   <p>
+  When exchanging treemanifests, there are 4 logical segments:
+  </p>
+  <pre>
+  +-------------------------------------------------+
+  |           |          |               |          |
+  | changeset |   root   | treemanifests | filelogs |
+  |           | manifest |               |          |
+  |           |          |               |          |
+  +-------------------------------------------------+
+  </pre>
+  <p>
  The principal building block of each segment is a *chunk*. A *chunk*
   is a framed piece of data:
   </p>
@@ -3007,17 +3137,18 @@
   +---------------------------------------+
   |           |                           |
   |  length   |           data            |
-  | (32 bits) |       &lt;length&gt; bytes      |
+  | (4 bytes) |   (&lt;length - 4&gt; bytes)    |
   |           |                           |
   +---------------------------------------+
   </pre>
   <p>
-  Each chunk starts with a 32-bit big-endian signed integer indicating
-  the length of the raw data that follows.
+  All integers are big-endian signed integers. Each chunk starts with a 32-bit
+  integer indicating the length of the entire chunk (including the length field
+  itself).
   </p>
   <p>
-  There is a special case chunk that has 0 length (&quot;0x00000000&quot;). We
-  call this an *empty chunk*.
+  There is a special case chunk that has a value of 0 for the length
+  (&quot;0x00000000&quot;). We call this an *empty chunk*.
   </p>
   <h2>Delta Groups</h2>
   <p>
@@ -3032,31 +3163,32 @@
   +------------------------------------------------------------------------+
   |                |             |               |             |           |
   | chunk0 length  | chunk0 data | chunk1 length | chunk1 data |    0x0    |
-  |   (32 bits)    |  (various)  |   (32 bits)   |  (various)  | (32 bits) |
+  |   (4 bytes)    |  (various)  |   (4 bytes)   |  (various)  | (4 bytes) |
   |                |             |               |             |           |
-  +------------------------------------------------------------+-----------+
+  +------------------------------------------------------------------------+
   </pre>
   <p>
   Each *chunk*'s data consists of the following:
   </p>
   <pre>
-  +-----------------------------------------+
-  |              |              |           |
-  | delta header | mdiff header |   delta   |
-  |  (various)   |  (12 bytes)  | (various) |
-  |              |              |           |
-  +-----------------------------------------+
+  +---------------------------------------+
+  |                        |              |
+  |     delta header       |  delta data  |
+  |  (various by version)  |  (various)   |
+  |                        |              |
+  +---------------------------------------+
   </pre>
   <p>
-  The *length* field is the byte length of the remaining 3 logical pieces
-  of data. The *delta* is a diff from an existing entry in the changelog.
+  The *delta data* is a series of *delta*s that describe a diff from an existing
+  entry (either one that the recipient already has, or one previously specified
+  in the bundle/changegroup).
   </p>
   <p>
   The *delta header* is different between versions &quot;1&quot;, &quot;2&quot;, and
   &quot;3&quot; of the changegroup format.
   </p>
   <p>
-  Version 1:
+  Version 1 (headerlen=80):
   </p>
   <pre>
   +------------------------------------------------------+
@@ -3067,7 +3199,7 @@
   +------------------------------------------------------+
   </pre>
   <p>
-  Version 2:
+  Version 2 (headerlen=100):
   </p>
   <pre>
   +------------------------------------------------------------------+
@@ -3078,85 +3210,104 @@
   +------------------------------------------------------------------+
   </pre>
   <p>
-  Version 3:
+  Version 3 (headerlen=102):
   </p>
   <pre>
   +------------------------------------------------------------------------------+
   |            |             |             |            |            |           |
-  |    node    |   p1 node   |   p2 node   | base node  | link node  | flags     |
+  |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
   |            |             |             |            |            |           |
   +------------------------------------------------------------------------------+
   </pre>
   <p>
-  The *mdiff header* consists of 3 32-bit big-endian signed integers
-  describing offsets at which to apply the following delta content:
+  The *delta data* consists of &quot;chunklen - 4 - headerlen&quot; bytes, which contain a
+  series of *delta*s, densely packed (no separators). These deltas describe a diff
+  from an existing entry (either one that the recipient already has, or one
+  previously specified in the bundle/changegroup). The format is described more
+  fully in &quot;hg help internals.bdiff&quot;, but briefly:
   </p>
   <pre>
-  +-------------------------------------+
-  |           |            |            |
-  |  offset   | old length | new length |
-  | (32 bits) |  (32 bits) |  (32 bits) |
-  |           |            |            |
-  +-------------------------------------+
+  +---------------------------------------------------------------+
+  |              |            |            |                      |
+  | start offset | end offset | new length |        content       |
+  |  (4 bytes)   |  (4 bytes) |  (4 bytes) | (&lt;new length&gt; bytes) |
+  |              |            |            |                      |
+  +---------------------------------------------------------------+
   </pre>
   <p>
+  Please note that the length field in the delta data does *not* include itself.
+  </p>
+  <p>
   In version 1, the delta is always applied against the previous node from
   the changegroup or the first parent if this is the first entry in the
   changegroup.
   </p>
   <p>
-  In version 2, the delta base node is encoded in the entry in the
+  In version 2 and up, the delta base node is encoded in the entry in the
   changegroup. This allows the delta to be expressed against any parent,
   which can result in smaller deltas and more efficient encoding of data.
   </p>
   <h2>Changeset Segment</h2>
   <p>
   The *changeset segment* consists of a single *delta group* holding
-  changelog data. It is followed by an *empty chunk* to denote the
-  boundary to the *manifests segment*.
+  changelog data. The *empty chunk* at the end of the *delta group* denotes
+  the boundary to the *manifest segment*.
   </p>
   <h2>Manifest Segment</h2>
   <p>
-  The *manifest segment* consists of a single *delta group* holding
-  manifest data. It is followed by an *empty chunk* to denote the boundary
-  to the *filelogs segment*.
+  The *manifest segment* consists of a single *delta group* holding manifest
+  data. If treemanifests are in use, it contains only the manifest for the
+  root directory of the repository. Otherwise, it contains the entire
+  manifest data. The *empty chunk* at the end of the *delta group* denotes
+  the boundary to the next segment (either the *treemanifests segment* or the
+  *filelogs segment*, depending on version and the request options).
+  </p>
+  <h3>Treemanifests Segment</h3>
+  <p>
+  The *treemanifests segment* only exists in changegroup version &quot;3&quot;, and
+  only if the 'treemanifest' param is part of the bundle2 changegroup part
+  (it is not possible to use changegroup version 3 outside of bundle2).
+  Aside from the filenames in the *treemanifests segment* containing a
+  trailing &quot;/&quot; character, it behaves identically to the *filelogs segment*
+  (see below). The final sub-segment is followed by an *empty chunk* (logically,
+  a sub-segment with filename size 0). This denotes the boundary to the
+  *filelogs segment*.
   </p>
   <h2>Filelogs Segment</h2>
   <p>
-  The *filelogs* segment consists of multiple sub-segments, each
+  The *filelogs segment* consists of multiple sub-segments, each
   corresponding to an individual file whose data is being described:
   </p>
   <pre>
-  +--------------------------------------+
-  |          |          |          |     |
-  | filelog0 | filelog1 | filelog2 | ... |
-  |          |          |          |     |
-  +--------------------------------------+
+  +--------------------------------------------------+
+  |          |          |          |     |           |
+  | filelog0 | filelog1 | filelog2 | ... |    0x0    |
+  |          |          |          |     | (4 bytes) |
+  |          |          |          |     |           |
+  +--------------------------------------------------+
   </pre>
   <p>
-  In version &quot;3&quot; of the changegroup format, filelogs may include
-  directory logs when treemanifests are in use. directory logs are
-  identified by having a trailing '/' on their filename (see below).
-  </p>
-  <p>
-  The final filelog sub-segment is followed by an *empty chunk* to denote
-  the end of the segment and the overall changegroup.
+  The final filelog sub-segment is followed by an *empty chunk* (logically,
+  a sub-segment with filename size 0). This denotes the end of the segment
+  and of the overall changegroup.
   </p>
   <p>
   Each filelog sub-segment consists of the following:
   </p>
   <pre>
-  +------------------------------------------+
-  |               |            |             |
-  | filename size |  filename  | delta group |
-  |   (32 bits)   |  (various) |  (various)  |
-  |               |            |             |
-  +------------------------------------------+
+  +------------------------------------------------------+
+  |                 |                      |             |
+  | filename length |       filename       | delta group |
+  |    (4 bytes)    | (&lt;length - 4&gt; bytes) |  (various)  |
+  |                 |                      |             |
+  +------------------------------------------------------+
   </pre>
   <p>
   That is, a *chunk* consisting of the filename (not terminated or padded)
-  followed by N chunks constituting the *delta group* for this file.
+  followed by N chunks constituting the *delta group* for this file. The
+  *empty chunk* at the end of each *delta group* denotes the boundary to the
+  next filelog sub-segment.
   </p>
   
   </div>
--- a/tests/test-hgrc.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgrc.t	Tue Apr 18 12:24:34 2017 -0400
@@ -176,6 +176,20 @@
   --debug: ui.debug=True
   --quiet: ui.quiet=False
 
+with environment variables
+
+  $ PAGER=p1 EDITOR=e1 VISUAL=e2 hg showconfig --debug
+  set config by: $EDITOR
+  set config by: $VISUAL
+  set config by: $PAGER
+  read config from: $TESTTMP/hgrc
+  repo: bundle.mainreporoot=$TESTTMP
+  $PAGER: pager.pager=p1
+  $VISUAL: ui.editor=e2
+  --verbose: ui.verbose=False
+  --debug: ui.debug=True
+  --quiet: ui.quiet=False
+
 plain mode with exceptions
 
   $ cat > plain.py <<EOF
--- a/tests/test-hgweb-commands.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-commands.t	Tue Apr 18 12:24:34 2017 -0400
@@ -58,7 +58,7 @@
 
 Logs and changes
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -244,7 +244,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -422,7 +422,7 @@
   
     </channel>
   </rss> (no-eol)
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -522,7 +522,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -618,7 +618,7 @@
   
     </channel>
   </rss> (no-eol)
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=atom'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=atom'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -673,7 +673,7 @@
    </entry>
   
   </feed>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log/1/foo/?style=rss'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log/1/foo/?style=rss'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -694,7 +694,7 @@
   
     </channel>
   </rss>
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'shortlog/'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'shortlog/'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -834,7 +834,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/0/'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'rev/0/'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -965,7 +965,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'rev/1/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'rev/1/?style=raw'
   200 Script output follows
   
   
@@ -982,7 +982,7 @@
   @@ -0,0 +1,1 @@
   +2ef0ac749a14e4f57a5a822464a0902c6f7f448f 1.0
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=base'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=base'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1071,12 +1071,12 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=stable&style=raw' | grep 'revision:'
   revision:    2
 
 Search with revset syntax
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=tip^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=tip^&style=raw'
   200 Script output follows
   
   
@@ -1093,7 +1093,7 @@
   branch:      stable
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(),2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(),2)^&style=raw'
   200 Script output follows
   
   
@@ -1117,7 +1117,7 @@
   branch:      default
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(all(,2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(all(,2)^&style=raw'
   200 Script output follows
   
   
@@ -1127,7 +1127,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=last(al(),2)^&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=last(al(),2)^&style=raw'
   200 Script output follows
   
   
@@ -1137,7 +1137,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(anotherthing)&style=raw'
   200 Script output follows
   
   
@@ -1155,7 +1155,7 @@
   bookmark:    anotherthing
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=bookmark(abc)&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=bookmark(abc)&style=raw'
   200 Script output follows
   
   
@@ -1165,7 +1165,7 @@
   # Mode literal keyword search
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=deadbeef:&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=deadbeef:&style=raw'
   200 Script output follows
   
   
@@ -1176,7 +1176,7 @@
   
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("test")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("test")&style=raw'
   200 Script output follows
   
   
@@ -1217,7 +1217,7 @@
   bookmark:    anotherthing
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=user("re:test")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=user("re:test")&style=raw'
   200 Script output follows
   
   
@@ -1230,11 +1230,11 @@
 
 File-related
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo/?style=raw'
   200 Script output follows
   
   foo
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'annotate/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'annotate/1/foo/?style=raw'
   200 Script output follows
   
   
@@ -1243,7 +1243,7 @@
   
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/?style=raw'
   200 Script output follows
   
   
@@ -1259,7 +1259,7 @@
   $ hg parents --template "{node|short}\n" -r 1 foo
   2ef0ac749a14
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/1/foo'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/1/foo'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1343,9 +1343,12 @@
   <div class="overflow">
   <div class="sourcefirst linewraptoggle">line wrap: <a class="linewraplink" href="javascript:toggleLinewrap()">on</a></div>
   <div class="sourcefirst"> line source</div>
-  <pre class="sourcelines stripes4 wrap bottomline">
+  <pre class="sourcelines stripes4 wrap bottomline" data-logurl="/log/1/foo">
   <span id="l1">foo</span><a href="#l1"></a></pre>
   </div>
+  
+  <script type="text/javascript" src="/static/followlines.js"></script>
+  
   </div>
   </div>
   
@@ -1354,7 +1357,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/0/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/0/foo/?style=raw'
   200 Script output follows
   
   
@@ -1368,7 +1371,7 @@
   
   
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'filediff/1/foo/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'filediff/1/foo/?style=raw'
   200 Script output follows
   
   
@@ -1384,7 +1387,7 @@
   $ hg parents --template "{node|short}\n" -r 2 foo
   2ef0ac749a14
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/2/foo'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/2/foo'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -1468,9 +1471,12 @@
   <div class="overflow">
   <div class="sourcefirst linewraptoggle">line wrap: <a class="linewraplink" href="javascript:toggleLinewrap()">on</a></div>
   <div class="sourcefirst"> line source</div>
-  <pre class="sourcelines stripes4 wrap bottomline">
+  <pre class="sourcelines stripes4 wrap bottomline" data-logurl="/log/2/foo">
   <span id="l1">another</span><a href="#l1"></a></pre>
   </div>
+  
+  <script type="text/javascript" src="/static/followlines.js"></script>
+  
   </div>
   </div>
   
@@ -1483,23 +1489,23 @@
 
 Overviews
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-tags'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-tags'
   200 Script output follows
   
   tip	cad8025a2e87f88c06259790adfa15acb4080123
   1.0	2ef0ac749a14e4f57a5a822464a0902c6f7f448f
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-branches'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-branches'
   200 Script output follows
   
   unstable	cad8025a2e87f88c06259790adfa15acb4080123	open
   stable	1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe	inactive
   default	a4f92ed23982be056b9852de5dfe873eaac7f0de	inactive
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'raw-bookmarks'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'raw-bookmarks'
   200 Script output follows
   
   something	cad8025a2e87f88c06259790adfa15acb4080123
   anotherthing	2ef0ac749a14e4f57a5a822464a0902c6f7f448f
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'summary/?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'summary/?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -1697,7 +1703,7 @@
   </body>
   </html>
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -1843,7 +1849,7 @@
   
 raw graph
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/?style=raw'
   200 Script output follows
   
   
@@ -1893,28 +1899,28 @@
 
 capabilities
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities'; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'; echo
   200 Script output follows
   
   lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=*zlib (glob)
 
 heads
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=heads'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=heads'
   200 Script output follows
   
   cad8025a2e87f88c06259790adfa15acb4080123
 
 branches
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
   200 Script output follows
   
   0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
 
 changegroup
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000'
   200 Script output follows
   
   x\x9c\xbd\x94MHTQ\x14\xc7'+\x9d\xc66\x81\x89P\xc1\xa3\x14\xcct\xba\xef\xbe\xfb\xde\xbb\xcfr0\xb3"\x02\x11[%\x98\xdcO\xa7\xd2\x19\x98y\xd2\x07h"\x96\xa0e\xda\xa6lUY-\xca\x08\xa2\x82\x16\x96\xd1\xa2\xf0#\xc8\x95\x1b\xdd$!m*"\xc8\x82\xea\xbe\x9c\x01\x85\xc9\x996\x1d\xf8\xc1\xe3~\x9d\xff9\xef\x7f\xaf\xcf\xe7\xbb\x19\xfc4\xec^\xcb\x9b\xfbz\xa6\xbe\xb3\x90_\xef/\x8d\x9e\xad\xbe\xe4\xcb0\xd2\xec\xad\x12X:\xc8\x12\x12\xd9:\x95\xba	\x1cG\xb7$\xc5\xc44\x1c(\x1d\x03\x03\xdb\x84\x0cK#\xe0\x8a\xb8\x1b\x00\x1a\x08p\xb2SF\xa3\x01\x8f\x00%q\xa1Ny{k!8\xe5t>[{\xe2j\xddl\xc3\xcf\xee\xd0\xddW\x9ff3U\x9djobj\xbb\x87E\x88\x05l\x001\x12\x18\x13\xc6 \xb7(\xe3\x02a\x80\x81\xcel.u\x9b\x1b\x8c\x91\x80Z\x0c\x15\x15 (esc)
@@ -1925,14 +1931,14 @@
 
 stream_out
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=stream_out'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
   200 Script output follows
   
   1
 
 failing unbundle, requires POST request
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=unbundle'
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=unbundle'
   405 push requires POST request
   
   0
@@ -1941,7 +1947,7 @@
 
 Static files
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'static/style.css'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'static/style.css'
   200 Script output follows
   
   a { text-decoration:none; }
@@ -2077,7 +2083,7 @@
   > --cwd .. -R `pwd`
   $ cat hg.pid >> $DAEMON_PIDS
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'log?rev=adds("foo")&style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'log?rev=adds("foo")&style=raw'
   200 Script output follows
   
   
@@ -2110,7 +2116,7 @@
 
 Graph json escape of multibyte character
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'graph/' > out
+  $ get-with-headers.py $LOCALIP:$HGPORT 'graph/' > out
   >>> from __future__ import print_function
   >>> for line in open("out"):
   ...     if line.startswith("var data ="):
@@ -2121,14 +2127,14 @@
 
 (plain version to check the format)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo
   200 Script output follows
   
   lookup changegroupsubset branchmap pushkey known
 
 (spread version to check the content)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n'; echo
   200
   Script
   output
@@ -2194,28 +2200,33 @@
 
 Test paging
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/?style=raw' | grep changeset
   changeset:   aed2d9c1d0e7
   changeset:   b60a39a85a01
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/?style=raw&revcount=3' | grep changeset
   changeset:   aed2d9c1d0e7
   changeset:   b60a39a85a01
   changeset:   ada793dcc118
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/e06180cbfb0?style=raw&revcount=3' | grep changeset
   changeset:   e06180cbfb0c
   changeset:   b4e73ffab476
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT \
+  $ get-with-headers.py $LOCALIP:$HGPORT \
   >   'graph/b4e73ffab47?style=raw&revcount=3' | grep changeset
   changeset:   b4e73ffab476
 
   $ cat errors.log
 
+MSYS rewrites the value of environment variables starting with '/' into
+'C:/MinGW/msys/1.0', which changes the status line to '400 no such method: C:'.
+
+#if no-msys
+
 bookmarks view doesn't choke on bookmarks on secret changesets (issue3774)
 
   $ hg phase -fs 4
@@ -2381,7 +2392,7 @@
   node:        (0, 6) (color 1)
   
   
-
+#endif
 
 
   $ cd ..
--- a/tests/test-hgweb-descend-empties.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-descend-empties.t	Tue Apr 18 12:24:34 2017 -0400
@@ -29,7 +29,7 @@
 
 manifest with descending (paper)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -147,7 +147,7 @@
 
 manifest with descending (coal)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=coal'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=coal'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
@@ -266,7 +266,7 @@
 
 manifest with descending (monoblue)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=monoblue'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=monoblue'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
@@ -379,7 +379,7 @@
 
 manifest with descending (gitweb)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=gitweb'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=gitweb'
   200 Script output follows
   
   <?xml version="1.0" encoding="ascii"?>
@@ -482,7 +482,7 @@
 
 manifest with descending (spartan)
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file?style=spartan'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file?style=spartan'
   200 Script output follows
   
   <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
--- a/tests/test-hgweb-filelog.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-filelog.t	Tue Apr 18 12:24:34 2017 -0400
@@ -190,6 +190,7 @@
   <h3>
    log a @ 4:<a href="/rev/3f41bc784e7e">3f41bc784e7e</a>
    <span class="branchname">a-branch</span> 
+   
   </h3>
   
   <form class="search" action="/log">
@@ -221,6 +222,7 @@
      <span class="branchname">a-branch</span> 
     </td>
    </tr>
+   
    <tr>
     <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
     <td class="author">test</td>
@@ -229,6 +231,7 @@
      <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
     </td>
    </tr>
+   
   
   </tbody>
   </table>
@@ -309,6 +312,7 @@
   <h3>
    log a @ 4:<a href="/rev/3f41bc784e7e">3f41bc784e7e</a>
    <span class="branchname">a-branch</span> 
+   
   </h3>
   
   <form class="search" action="/log">
@@ -340,6 +344,7 @@
      <span class="branchname">a-branch</span> 
     </td>
    </tr>
+   
    <tr>
     <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
     <td class="author">test</td>
@@ -348,6 +353,7 @@
      <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
     </td>
    </tr>
+   
   
   </tbody>
   </table>
@@ -428,6 +434,7 @@
   <h3>
    log a @ 1:<a href="/rev/5ed941583260">5ed941583260</a>
    <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
+   
   </h3>
   
   <form class="search" action="/log">
@@ -459,6 +466,7 @@
      <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
     </td>
    </tr>
+   
   
   </tbody>
   </table>
@@ -539,6 +547,7 @@
   <h3>
    log a @ 1:<a href="/rev/5ed941583260">5ed941583260</a>
    <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
+   
   </h3>
   
   <form class="search" action="/log">
@@ -570,6 +579,7 @@
      <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
     </td>
    </tr>
+   
   
   </tbody>
   </table>
@@ -654,6 +664,264 @@
   
   [1]
 
+  $ hg log -r 'followlines(c, 1:2, startrev=tip) and follow(c)'
+  changeset:   0:6563da9dcf87
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     b
+  
+  changeset:   7:46c1a66bd8fc
+  branch:      a-branch
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     change c
+  
+  $ (get-with-headers.py localhost:$HGPORT 'log/tip/c?linerange=1:2')
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: c history</title>
+  <link rel="alternate" type="application/atom+xml"
+     href="/atom-log/tip/c" title="Atom feed for test:c" />
+  <link rel="alternate" type="application/rss+xml"
+     href="/rss-log/tip/c" title="RSS feed for test:c" />
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/tip">log</a></li>
+  <li><a href="/graph/tip">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/tip">changeset</a></li>
+  <li><a href="/file/tip">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/tip/c">file</a></li>
+  <li><a href="/diff/tip/c">diff</a></li>
+  <li><a href="/comparison/tip/c">comparison</a></li>
+  <li><a href="/annotate/tip/c">annotate</a></li>
+  <li class="active">file log</li>
+  <li><a href="/raw-file/tip/c">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  <div class="atom-logo">
+  <a href="/atom-log/tip/c" title="subscribe to atom feed">
+  <img class="atom-logo" src="/static/feed-icon-14x14.png" alt="atom feed" />
+  </a>
+  </div>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   log c @ 7:<a href="/rev/46c1a66bd8fc">46c1a66bd8fc</a>
+   <span class="branchname">a-branch</span> <span class="tag">tip</span> 
+    (following lines 1:2 <a href="/log/tip/c">back to filelog</a>)
+  </h3>
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=1%3A2&revcount=30">less</a>
+  <a href="/log/tip/c?linerange=1%3A2&revcount=120">more</a>
+  |  </div>
+  
+  <table class="bigtable">
+  <thead>
+   <tr>
+    <th class="age">age</th>
+    <th class="author">author</th>
+    <th class="description">description</th>
+   </tr>
+  </thead>
+  <tbody class="stripes2">
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/46c1a66bd8fc">change c</a>
+     <span class="branchhead">a-branch</span> <span class="tag">tip</span> 
+    </td>
+   </tr>
+   
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/6563da9dcf87">b</a>
+     
+    </td>
+   </tr>
+   
+  
+  </tbody>
+  </table>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=1%3A2&revcount=30">less</a>
+  <a href="/log/tip/c?linerange=1%3A2&revcount=120">more</a>
+  |  
+  </div>
+  
+  </div>
+  </div>
+  
+  
+  
+  </body>
+  </html>
+  
+  $ (get-with-headers.py localhost:$HGPORT 'log/tip/c?linerange=1%3A2&revcount=1')
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: c history</title>
+  <link rel="alternate" type="application/atom+xml"
+     href="/atom-log/tip/c" title="Atom feed for test:c" />
+  <link rel="alternate" type="application/rss+xml"
+     href="/rss-log/tip/c" title="RSS feed for test:c" />
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/tip?revcount=1">log</a></li>
+  <li><a href="/graph/tip?revcount=1">graph</a></li>
+  <li><a href="/tags?revcount=1">tags</a></li>
+  <li><a href="/bookmarks?revcount=1">bookmarks</a></li>
+  <li><a href="/branches?revcount=1">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/tip?revcount=1">changeset</a></li>
+  <li><a href="/file/tip?revcount=1">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/tip/c?revcount=1">file</a></li>
+  <li><a href="/diff/tip/c?revcount=1">diff</a></li>
+  <li><a href="/comparison/tip/c?revcount=1">comparison</a></li>
+  <li><a href="/annotate/tip/c?revcount=1">annotate</a></li>
+  <li class="active">file log</li>
+  <li><a href="/raw-file/tip/c">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help?revcount=1">help</a></li>
+  </ul>
+  <div class="atom-logo">
+  <a href="/atom-log/tip/c" title="subscribe to atom feed">
+  <img class="atom-logo" src="/static/feed-icon-14x14.png" alt="atom feed" />
+  </a>
+  </div>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   log c @ 7:<a href="/rev/46c1a66bd8fc?revcount=1">46c1a66bd8fc</a>
+   <span class="branchname">a-branch</span> <span class="tag">tip</span> 
+    (following lines 1:2 <a href="/log/tip/c?revcount=1">back to filelog</a>)
+  </h3>
+  
+  <form class="search" action="/log">
+  <input type="hidden" name="revcount" value="1" />
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=1%3A2&revcount=1">less</a>
+  <a href="/log/tip/c?linerange=1%3A2&revcount=2">more</a>
+  |  </div>
+  
+  <table class="bigtable">
+  <thead>
+   <tr>
+    <th class="age">age</th>
+    <th class="author">author</th>
+    <th class="description">description</th>
+   </tr>
+  </thead>
+  <tbody class="stripes2">
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/46c1a66bd8fc?revcount=1">change c</a>
+     <span class="branchhead">a-branch</span> <span class="tag">tip</span> 
+    </td>
+   </tr>
+   
+  
+  </tbody>
+  </table>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=1%3A2&revcount=1">less</a>
+  <a href="/log/tip/c?linerange=1%3A2&revcount=2">more</a>
+  |  
+  </div>
+  
+  </div>
+  </div>
+  
+  
+  
+  </body>
+  </html>
+  
+  $ (get-with-headers.py localhost:$HGPORT 'log/3/a?linerange=1' --headeronly)
+  400 invalid linerange parameter
+  [1]
+  $ (get-with-headers.py localhost:$HGPORT 'log/3/a?linerange=1:a' --headeronly)
+  400 invalid linerange parameter
+  [1]
+  $ (get-with-headers.py localhost:$HGPORT 'log/3/a?linerange=1:2&linerange=3:4' --headeronly)
+  400 redundant linerange parameter
+  [1]
+  $ (get-with-headers.py localhost:$HGPORT 'log/3/a?linerange=3:2' --headeronly)
+  400 line range must be positive
+  [1]
+  $ (get-with-headers.py localhost:$HGPORT 'log/3/a?linerange=0:1' --headeronly)
+  400 fromline must be strictly positive
+  [1]
+
 should show base link, use spartan because it shows it
 
   $ (get-with-headers.py localhost:$HGPORT 'log/tip/c?style=spartan')
@@ -762,6 +1030,674 @@
   </html>
   
 
+filelog with patch
+
+  $ (get-with-headers.py localhost:$HGPORT 'log/4/a?patch=1')
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: a history</title>
+  <link rel="alternate" type="application/atom+xml"
+     href="/atom-log/tip/a" title="Atom feed for test:a" />
+  <link rel="alternate" type="application/rss+xml"
+     href="/rss-log/tip/a" title="RSS feed for test:a" />
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/4">log</a></li>
+  <li><a href="/graph/4">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/4">changeset</a></li>
+  <li><a href="/file/4">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/4/a">file</a></li>
+  <li><a href="/diff/4/a">diff</a></li>
+  <li><a href="/comparison/4/a">comparison</a></li>
+  <li><a href="/annotate/4/a">annotate</a></li>
+  <li class="active">file log</li>
+  <li><a href="/raw-file/4/a">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  <div class="atom-logo">
+  <a href="/atom-log/tip/a" title="subscribe to atom feed">
+  <img class="atom-logo" src="/static/feed-icon-14x14.png" alt="atom feed" />
+  </a>
+  </div>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   log a @ 4:<a href="/rev/3f41bc784e7e">3f41bc784e7e</a>
+   <span class="branchname">a-branch</span> 
+   
+  </h3>
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="navigate">
+  <a href="/log/4/a?patch=1&revcount=30">less</a>
+  <a href="/log/4/a?patch=1&revcount=120">more</a>
+  | <a href="/log/5ed941583260/a">(0)</a> <a href="/log/tip/a">tip</a> </div>
+  
+  <table class="bigtable">
+  <thead>
+   <tr>
+    <th class="age">age</th>
+    <th class="author">author</th>
+    <th class="description">description</th>
+   </tr>
+  </thead>
+  <tbody class="stripes2">
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/3f41bc784e7e">second a</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="3f41bc784e7e-l1.1" class="minusline">--- /dev/null	Thu Jan 01 00:00:00 1970 +0000</span><a href="#3f41bc784e7e-l1.1"></a>
+  <span id="3f41bc784e7e-l1.2" class="plusline">+++ b/a	Thu Jan 01 00:00:00 1970 +0000</span><a href="#3f41bc784e7e-l1.2"></a>
+  <span id="3f41bc784e7e-l1.3" class="atline">@@ -0,0 +1,1 @@</span><a href="#3f41bc784e7e-l1.3"></a>
+  <span id="3f41bc784e7e-l1.4" class="plusline">+b</span><a href="#3f41bc784e7e-l1.4"></a></pre></div></td></tr>
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/5ed941583260">first a</a>
+     <span class="tag">a-tag</span> <span class="tag">a-bookmark</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="5ed941583260-l1.1" class="minusline">--- /dev/null	Thu Jan 01 00:00:00 1970 +0000</span><a href="#5ed941583260-l1.1"></a>
+  <span id="5ed941583260-l1.2" class="plusline">+++ b/a	Thu Jan 01 00:00:00 1970 +0000</span><a href="#5ed941583260-l1.2"></a>
+  <span id="5ed941583260-l1.3" class="atline">@@ -0,0 +1,1 @@</span><a href="#5ed941583260-l1.3"></a>
+  <span id="5ed941583260-l1.4" class="plusline">+a</span><a href="#5ed941583260-l1.4"></a></pre></div></td></tr>
+  
+  </tbody>
+  </table>
+  
+  <div class="navigate">
+  <a href="/log/4/a?patch=1&revcount=30">less</a>
+  <a href="/log/4/a?patch=1&revcount=120">more</a>
+  | <a href="/log/5ed941583260/a">(0)</a> <a href="/log/tip/a">tip</a> 
+  </div>
+  
+  </div>
+  </div>
+  
+  
+  
+  </body>
+  </html>
+  
+filelog with 'linerange' and 'patch'
+
+  $ cat c
+  b
+  c
+  $ cat <<EOF > c
+  > 0
+  > 0
+  > b
+  > c+
+  > 
+  > a
+  > a
+  > 
+  > d
+  > e
+  > f
+  > EOF
+  $ hg ci -m 'make c bigger and touch its beginning' c
+  $ cat <<EOF > c
+  > 0
+  > 0
+  > b
+  > c+
+  > 
+  > a
+  > a
+  > 
+  > d
+  > e+
+  > f
+  > EOF
+  $ hg ci -m 'just touch end of c' c
+  $ cat <<EOF > c
+  > 0
+  > 0
+  > b
+  > c++
+  > 
+  > a
+  > a
+  > 
+  > d
+  > e+
+  > f
+  > EOF
+  $ hg ci -m 'touch beginning of c' c
+  $ cat <<EOF > c
+  > 0
+  > 0
+  > b-
+  > c++
+  > 
+  > a
+  > a
+  > 
+  > d
+  > e+
+  > f+
+  > EOF
+  $ hg ci -m 'touching beginning and end of c' c
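
For context: followlines(file, fromline:toline[, startrev=., descend=False]) is
the revset exercised below; it selects the changesets whose diffs touch the
given line range of a file, tracking how that range drifts across revisions. A
minimal standalone sketch (the -T template is illustrative, not part of the
test):

  $ hg log -r 'followlines(c, 3:4, startrev=tip)' -T '{rev}: {desc}\n'

The test intersects it with follow(c) and prints the patches with -p: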
+  $ hg log -r 'followlines(c, 3:4, startrev=tip) and follow(c)' -p
+  changeset:   0:6563da9dcf87
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     b
+  
+  diff -r 000000000000 -r 6563da9dcf87 b
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/b	Thu Jan 01 00:00:00 1970 +0000
+  @@ -0,0 +1,1 @@
+  +b
+  
+  changeset:   7:46c1a66bd8fc
+  branch:      a-branch
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     change c
+  
+  diff -r c9637d3cc8ef -r 46c1a66bd8fc c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,1 +1,2 @@
+   b
+  +c
+  
+  changeset:   8:5c6574614c37
+  branch:      a-branch
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     make c bigger and touch its beginning
+  
+  diff -r 46c1a66bd8fc -r 5c6574614c37 c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,11 @@
+  +0
+  +0
+   b
+  -c
+  +c+
+  +
+  +a
+  +a
+  +
+  +d
+  +e
+  +f
+  
+  changeset:   10:e95928d60479
+  branch:      a-branch
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     touch beginning of c
+  
+  diff -r e1d3e9c5a23f -r e95928d60479 c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,7 +1,7 @@
+   0
+   0
+   b
+  -c+
+  +c++
+   
+   a
+   a
+  
+  changeset:   11:fb9bc322513a
+  branch:      a-branch
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     touching beginning and end of c
+  
+  diff -r e95928d60479 -r fb9bc322513a c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,6 +1,6 @@
+   0
+   0
+  -b
+  +b-
+   c++
+   
+   a
+  @@ -8,4 +8,4 @@
+   
+   d
+   e+
+  -f
+  +f+
+  
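hgweb exposes the same query on the filelog page through the linerange
parameter; supplying patch as well (even with an empty value) renders the
overlapping diff hunks inline. An illustrative request that keeps only the
page heading:

  $ get-with-headers.py localhost:$HGPORT 'log/tip/c?linerange=3:4&patch=' | grep 'following lines'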
+  $ (get-with-headers.py localhost:$HGPORT 'log/tip/c?linerange=3:4&patch=')
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: c history</title>
+  <link rel="alternate" type="application/atom+xml"
+     href="/atom-log/tip/c" title="Atom feed for test:c" />
+  <link rel="alternate" type="application/rss+xml"
+     href="/rss-log/tip/c" title="RSS feed for test:c" />
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/tip">log</a></li>
+  <li><a href="/graph/tip">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/tip">changeset</a></li>
+  <li><a href="/file/tip">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/tip/c">file</a></li>
+  <li><a href="/diff/tip/c">diff</a></li>
+  <li><a href="/comparison/tip/c">comparison</a></li>
+  <li><a href="/annotate/tip/c">annotate</a></li>
+  <li class="active">file log</li>
+  <li><a href="/raw-file/tip/c">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  <div class="atom-logo">
+  <a href="/atom-log/tip/c" title="subscribe to atom feed">
+  <img class="atom-logo" src="/static/feed-icon-14x14.png" alt="atom feed" />
+  </a>
+  </div>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   log c @ 11:<a href="/rev/fb9bc322513a">fb9bc322513a</a>
+   <span class="branchname">a-branch</span> <span class="tag">tip</span> 
+    (following lines 3:4 <a href="/log/tip/c">back to filelog</a>)
+  </h3>
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=3%3A4&patch=&revcount=30">less</a>
+  <a href="/log/tip/c?linerange=3%3A4&patch=&revcount=120">more</a>
+  |  </div>
+  
+  <table class="bigtable">
+  <thead>
+   <tr>
+    <th class="age">age</th>
+    <th class="author">author</th>
+    <th class="description">description</th>
+   </tr>
+  </thead>
+  <tbody class="stripes2">
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/fb9bc322513a">touching beginning and end of c</a>
+     <span class="branchhead">a-branch</span> <span class="tag">tip</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="fb9bc322513a-l1.1" class="minusline">--- a/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#fb9bc322513a-l1.1"></a>
+  <span id="fb9bc322513a-l1.2" class="plusline">+++ b/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#fb9bc322513a-l1.2"></a>
+  <span id="fb9bc322513a-l1.3" class="atline">@@ -1,6 +1,6 @@</span><a href="#fb9bc322513a-l1.3"></a>
+  <span id="fb9bc322513a-l1.4"> 0</span><a href="#fb9bc322513a-l1.4"></a>
+  <span id="fb9bc322513a-l1.5"> 0</span><a href="#fb9bc322513a-l1.5"></a>
+  <span id="fb9bc322513a-l1.6" class="minusline">-b</span><a href="#fb9bc322513a-l1.6"></a>
+  <span id="fb9bc322513a-l1.7" class="plusline">+b-</span><a href="#fb9bc322513a-l1.7"></a>
+  <span id="fb9bc322513a-l1.8"> c++</span><a href="#fb9bc322513a-l1.8"></a>
+  <span id="fb9bc322513a-l1.9"> </span><a href="#fb9bc322513a-l1.9"></a>
+  <span id="fb9bc322513a-l1.10"> a</span><a href="#fb9bc322513a-l1.10"></a></pre></div></td></tr>
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/e95928d60479">touch beginning of c</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="e95928d60479-l1.1" class="minusline">--- a/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#e95928d60479-l1.1"></a>
+  <span id="e95928d60479-l1.2" class="plusline">+++ b/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#e95928d60479-l1.2"></a>
+  <span id="e95928d60479-l1.3" class="atline">@@ -1,7 +1,7 @@</span><a href="#e95928d60479-l1.3"></a>
+  <span id="e95928d60479-l1.4"> 0</span><a href="#e95928d60479-l1.4"></a>
+  <span id="e95928d60479-l1.5"> 0</span><a href="#e95928d60479-l1.5"></a>
+  <span id="e95928d60479-l1.6"> b</span><a href="#e95928d60479-l1.6"></a>
+  <span id="e95928d60479-l1.7" class="minusline">-c+</span><a href="#e95928d60479-l1.7"></a>
+  <span id="e95928d60479-l1.8" class="plusline">+c++</span><a href="#e95928d60479-l1.8"></a>
+  <span id="e95928d60479-l1.9"> </span><a href="#e95928d60479-l1.9"></a>
+  <span id="e95928d60479-l1.10"> a</span><a href="#e95928d60479-l1.10"></a>
+  <span id="e95928d60479-l1.11"> a</span><a href="#e95928d60479-l1.11"></a></pre></div></td></tr>
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/5c6574614c37">make c bigger and touch its beginning</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="5c6574614c37-l1.1" class="minusline">--- a/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#5c6574614c37-l1.1"></a>
+  <span id="5c6574614c37-l1.2" class="plusline">+++ b/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#5c6574614c37-l1.2"></a>
+  <span id="5c6574614c37-l1.3" class="atline">@@ -1,2 +1,11 @@</span><a href="#5c6574614c37-l1.3"></a>
+  <span id="5c6574614c37-l1.4" class="plusline">+0</span><a href="#5c6574614c37-l1.4"></a>
+  <span id="5c6574614c37-l1.5" class="plusline">+0</span><a href="#5c6574614c37-l1.5"></a>
+  <span id="5c6574614c37-l1.6"> b</span><a href="#5c6574614c37-l1.6"></a>
+  <span id="5c6574614c37-l1.7" class="minusline">-c</span><a href="#5c6574614c37-l1.7"></a>
+  <span id="5c6574614c37-l1.8" class="plusline">+c+</span><a href="#5c6574614c37-l1.8"></a>
+  <span id="5c6574614c37-l1.9" class="plusline">+</span><a href="#5c6574614c37-l1.9"></a>
+  <span id="5c6574614c37-l1.10" class="plusline">+a</span><a href="#5c6574614c37-l1.10"></a>
+  <span id="5c6574614c37-l1.11" class="plusline">+a</span><a href="#5c6574614c37-l1.11"></a>
+  <span id="5c6574614c37-l1.12" class="plusline">+</span><a href="#5c6574614c37-l1.12"></a>
+  <span id="5c6574614c37-l1.13" class="plusline">+d</span><a href="#5c6574614c37-l1.13"></a>
+  <span id="5c6574614c37-l1.14" class="plusline">+e</span><a href="#5c6574614c37-l1.14"></a>
+  <span id="5c6574614c37-l1.15" class="plusline">+f</span><a href="#5c6574614c37-l1.15"></a></pre></div></td></tr>
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/46c1a66bd8fc">change c</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="46c1a66bd8fc-l1.1" class="minusline">--- a/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#46c1a66bd8fc-l1.1"></a>
+  <span id="46c1a66bd8fc-l1.2" class="plusline">+++ b/c	Thu Jan 01 00:00:00 1970 +0000</span><a href="#46c1a66bd8fc-l1.2"></a>
+  <span id="46c1a66bd8fc-l1.3" class="atline">@@ -1,1 +1,2 @@</span><a href="#46c1a66bd8fc-l1.3"></a>
+  <span id="46c1a66bd8fc-l1.4"> b</span><a href="#46c1a66bd8fc-l1.4"></a>
+  <span id="46c1a66bd8fc-l1.5" class="plusline">+c</span><a href="#46c1a66bd8fc-l1.5"></a></pre></div></td></tr>
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/6563da9dcf87">b</a>
+     
+    </td>
+   </tr>
+   <tr><td colspan="3"><div class="bottomline inc-lineno"><pre class="sourcelines wrap">
+  <span id="6563da9dcf87-l1.1" class="minusline">--- /dev/null	Thu Jan 01 00:00:00 1970 +0000</span><a href="#6563da9dcf87-l1.1"></a>
+  <span id="6563da9dcf87-l1.2" class="plusline">+++ b/b	Thu Jan 01 00:00:00 1970 +0000</span><a href="#6563da9dcf87-l1.2"></a></pre></div></td></tr>
+  
+  </tbody>
+  </table>
+  
+  <div class="navigate">
+  <a href="/log/tip/c?linerange=3%3A4&patch=&revcount=30">less</a>
+  <a href="/log/tip/c?linerange=3%3A4&patch=&revcount=120">more</a>
+  |  
+  </div>
+  
+  </div>
+  </div>
+  
+  
+  
+  </body>
+  </html>
+  
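With descend=True, followlines() walks forward from startrev instead of
backward, returning the descendants of startrev that touch the tracked range,
as the next query shows. A hedged one-line sketch of the same selection:

  $ hg log -r 'followlines(c, 3:4, startrev=8, descend=True)' -T '{rev}\n'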
+  $ hg log -r 'followlines(c, 3:4, startrev=8, descend=True) and follow(c)' -p
+  changeset:   8:5c6574614c37
+  branch:      a-branch
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     make c bigger and touch its beginning
+  
+  diff -r 46c1a66bd8fc -r 5c6574614c37 c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,11 @@
+  +0
+  +0
+   b
+  -c
+  +c+
+  +
+  +a
+  +a
+  +
+  +d
+  +e
+  +f
+  
+  changeset:   10:e95928d60479
+  branch:      a-branch
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     touch beginning of c
+  
+  diff -r e1d3e9c5a23f -r e95928d60479 c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,7 +1,7 @@
+   0
+   0
+   b
+  -c+
+  +c++
+   
+   a
+   a
+  
+  changeset:   11:fb9bc322513a
+  branch:      a-branch
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     touching beginning and end of c
+  
+  diff -r e95928d60479 -r fb9bc322513a c
+  --- a/c	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/c	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,6 +1,6 @@
+   0
+   0
+  -b
+  +b-
+   c++
+   
+   a
+  @@ -8,4 +8,4 @@
+   
+   d
+   e+
+  -f
+  +f+
+  
+  $ (get-with-headers.py localhost:$HGPORT 'log/8/c?linerange=3:4&descend=')
+  200 Script output follows
+  
+  <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+  <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+  <head>
+  <link rel="icon" href="/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/static/mercurial.js"></script>
+  
+  <title>test: c history</title>
+  <link rel="alternate" type="application/atom+xml"
+     href="/atom-log/tip/c" title="Atom feed for test:c" />
+  <link rel="alternate" type="application/rss+xml"
+     href="/rss-log/tip/c" title="RSS feed for test:c" />
+  </head>
+  <body>
+  
+  <div class="container">
+  <div class="menu">
+  <div class="logo">
+  <a href="https://mercurial-scm.org/">
+  <img src="/static/hglogo.png" alt="mercurial" /></a>
+  </div>
+  <ul>
+  <li><a href="/shortlog/8">log</a></li>
+  <li><a href="/graph/8">graph</a></li>
+  <li><a href="/tags">tags</a></li>
+  <li><a href="/bookmarks">bookmarks</a></li>
+  <li><a href="/branches">branches</a></li>
+  </ul>
+  <ul>
+  <li><a href="/rev/8">changeset</a></li>
+  <li><a href="/file/8">browse</a></li>
+  </ul>
+  <ul>
+  <li><a href="/file/8/c">file</a></li>
+  <li><a href="/diff/8/c">diff</a></li>
+  <li><a href="/comparison/8/c">comparison</a></li>
+  <li><a href="/annotate/8/c">annotate</a></li>
+  <li class="active">file log</li>
+  <li><a href="/raw-file/8/c">raw</a></li>
+  </ul>
+  <ul>
+  <li><a href="/help">help</a></li>
+  </ul>
+  <div class="atom-logo">
+  <a href="/atom-log/tip/c" title="subscribe to atom feed">
+  <img class="atom-logo" src="/static/feed-icon-14x14.png" alt="atom feed" />
+  </a>
+  </div>
+  </div>
+  
+  <div class="main">
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+  <h3>
+   log c @ 8:<a href="/rev/5c6574614c37">5c6574614c37</a>
+   <span class="branchname">a-branch</span> 
+    (following lines 3:4, descending <a href="/log/8/c">back to filelog</a>)
+  </h3>
+  
+  <form class="search" action="/log">
+  
+  <p><input name="rev" id="search1" type="text" size="30" /></p>
+  <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+  number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+  </form>
+  
+  <div class="navigate">
+  <a href="/log/8/c?descend=&linerange=3%3A4&revcount=30">less</a>
+  <a href="/log/8/c?descend=&linerange=3%3A4&revcount=120">more</a>
+  |  </div>
+  
+  <table class="bigtable">
+  <thead>
+   <tr>
+    <th class="age">age</th>
+    <th class="author">author</th>
+    <th class="description">description</th>
+   </tr>
+  </thead>
+  <tbody class="stripes2">
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/5c6574614c37">make c bigger and touch its beginning</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/e95928d60479">touch beginning of c</a>
+     <span class="branchname">a-branch</span> 
+    </td>
+   </tr>
+   
+   <tr>
+    <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>
+    <td class="author">test</td>
+    <td class="description">
+     <a href="/rev/fb9bc322513a">touching beginning and end of c</a>
+     <span class="branchhead">a-branch</span> <span class="tag">tip</span> 
+    </td>
+   </tr>
+   
+  
+  </tbody>
+  </table>
+  
+  <div class="navigate">
+  <a href="/log/8/c?descend=&linerange=3%3A4&revcount=30">less</a>
+  <a href="/log/8/c?descend=&linerange=3%3A4&revcount=120">more</a>
+  |  
+  </div>
+  
+  </div>
+  </div>
+  
+  
+  
+  </body>
+  </html>
+  
+
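Note that the descending request above omitted the patch parameter, so only
the matching changesets were listed; to get inline hunks as in the earlier
page, the patch parameter would presumably be added as well (an assumed,
untested combination):

  $ get-with-headers.py localhost:$HGPORT 'log/8/c?linerange=3:4&descend=&patch=' | grep sourcelines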
 rss log
 
   $ (get-with-headers.py localhost:$HGPORT 'rss-log/tip/a')
--- a/tests/test-hgweb-json.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-json.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1443,7 +1443,7 @@
         "topic": "branches"
       },
       {
-        "summary": "create a changegroup file",
+        "summary": "create a bundle file",
         "topic": "bundle"
       },
       {
@@ -1535,7 +1535,7 @@
         "topic": "tags"
       },
       {
-        "summary": "apply one or more changegroup files",
+        "summary": "apply one or more bundle files",
         "topic": "unbundle"
       },
       {
@@ -1549,6 +1549,14 @@
     ],
     "topics": [
       {
+        "summary": "Bundle File Formats",
+        "topic": "bundlespec"
+      },
+      {
+        "summary": "Colorizing Outputs",
+        "topic": "color"
+      },
+      {
         "summary": "Configuration Files",
         "topic": "config"
       },
@@ -1593,6 +1601,10 @@
         "topic": "merge-tools"
       },
       {
+        "summary": "Pager Support",
+        "topic": "pager"
+      },
+      {
         "summary": "File Name Patterns",
         "topic": "patterns"
       },
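
These expectation updates track help-text changes elsewhere in the release:
the bundle/unbundle summaries now say "bundle file" rather than "changegroup
file", and bundlespec, color and pager appear as new entries in the help
topic index. The same topics should be reachable from the command line, e.g.:

  $ hg help bundlespec | head -n 3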
--- a/tests/test-hgweb-no-path-info.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-no-path-info.t	Tue Apr 18 12:24:34 2017 -0400
@@ -49,7 +49,7 @@
   >     'REQUEST_METHOD': 'GET',
   >     'PATH_INFO': '/',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -79,16 +79,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
     <title>[default] test</title>
-    <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-    <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+    <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+    <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
     <author>
      <name>test</name>
      <email>&#116;&#101;&#115;&#116;</email>
--- a/tests/test-hgweb-no-request-uri.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-no-request-uri.t	Tue Apr 18 12:24:34 2017 -0400
@@ -48,7 +48,7 @@
   >     'wsgi.run_once': False,
   >     'REQUEST_METHOD': 'GET',
   >     'SCRIPT_NAME': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
@@ -90,16 +90,16 @@
   <?xml version="1.0" encoding="ascii"?>
   <feed xmlns="http://www.w3.org/2005/Atom">
    <!-- Changelog -->
-   <id>http://127.0.0.1:$HGPORT/</id> (glob)
-   <link rel="self" href="http://127.0.0.1:$HGPORT/atom-log"/> (glob)
-   <link rel="alternate" href="http://127.0.0.1:$HGPORT/"/> (glob)
+   <id>http://$LOCALIP:$HGPORT/</id> (glob)
+   <link rel="self" href="http://$LOCALIP:$HGPORT/atom-log"/> (glob)
+   <link rel="alternate" href="http://$LOCALIP:$HGPORT/"/> (glob)
    <title>repo Changelog</title>
    <updated>1970-01-01T00:00:00+00:00</updated>
   
    <entry>
     <title>[default] test</title>
-    <id>http://127.0.0.1:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
-    <link href="http://127.0.0.1:$HGPORT/rev/61c9426e69fe"/> (glob)
+    <id>http://$LOCALIP:$HGPORT/#changeset-61c9426e69fef294feed5e2bbfc97d39944a5b1c</id> (glob)
+    <link href="http://$LOCALIP:$HGPORT/rev/61c9426e69fe"/> (glob)
     <author>
      <name>test</name>
      <email>&#116;&#101;&#115;&#116;</email>
--- a/tests/test-hgweb-non-interactive.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-non-interactive.t	Tue Apr 18 12:24:34 2017 -0400
@@ -60,7 +60,7 @@
   >     'SCRIPT_NAME': '',
   >     'PATH_INFO': '',
   >     'QUERY_STRING': '',
-  >     'SERVER_NAME': '127.0.0.1',
+  >     'SERVER_NAME': '$LOCALIP',
   >     'SERVER_PORT': os.environ['HGPORT'],
   >     'SERVER_PROTOCOL': 'HTTP/1.0'
   > }
--- a/tests/test-hgweb-raw.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-raw.t	Tue Apr 18 12:24:34 2017 -0400
@@ -32,7 +32,7 @@
   It is very boring to read, but computers don't
   care about things like that.
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ rm access.log error.log
   $ hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid \
@@ -53,6 +53,6 @@
   It is very boring to read, but computers don't
   care about things like that.
   $ cat access.log error.log
-  127.0.0.1 - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
+  $LOCALIP - - [*] "GET /?f=bf0ff59095c9;file=sub/some%20text%25.txt;style=raw HTTP/1.1" 200 - (glob)
 
   $ cd ..
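
The hunks in this and the following web tests swap hard-coded loopback
addresses for the $LOCALIP variable, which the test runner substitutes (it
defaults to 127.0.0.1 but may differ on hosts where the local address is not
the standard loopback). A typical rewritten invocation, taken from the symrev
test below:

  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS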
--- a/tests/test-hgweb-symrev.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb-symrev.t	Tue Apr 18 12:24:34 2017 -0400
@@ -37,7 +37,7 @@
 
 (De)referencing symbolic revisions (paper)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper' | egrep $REVLINKS
   <li><a href="/graph/tip?style=paper">graph</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
   <li><a href="/file/tip?style=paper">browse</a></li>
@@ -52,7 +52,7 @@
   <a href="/shortlog/tip?revcount=120&style=paper">more</a>
   | rev 2: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=paper">log</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
   <li><a href="/file/tip?style=paper">browse</a></li>
@@ -63,7 +63,7 @@
   <a href="/graph/tip?revcount=120&style=paper">more</a>
   | rev 2: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=paper">log</a></li>
   <li><a href="/graph/tip?style=paper">graph</a></li>
   <li><a href="/rev/tip?style=paper">changeset</a></li>
@@ -74,24 +74,24 @@
   <a href="/file/tip/dir/?style=paper">
   <a href="/file/tip/foo?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=paper' | egrep $REVLINKS
   <a href="/shortlog/default?style=paper" class="open">
   <a href="/shortlog/9d8c40cba617?style=paper" class="open">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=paper' | egrep $REVLINKS
   <a href="/rev/tip?style=paper">
   <a href="/rev/9d8c40cba617?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=paper' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=paper">
   <a href="/rev/a7c1559b7bba?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=paper&rev=all()' | egrep $REVLINKS
      <a href="/rev/9d8c40cba617?style=paper">third</a>
      <a href="/rev/a7c1559b7bba?style=paper">second</a>
      <a href="/rev/43c799df6e75?style=paper">first</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=paper' | egrep $REVLINKS
    <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
    <li><a href="/graph/xyzzy?style=paper">graph</a></li>
    <li><a href="/raw-rev/xyzzy?style=paper">raw</a></li>
@@ -102,7 +102,7 @@
    <td class="author"> <a href="/rev/9d8c40cba617?style=paper">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=paper">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
   <li><a href="/file/xyzzy?style=paper">browse</a></li>
@@ -116,7 +116,7 @@
   <a href="/shortlog/xyzzy?revcount=120&style=paper">more</a>
   | rev 1: <a href="/shortlog/43c799df6e75?style=paper">(0)</a> <a href="/shortlog/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
   <li><a href="/file/xyzzy?style=paper">browse</a></li>
@@ -127,7 +127,7 @@
   <a href="/graph/xyzzy?revcount=120&style=paper">more</a>
   | rev 1: <a href="/graph/43c799df6e75?style=paper">(0)</a> <a href="/graph/tip?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -138,7 +138,7 @@
   <a href="/file/xyzzy/dir/?style=paper">
   <a href="/file/xyzzy/foo?style=paper">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -153,7 +153,7 @@
    <td class="author"><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=paper' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo" />
      href="/rss-log/tip/foo" title="RSS feed for test:foo" />
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
@@ -176,7 +176,7 @@
   <a href="/log/xyzzy/foo?revcount=120&style=paper">more</a>
   | <a href="/log/43c799df6e75/foo?style=paper">(0)</a> <a href="/log/tip/foo?style=paper">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -200,7 +200,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=paper">diff</a>
   <a href="/rev/a7c1559b7bba?style=paper">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -215,7 +215,7 @@
    <td><a href="/file/43c799df6e75/foo?style=paper">43c799df6e75</a> </td>
    <td><a href="/file/9d8c40cba617/foo?style=paper">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=paper' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=paper">log</a></li>
   <li><a href="/graph/xyzzy?style=paper">graph</a></li>
   <li><a href="/rev/xyzzy?style=paper">changeset</a></li>
@@ -232,7 +232,7 @@
 
 (De)referencing symbolic revisions (coal)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal' | egrep $REVLINKS
   <li><a href="/graph/tip?style=coal">graph</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
   <li><a href="/file/tip?style=coal">browse</a></li>
@@ -247,7 +247,7 @@
   <a href="/shortlog/tip?revcount=120&style=coal">more</a>
   | rev 2: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=coal">log</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
   <li><a href="/file/tip?style=coal">browse</a></li>
@@ -258,7 +258,7 @@
   <a href="/graph/tip?revcount=120&style=coal">more</a>
   | rev 2: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/tip?style=coal">log</a></li>
   <li><a href="/graph/tip?style=coal">graph</a></li>
   <li><a href="/rev/tip?style=coal">changeset</a></li>
@@ -269,24 +269,24 @@
   <a href="/file/tip/dir/?style=coal">
   <a href="/file/tip/foo?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=coal' | egrep $REVLINKS
   <a href="/shortlog/default?style=coal" class="open">
   <a href="/shortlog/9d8c40cba617?style=coal" class="open">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=coal' | egrep $REVLINKS
   <a href="/rev/tip?style=coal">
   <a href="/rev/9d8c40cba617?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=coal' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=coal">
   <a href="/rev/a7c1559b7bba?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=coal&rev=all()' | egrep $REVLINKS
      <a href="/rev/9d8c40cba617?style=coal">third</a>
      <a href="/rev/a7c1559b7bba?style=coal">second</a>
      <a href="/rev/43c799df6e75?style=coal">first</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=coal' | egrep $REVLINKS
    <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
    <li><a href="/graph/xyzzy?style=coal">graph</a></li>
    <li><a href="/raw-rev/xyzzy?style=coal">raw</a></li>
@@ -297,7 +297,7 @@
    <td class="author"> <a href="/rev/9d8c40cba617?style=coal">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=coal">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
   <li><a href="/file/xyzzy?style=coal">browse</a></li>
@@ -311,7 +311,7 @@
   <a href="/shortlog/xyzzy?revcount=120&style=coal">more</a>
   | rev 1: <a href="/shortlog/43c799df6e75?style=coal">(0)</a> <a href="/shortlog/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
   <li><a href="/file/xyzzy?style=coal">browse</a></li>
@@ -322,7 +322,7 @@
   <a href="/graph/xyzzy?revcount=120&style=coal">more</a>
   | rev 1: <a href="/graph/43c799df6e75?style=coal">(0)</a> <a href="/graph/tip?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -333,7 +333,7 @@
   <a href="/file/xyzzy/dir/?style=coal">
   <a href="/file/xyzzy/foo?style=coal">
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -348,7 +348,7 @@
    <td class="author"><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td>
    <td class="author"><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=coal' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo" />
      href="/rss-log/tip/foo" title="RSS feed for test:foo" />
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
@@ -371,7 +371,7 @@
   <a href="/log/xyzzy/foo?revcount=120&style=coal">more</a>
   | <a href="/log/43c799df6e75/foo?style=coal">(0)</a> <a href="/log/tip/foo?style=coal">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -395,7 +395,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=coal">diff</a>
   <a href="/rev/a7c1559b7bba?style=coal">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -410,7 +410,7 @@
    <td><a href="/file/43c799df6e75/foo?style=coal">43c799df6e75</a> </td>
    <td><a href="/file/9d8c40cba617/foo?style=coal">9d8c40cba617</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=coal' | egrep $REVLINKS
   <li><a href="/shortlog/xyzzy?style=coal">log</a></li>
   <li><a href="/graph/xyzzy?style=coal">graph</a></li>
   <li><a href="/rev/xyzzy?style=coal">changeset</a></li>
@@ -427,7 +427,7 @@
 
 (De)referencing symbolic revisions (gitweb)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=gitweb' | egrep $REVLINKS
   <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
   <a class="list" href="/rev/9d8c40cba617?style=gitweb">
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> |
@@ -447,7 +447,7 @@
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb' | egrep $REVLINKS
   <a href="/log/tip?style=gitweb">changelog</a> |
   <a href="/graph/tip?style=gitweb">graph</a> |
   <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
@@ -463,7 +463,7 @@
   <a href="/file/43c799df6e75?style=gitweb">files</a>
   <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/tip?style=gitweb">shortlog</a> |
   <a href="/graph/tip?style=gitweb">graph</a> |
   <a href="/file/tip?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a>  |
@@ -476,7 +476,7 @@
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
   <a href="/log/43c799df6e75?style=gitweb">(0)</a>  <a href="/log/tip?style=gitweb">tip</a> <br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/tip?style=gitweb">shortlog</a> |
   <a href="/log/tip?style=gitweb">changelog</a> |
   <a href="/file/tip?style=gitweb">files</a> |
@@ -487,25 +487,25 @@
   <a href="/graph/tip?revcount=120&style=gitweb">more</a>
   | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=gitweb' | egrep $REVLINKS
   <td><a class="list" href="/rev/tip?style=gitweb"><b>tip</b></a></td>
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a> |
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=gitweb' | egrep $REVLINKS
   <td><a class="list" href="/rev/xyzzy?style=gitweb"><b>xyzzy</b></a></td>
   <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a> |
   <a href="/log/a7c1559b7bba?style=gitweb">changelog</a> |
   <a href="/file/a7c1559b7bba?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=gitweb' | egrep $REVLINKS
   <td class="open"><a class="list" href="/shortlog/default?style=gitweb"><b>default</b></a></td>
   <a href="/changeset/9d8c40cba617?style=gitweb">changeset</a> |
   <a href="/log/9d8c40cba617?style=gitweb">changelog</a> |
   <a href="/file/9d8c40cba617?style=gitweb">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=gitweb' | egrep $REVLINKS
   <a href="/rev/tip?style=gitweb">changeset</a>  | <a href="/archive/tip.zip">zip</a>  |
   <td><a href="/file/tip/?style=gitweb">[up]</a></td>
   <a href="/file/tip/dir?style=gitweb">dir</a>
@@ -516,7 +516,7 @@
   <a href="/log/tip/foo?style=gitweb">revisions</a> |
   <a href="/annotate/tip/foo?style=gitweb">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=gitweb&rev=all()' | egrep $REVLINKS
   <a href="/file?style=gitweb">files</a> | <a href="/archive/tip.zip">zip</a> 
   <a class="title" href="/rev/9d8c40cba617?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a>
   <a href="/rev/9d8c40cba617?style=gitweb">changeset</a><br/>
@@ -525,7 +525,7 @@
   <a class="title" href="/rev/43c799df6e75?style=gitweb"><span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span>first<span class="logtags"> </span></a>
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
@@ -542,7 +542,7 @@
   <a href="/comparison/a7c1559b7bba/foo?style=gitweb">comparison</a> |
   <a href="/log/a7c1559b7bba/foo?style=gitweb">revisions</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a>  |
@@ -555,7 +555,7 @@
   <a href="/file/43c799df6e75?style=gitweb">files</a>
   <a href="/shortlog/43c799df6e75?style=gitweb">(0)</a> <a href="/shortlog/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/graph/xyzzy?style=gitweb">graph</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> | <a href="/archive/xyzzy.zip">zip</a>  |
@@ -566,7 +566,7 @@
   <a href="/rev/43c799df6e75?style=gitweb">changeset</a><br/>
   <a href="/log/43c799df6e75?style=gitweb">(0)</a>  <a href="/log/tip?style=gitweb">tip</a> <br/>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=gitweb">shortlog</a> |
   <a href="/log/xyzzy?style=gitweb">changelog</a> |
   <a href="/file/xyzzy?style=gitweb">files</a> |
@@ -577,7 +577,7 @@
   <a href="/graph/xyzzy?revcount=120&style=gitweb">more</a>
   | <a href="/graph/43c799df6e75?style=gitweb">(0)</a> <a href="/graph/tip?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=gitweb' | egrep $REVLINKS
   <a href="/rev/xyzzy?style=gitweb">changeset</a>  | <a href="/archive/xyzzy.zip">zip</a>  |
   <td><a href="/file/xyzzy/?style=gitweb">[up]</a></td>
   <a href="/file/xyzzy/dir?style=gitweb">dir</a>
@@ -588,7 +588,7 @@
   <a href="/log/xyzzy/foo?style=gitweb">revisions</a> |
   <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/tip/foo?style=gitweb">latest</a> |
@@ -601,7 +601,7 @@
   <a class="list" href="/file/43c799df6e75/foo?style=gitweb">
   <a class="list" href="/file/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a></td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
   <a href="/annotate/xyzzy/foo?style=gitweb">annotate</a> |
   <a href="/diff/xyzzy/foo?style=gitweb">diff</a> |
@@ -616,9 +616,11 @@
   <a href="/file/43c799df6e75/foo?style=gitweb">file</a> |
   <a href="/diff/43c799df6e75/foo?style=gitweb">diff</a> |
   <a href="/annotate/43c799df6e75/foo?style=gitweb">annotate</a>
+  <a href="/log/xyzzy/foo?revcount=30&style=gitweb">less</a>
+  <a href="/log/xyzzy/foo?revcount=120&style=gitweb">more</a>
   <a href="/log/43c799df6e75/foo?style=gitweb">(0)</a> <a href="/log/tip/foo?style=gitweb">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy/?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -640,7 +642,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=gitweb">diff</a>
   <a href="/rev/a7c1559b7bba?style=gitweb">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -653,7 +655,7 @@
   <a class="list" href="/diff/43c799df6e75/foo?style=gitweb">
   <a class="list" href="/diff/9d8c40cba617/foo?style=gitweb">9d8c40cba617</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=gitweb' | egrep $REVLINKS
   <a href="/file/xyzzy?style=gitweb">files</a> |
   <a href="/rev/xyzzy?style=gitweb">changeset</a> |
   <a href="/file/xyzzy/foo?style=gitweb">file</a> |
@@ -668,7 +670,7 @@
 
 (De)referencing symbolic revisions (monoblue)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'summary?style=monoblue' | egrep $REVLINKS
               <li><a href="/archive/tip.zip">zip</a></li>
   <a href="/rev/9d8c40cba617?style=monoblue">
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
@@ -688,7 +690,7 @@
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
               <li><a href="/file/tip?style=monoblue">files</a></li>
               <li><a href="/archive/tip.zip">zip</a></li>
@@ -703,7 +705,7 @@
   <a href="/file/43c799df6e75?style=monoblue">files</a>
       <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
               <li><a href="/file/tip?style=monoblue">files</a></li>
               <li><a href="/archive/tip.zip">zip</a></li>
@@ -712,31 +714,31 @@
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
   <a href="/log/43c799df6e75?style=monoblue">(0)</a>  <a href="/log/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=monoblue' | egrep $REVLINKS
               <li><a href="/file/tip?style=monoblue">files</a></li>
           <a href="/graph/tip?revcount=30&style=monoblue">less</a>
           <a href="/graph/tip?revcount=120&style=monoblue">more</a>
           | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=monoblue' | egrep $REVLINKS
   <td><a href="/rev/tip?style=monoblue">tip</a></td>
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'bookmarks?style=monoblue' | egrep $REVLINKS
   <td><a href="/rev/xyzzy?style=monoblue">xyzzy</a></td>
   <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a> |
   <a href="/log/a7c1559b7bba?style=monoblue">changelog</a> |
   <a href="/file/a7c1559b7bba?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=monoblue' | egrep $REVLINKS
   <td class="open"><a href="/shortlog/default?style=monoblue">default</a></td>
   <a href="/rev/9d8c40cba617?style=monoblue">changeset</a> |
   <a href="/log/9d8c40cba617?style=monoblue">changelog</a> |
   <a href="/file/9d8c40cba617?style=monoblue">files</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/tip?style=monoblue">graph</a></li>
           <li><a href="/rev/tip?style=monoblue">changeset</a></li>
           <li><a href="/archive/tip.zip">zip</a></li>
@@ -749,13 +751,13 @@
   <a href="/log/tip/foo?style=monoblue">revisions</a> |
   <a href="/annotate/tip/foo?style=monoblue">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=monoblue&rev=all()' | egrep $REVLINKS
               <li><a href="/archive/tip.zip">zip</a></li>
       <h3 class="changelog"><a class="title" href="/rev/9d8c40cba617?style=monoblue">third<span class="logtags"> <span class="branchtag" title="default">default</span> <span class="tagtag" title="tip">tip</span> </span></a></h3>
   <h3 class="changelog"><a class="title" href="/rev/a7c1559b7bba?style=monoblue">second<span class="logtags"> <span class="bookmarktag" title="xyzzy">xyzzy</span> </span></a></h3>
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/raw-rev/xyzzy">raw</a></li>
@@ -771,7 +773,7 @@
   <a href="/comparison/a7c1559b7bba/foo?style=monoblue">comparison</a> |
   <a href="/log/a7c1559b7bba/foo?style=monoblue">revisions</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
               <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -783,7 +785,7 @@
   <a href="/file/43c799df6e75?style=monoblue">files</a>
       <a href="/shortlog/43c799df6e75?style=monoblue">(0)</a> <a href="/shortlog/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
               <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -791,13 +793,13 @@
   <h3 class="changelog"><a class="title" href="/rev/43c799df6e75?style=monoblue">first<span class="logtags"> </span></a></h3>
   <a href="/log/43c799df6e75?style=monoblue">(0)</a>  <a href="/log/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <a href="/graph/xyzzy?revcount=30&style=monoblue">less</a>
           <a href="/graph/xyzzy?revcount=120&style=monoblue">more</a>
           | <a href="/graph/43c799df6e75?style=monoblue">(0)</a> <a href="/graph/tip?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
           <li><a href="/rev/xyzzy?style=monoblue">changeset</a></li>
           <li><a href="/archive/xyzzy.zip">zip</a></li>
@@ -810,7 +812,7 @@
   <a href="/log/xyzzy/foo?style=monoblue">revisions</a> |
   <a href="/annotate/xyzzy/foo?style=monoblue">annotate</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy/?style=monoblue">files</a></li>
           <li><a href="/file/tip/foo?style=monoblue">latest</a></li>
@@ -823,7 +825,7 @@
   <a href="/file/43c799df6e75/foo?style=monoblue">
   <a href="/file/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -841,7 +843,7 @@
   <a href="/annotate/43c799df6e75/foo?style=monoblue">annotate</a>
       <a href="/log/43c799df6e75/foo?style=monoblue">(0)</a> <a href="/log/tip/foo?style=monoblue">tip</a> 
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy/?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -863,7 +865,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=monoblue">diff</a>
   <a href="/rev/a7c1559b7bba?style=monoblue">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -876,7 +878,7 @@
   <dd><a href="/diff/43c799df6e75/foo?style=monoblue">43c799df6e75</a></dd>
   <dd><a href="/diff/9d8c40cba617/foo?style=monoblue">9d8c40cba617</a></dd>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'comparison/xyzzy/foo?style=monoblue' | egrep $REVLINKS
               <li><a href="/graph/xyzzy?style=monoblue">graph</a></li>
               <li><a href="/file/xyzzy?style=monoblue">files</a></li>
           <li><a href="/file/xyzzy/foo?style=monoblue">file</a></li>
@@ -891,7 +893,7 @@
 
 (De)referencing symbolic revisions (spartan)
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/graph/tip?style=spartan">graph</a>
   <a href="/file/tip/?style=spartan">files</a>
@@ -902,7 +904,7 @@
     <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td>
   navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/graph/tip?style=spartan">graph</a>
   <a href="/file/tip?style=spartan">files</a>
@@ -919,20 +921,20 @@
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
   navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a>  <a href="/log/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/file/tip/?style=spartan">files</a>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'tags?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'tags?style=spartan' | egrep $REVLINKS
   <a href="/rev/9d8c40cba617?style=spartan">tip</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'branches?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'branches?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/9d8c40cba617?style=spartan" class="open">default</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file?style=spartan' | egrep $REVLINKS
   <a href="/log/tip?style=spartan">changelog</a>
   <a href="/shortlog/tip?style=spartan">shortlog</a>
   <a href="/graph/tip?style=spartan">graph</a>
@@ -944,7 +946,7 @@
   <a href="/file/tip/dir/?style=spartan">
   <td><a href="/file/tip/foo?style=spartan">foo</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog?style=spartan&rev=all()' | egrep $REVLINKS
   <a href="/archive/tip.zip">zip</a> 
     <td class="node"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td>
   <a href="/rev/a7c1559b7bba?style=spartan">a7c1559b7bba</a>
@@ -960,7 +962,7 @@
     <th class="files"><a href="/file/43c799df6e75?style=spartan">files</a>:</th>
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'rev/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -972,7 +974,7 @@
   <td class="child"><a href="/rev/9d8c40cba617?style=spartan">9d8c40cba617</a></td>
    <td class="files"><a href="/file/a7c1559b7bba/foo?style=spartan">foo</a> </td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'shortlog/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
   <a href="/file/xyzzy/?style=spartan">files</a>
@@ -982,7 +984,7 @@
     <td class="node"><a href="/rev/43c799df6e75?style=spartan">first</a></td>
   navigate: <small class="navigate"><a href="/shortlog/43c799df6e75?style=spartan">(0)</a> <a href="/shortlog/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
   <a href="/file/xyzzy?style=spartan">files</a>
@@ -996,14 +998,14 @@
     <td class="files"><a href="/diff/43c799df6e75/dir/bar?style=spartan">dir/bar</a> <a href="/diff/43c799df6e75/foo?style=spartan">foo</a> </td>
   navigate: <small class="navigate"><a href="/log/43c799df6e75?style=spartan">(0)</a>  <a href="/log/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'graph/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/file/xyzzy/?style=spartan">files</a>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
   navigate: <small class="navigate"><a href="/graph/43c799df6e75?style=spartan">(0)</a> <a href="/graph/tip?style=spartan">tip</a> </small>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1015,7 +1017,7 @@
   <a href="/file/xyzzy/dir/?style=spartan">
   <td><a href="/file/xyzzy/foo?style=spartan">foo</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'file/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1028,7 +1030,7 @@
   <a href="/file/43c799df6e75/foo?style=spartan">
   <td><a href="/file/9d8c40cba617/foo?style=spartan">9d8c40cba617</a></td>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'log/xyzzy/foo?style=spartan' | egrep $REVLINKS
      href="/atom-log/tip/foo" title="Atom feed for test:foo">
      href="/rss-log/tip/foo" title="RSS feed for test:foo">
   <a href="/file/xyzzy/foo?style=spartan">file</a>
@@ -1045,7 +1047,7 @@
      <a href="/diff/43c799df6e75/foo?style=spartan">(diff)</a>
      <a href="/annotate/43c799df6e75/foo?style=spartan">(annotate)</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'annotate/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
@@ -1067,7 +1069,7 @@
   <a href="/diff/a7c1559b7bba/foo?style=spartan">diff</a>
   <a href="/rev/a7c1559b7bba?style=spartan">changeset</a>
 
-  $ "$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS
+  $ "$TESTDIR/get-with-headers.py" $LOCALIP:$HGPORT 'diff/xyzzy/foo?style=spartan' | egrep $REVLINKS
   <a href="/log/xyzzy?style=spartan">changelog</a>
   <a href="/shortlog/xyzzy?style=spartan">shortlog</a>
   <a href="/graph/xyzzy?style=spartan">graph</a>
--- a/tests/test-hgweb.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgweb.t	Tue Apr 18 12:24:34 2017 -0400
@@ -337,7 +337,7 @@
 
   $ get-with-headers.py --twice localhost:$HGPORT 'static/style-gitweb.css' - date etag server
   200 Script output follows
-  content-length: 6986
+  content-length: 8012
   content-type: text/css
   
   body { font-family: sans-serif; font-size: 12px; border:solid #d9d8d1; border-width:1px; margin:10px; background: white; color: black; }
@@ -487,6 +487,66 @@
   	background-color: #bfdfff;
   }
   
+  .description {
+      font-family: monospace;
+  }
+  
+  /* Followlines */
+  div.page_body pre.sourcelines > span.followlines-select:hover {
+    cursor: cell;
+  }
+  
+  pre.sourcelines > span.followlines-selected {
+    background-color: #99C7E9 !important;
+  }
+  
+  div#followlines {
+    background-color: #B7B7B7;
+    border: 1px solid #CCC;
+    border-radius: 5px;
+    padding: 4px;
+    position: fixed;
+  }
+  
+  div.followlines-cancel {
+    text-align: right;
+  }
+  
+  div.followlines-cancel > button {
+    line-height: 80%;
+    padding: 0;
+    border: 0;
+    border-radius: 2px;
+    background-color: inherit;
+    font-weight: bold;
+  }
+  
+  div.followlines-cancel > button:hover {
+    color: #FFFFFF;
+    background-color: #CF1F1F;
+  }
+  
+  div.followlines-link {
+    margin: 2px;
+    margin-top: 4px;
+    font-family: sans-serif;
+  }
+  
+  div#followlines-tooltip {
+    display: none;
+    position: fixed;
+    background-color: #ffc;
+    border: 1px solid #999;
+    padding: 2px;
+  }
+  
+  .sourcelines:hover > div#followlines-tooltip {
+    display: inline;
+  }
+  
+  .sourcelines:hover > div#followlines-tooltip.hidden {
+    display: none;
+  }
   /* Graph */
   div#wrapper {
   	position: relative;
--- a/tests/test-hgwebdir.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hgwebdir.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1421,7 +1421,7 @@
   > EOF
   $ hg serve -d --pid-file=hg.pid --web-conf paths.conf \
   >     -A access-paths.log -E error-paths-9.log
-  listening at http://*:$HGPORT1/ (bound to 127.0.0.1:$HGPORT1) (glob)
+  listening at http://*:$HGPORT1/ (bound to *$LOCALIP*:$HGPORT1) (glob) (?)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT1 '?style=raw'
   200 Script output follows
@@ -1433,7 +1433,7 @@
   $ killdaemons.py
   $ hg serve -p $HGPORT2 -d -v --pid-file=hg.pid --web-conf paths.conf \
   >     -A access-paths.log -E error-paths-10.log
-  listening at http://*:$HGPORT2/ (bound to 127.0.0.1:$HGPORT2) (glob)
+  listening at http://*:$HGPORT2/ (bound to *$LOCALIP*:$HGPORT2) (glob) (?)
   $ cat hg.pid >> $DAEMON_PIDS
   $ get-with-headers.py localhost:$HGPORT2 '?style=raw'
   200 Script output follows
@@ -1566,6 +1566,135 @@
   /b/
   /c/
   
+  $ killdaemons.py
+  $ cat > paths.conf << EOF
+  > [paths]
+  > /dir1/a_repo = $root/a
+  > /dir1/a_repo/b_repo = $root/b
+  > /dir1/dir2/index = $root/b
+  > EOF
+  $ hg serve -p $HGPORT1 -d --pid-file hg.pid --webdir-conf paths.conf
+  $ cat hg.pid >> $DAEMON_PIDS
+
+  $ echo 'index file' > $root/a/index
+  $ hg --cwd $root/a ci -Am 'add index file'
+  adding index
+
+  $ get-with-headers.py localhost:$HGPORT1 '' | grep 'a_repo'
+  <td><a href="/dir1/a_repo/">dir1/a_repo</a></td>
+  <a href="/dir1/a_repo/atom-log" title="subscribe to repository atom feed">
+  <td><a href="/dir1/a_repo/b_repo/">dir1/a_repo/b_repo</a></td>
+  <a href="/dir1/a_repo/b_repo/atom-log" title="subscribe to repository atom feed">
+
+  $ get-with-headers.py localhost:$HGPORT1 'index' | grep 'a_repo'
+  <td><a href="/dir1/a_repo/">dir1/a_repo</a></td>
+  <a href="/dir1/a_repo/atom-log" title="subscribe to repository atom feed">
+  <td><a href="/dir1/a_repo/b_repo/">dir1/a_repo/b_repo</a></td>
+  <a href="/dir1/a_repo/b_repo/atom-log" title="subscribe to repository atom feed">
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1' | grep 'a_repo'
+  <td><a href="/dir1/a_repo/">a_repo</a></td>
+  <a href="/dir1/a_repo/atom-log" title="subscribe to repository atom feed">
+  <td><a href="/dir1/a_repo/b_repo/">a_repo/b_repo</a></td>
+  <a href="/dir1/a_repo/b_repo/atom-log" title="subscribe to repository atom feed">
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1/index' | grep 'a_repo'
+  <td><a href="/dir1/a_repo/">a_repo</a></td>
+  <a href="/dir1/a_repo/atom-log" title="subscribe to repository atom feed">
+  <td><a href="/dir1/a_repo/b_repo/">a_repo/b_repo</a></td>
+  <a href="/dir1/a_repo/b_repo/atom-log" title="subscribe to repository atom feed">
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1/a_repo' | grep 'a_repo'
+  <link rel="icon" href="/dir1/a_repo/static/hgicon.png" type="image/png" />
+  <link rel="stylesheet" href="/dir1/a_repo/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/dir1/a_repo/static/mercurial.js"></script>
+  <title>dir1/a_repo: log</title>
+     href="/dir1/a_repo/atom-log" title="Atom feed for dir1/a_repo" />
+     href="/dir1/a_repo/rss-log" title="RSS feed for dir1/a_repo" />
+  <img src="/dir1/a_repo/static/hglogo.png" alt="mercurial" /></a>
+  <li><a href="/dir1/a_repo/graph/tip">graph</a></li>
+  <li><a href="/dir1/a_repo/tags">tags</a></li>
+  <li><a href="/dir1/a_repo/bookmarks">bookmarks</a></li>
+  <li><a href="/dir1/a_repo/branches">branches</a></li>
+  <li><a href="/dir1/a_repo/rev/tip">changeset</a></li>
+  <li><a href="/dir1/a_repo/file/tip">browse</a></li>
+   <li><a href="/dir1/a_repo/help">help</a></li>
+  <a href="/dir1/a_repo/atom-log" title="subscribe to atom feed">
+  <img class="atom-logo" src="/dir1/a_repo/static/feed-icon-14x14.png" alt="atom feed" />
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/dir1">dir1</a> &gt; <a href="/dir1/a_repo">a_repo</a> </h2>
+  <form class="search" action="/dir1/a_repo/log">
+  number or hash, or <a href="/dir1/a_repo/help/revsets">revset expression</a>.</div>
+  <a href="/dir1/a_repo/shortlog/tip?revcount=30">less</a>
+  <a href="/dir1/a_repo/shortlog/tip?revcount=120">more</a>
+  | rev 1: <a href="/dir1/a_repo/shortlog/8580ff50825a">(0)</a> <a href="/dir1/a_repo/shortlog/tip">tip</a> 
+     <a href="/dir1/a_repo/rev/71a89161f014">add index file</a>
+     <a href="/dir1/a_repo/rev/8580ff50825a">a</a>
+  <a href="/dir1/a_repo/shortlog/tip?revcount=30">less</a>
+  <a href="/dir1/a_repo/shortlog/tip?revcount=120">more</a>
+  | rev 1: <a href="/dir1/a_repo/shortlog/8580ff50825a">(0)</a> <a href="/dir1/a_repo/shortlog/tip">tip</a> 
+              '/dir1/a_repo/shortlog/%next%',
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1/a_repo/index' | grep 'a_repo'
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/dir1">dir1</a> &gt; <a href="/dir1/a_repo">a_repo</a> </h2>
+  <td><a href="/dir1/a_repo/b_repo/">b_repo</a></td>
+  <a href="/dir1/a_repo/b_repo/atom-log" title="subscribe to repository atom feed">
+
+Files named 'index' are not blocked
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1/a_repo/raw-file/tip/index'
+  200 Script output follows
+  
+  index file
+
+Repos named 'index' take precedence over the index file
+
+  $ get-with-headers.py localhost:$HGPORT1 'dir1/dir2/index' | grep 'index'
+  <link rel="icon" href="/dir1/dir2/index/static/hgicon.png" type="image/png" />
+  <meta name="robots" content="index, nofollow" />
+  <link rel="stylesheet" href="/dir1/dir2/index/static/style-paper.css" type="text/css" />
+  <script type="text/javascript" src="/dir1/dir2/index/static/mercurial.js"></script>
+  <title>dir1/dir2/index: log</title>
+     href="/dir1/dir2/index/atom-log" title="Atom feed for dir1/dir2/index" />
+     href="/dir1/dir2/index/rss-log" title="RSS feed for dir1/dir2/index" />
+  <img src="/dir1/dir2/index/static/hglogo.png" alt="mercurial" /></a>
+  <li><a href="/dir1/dir2/index/graph/tip">graph</a></li>
+  <li><a href="/dir1/dir2/index/tags">tags</a></li>
+  <li><a href="/dir1/dir2/index/bookmarks">bookmarks</a></li>
+  <li><a href="/dir1/dir2/index/branches">branches</a></li>
+  <li><a href="/dir1/dir2/index/rev/tip">changeset</a></li>
+  <li><a href="/dir1/dir2/index/file/tip">browse</a></li>
+   <li><a href="/dir1/dir2/index/help">help</a></li>
+  <a href="/dir1/dir2/index/atom-log" title="subscribe to atom feed">
+  <img class="atom-logo" src="/dir1/dir2/index/static/feed-icon-14x14.png" alt="atom feed" />
+  <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/dir1">dir1</a> &gt; <a href="/dir1/dir2">dir2</a> &gt; <a href="/dir1/dir2/index">index</a> </h2>
+  <form class="search" action="/dir1/dir2/index/log">
+  number or hash, or <a href="/dir1/dir2/index/help/revsets">revset expression</a>.</div>
+  <a href="/dir1/dir2/index/shortlog/tip?revcount=30">less</a>
+  <a href="/dir1/dir2/index/shortlog/tip?revcount=120">more</a>
+  | rev 0: <a href="/dir1/dir2/index/shortlog/39505516671b">(0)</a> <a href="/dir1/dir2/index/shortlog/tip">tip</a> 
+     <a href="/dir1/dir2/index/rev/39505516671b">b</a>
+  <a href="/dir1/dir2/index/shortlog/tip?revcount=30">less</a>
+  <a href="/dir1/dir2/index/shortlog/tip?revcount=120">more</a>
+  | rev 0: <a href="/dir1/dir2/index/shortlog/39505516671b">(0)</a> <a href="/dir1/dir2/index/shortlog/tip">tip</a> 
+              '/dir1/dir2/index/shortlog/%next%',
+
+  $ killdaemons.py
+
+  $ cat > paths.conf << EOF
+  > [paths]
+  > / = $root/a
+  > EOF
+  $ hg serve -p $HGPORT1 -d --pid-file hg.pid --webdir-conf paths.conf
+  $ cat hg.pid >> $DAEMON_PIDS
+
+  $ hg id http://localhost:$HGPORT1
+  71a89161f014
+
+  $ get-with-headers.py localhost:$HGPORT1 '' | grep 'index'
+  <meta name="robots" content="index, nofollow" />
+     <a href="/rev/71a89161f014">add index file</a>
+
+  $ killdaemons.py
 
 paths errors 1
 
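
The new hgwebdir tests above pin down how a path component named 'index' resolves: a repository configured at exactly that path wins, a file literally named index inside a repository is still served, and a bare 'index' otherwise falls through to the directory listing. A rough sketch of that precedence, using a hypothetical resolve() helper rather than hgwebdir's actual code:

    def resolve(path, repos):
        # An exact repo match beats everything, even a trailing 'index'.
        if path in repos:
            return ('repo', path)
        # Otherwise 'index' is just the listing of its parent directory.
        if path == 'index':
            return ('listing', '')
        if path.endswith('/index'):
            return ('listing', path[:-len('/index')])
        return ('listing', path)

    repos = {'dir1/a_repo', 'dir1/a_repo/b_repo', 'dir1/dir2/index'}
    print(resolve('dir1/dir2/index', repos))  # ('repo', 'dir1/dir2/index')
    print(resolve('dir1/index', repos))       # ('listing', 'dir1')
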
--- a/tests/test-highlight.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-highlight.t	Tue Apr 18 12:24:34 2017 -0400
@@ -149,7 +149,7 @@
   <div class="overflow">
   <div class="sourcefirst linewraptoggle">line wrap: <a class="linewraplink" href="javascript:toggleLinewrap()">on</a></div>
   <div class="sourcefirst"> line source</div>
-  <pre class="sourcelines stripes4 wrap bottomline">
+  <pre class="sourcelines stripes4 wrap bottomline" data-logurl="/log/tip/primes.py">
   <span id="l1"><span class="c">#!/usr/bin/env python</span></span><a href="#l1"></a>
   <span id="l2"></span><a href="#l2"></a>
   <span id="l3"><span class="sd">&quot;&quot;&quot;Fun with generators. Corresponding Haskell implementation:</span></span><a href="#l3"></a>
@@ -184,6 +184,9 @@
   <span id="l32">    <span class="kn">print</span> <span class="s">&quot;The first </span><span class="si">%d</span><span class="s"> primes: </span><span class="si">%s</span><span class="s">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="nb">list</span><span class="p">(</span><span class="n">islice</span><span class="p">(</span><span class="n">p</span><span class="p">,</span> <span class="n">n</span><span class="p">)))</span></span><a href="#l32"></a>
   <span id="l33"></span><a href="#l33"></a></pre>
   </div>
+  
+  <script type="text/javascript" src="/static/followlines.js"></script>
+  
   </div>
   </div>
   
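
test-highlight.t now expects a data-logurl attribute on the source view plus the inclusion of followlines.js, the client side of the new followlines feature. Presumably the script combines that base URL with the selected line range; a sketch under that assumption, where the linerange parameter name is itself an assumption rather than something this diff shows:

    def followlines_url(logurl, fromline, toline):
        # Build a log URL for the selected block of lines; 'linerange'
        # is an assumed query parameter name, not read from this diff.
        if not 0 < fromline <= toline:
            raise ValueError('invalid line range')
        return '%s?linerange=%d:%d' % (logurl, fromline, toline)

    print(followlines_url('/log/tip/primes.py', 5, 12))
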
--- a/tests/test-histedit-arguments.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-arguments.t	Tue Apr 18 12:24:34 2017 -0400
@@ -72,7 +72,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 Run on a revision not ancestors of the current working directory.
@@ -308,7 +308,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 Test --continue with --keep
@@ -544,7 +544,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
   $ cd ..
--- a/tests/test-histedit-bookmark-motion.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-bookmark-motion.t	Tue Apr 18 12:24:34 2017 -0400
@@ -78,7 +78,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick 177f92b77385 2 c
@@ -141,7 +141,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose << EOF | grep histedit
   > pick b346ab9a313d 1 c
--- a/tests/test-histedit-commute.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-commute.t	Tue Apr 18 12:24:34 2017 -0400
@@ -72,7 +72,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 edit the history
@@ -350,7 +350,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 should also work if a commit message is missing
--- a/tests/test-histedit-edit.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-edit.t	Tue Apr 18 12:24:34 2017 -0400
@@ -478,5 +478,5 @@
   #  p, fold = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
--- a/tests/test-histedit-fold-non-commute.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-fold-non-commute.t	Tue Apr 18 12:24:34 2017 -0400
@@ -5,6 +5,12 @@
   > histedit=
   > EOF
 
+  $ modwithdate ()
+  > {
+  >     echo $1 > $1
+  >     hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >     hg init $1
@@ -14,12 +20,14 @@
   >         hg add $x
   >     done
   >     hg ci -m 'Initial commit'
-  >     for x in a b c d e f ; do
-  >         echo $x > $x
-  >         hg ci -m $x
-  >     done
+  >     modwithdate a 1
+  >     modwithdate b 2
+  >     modwithdate c 3
+  >     modwithdate d 4
+  >     modwithdate e 5
+  >     modwithdate f 6
   >     echo 'I can haz no commute' > e
-  >     hg ci -m 'does not commute with e'
+  >     hg ci -m 'does not commute with e' -d '7 0'
   >     cd ..
   > }
 
@@ -34,48 +42,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  fold 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  fold 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -89,7 +97,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (fold 39522b764e3d)
+  Fix up the change (fold 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -113,7 +121,7 @@
   HG: changed e
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -124,34 +132,34 @@
   continue: hg histedit --continue
   $ hg diff
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:d9cf42e54966
+  @  changeset:   5:1300355b1a54
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:10486af2e984
+  o  changeset:   4:e2ac33269083
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -175,7 +183,7 @@
 
   $ cd ..
 
-Repeat test using "roll", not "fold". "roll" folds in changes but drops message
+Repeat test using "roll", not "fold". "roll" folds in changes but drops message and date
 
   $ initrepo r2
   $ cd r2
@@ -189,48 +197,48 @@
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 5 >> $EDITED
   $ hg log --template 'pick {node|short} {rev} {desc}\n' -r 6 >> $EDITED
   $ cat $EDITED
-  pick 65a9a84f33fd 3 c
-  pick 00f1c5383965 4 d
-  roll 39522b764e3d 7 does not commute with e
-  pick 7b4e2f4b7bcd 5 e
-  pick 500cac37a696 6 f
+  pick 092e4ce14829 3 c
+  pick ae78f4c9d74f 4 d
+  roll 42abbb61bede 7 does not commute with e
+  pick 7f3755409b00 5 e
+  pick dd184f2faeb0 6 f
 
 log before edit
   $ hg log --graph
-  @  changeset:   7:39522b764e3d
+  @  changeset:   7:42abbb61bede
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:07 1970 +0000
   |  summary:     does not commute with e
   |
-  o  changeset:   6:500cac37a696
+  o  changeset:   6:dd184f2faeb0
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   5:7b4e2f4b7bcd
+  o  changeset:   5:7f3755409b00
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:05 1970 +0000
   |  summary:     e
   |
-  o  changeset:   4:00f1c5383965
+  o  changeset:   4:ae78f4c9d74f
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -244,7 +252,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (roll 39522b764e3d)
+  Fix up the change (roll 42abbb61bede)
   (hg histedit --continue to resume)
 
 fix up
@@ -255,7 +263,7 @@
   $ hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed'
   merging e
   warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
-  Fix up the change (pick 7b4e2f4b7bcd)
+  Fix up the change (pick 7f3755409b00)
   (hg histedit --continue to resume)
 
 just continue this time
@@ -264,34 +272,34 @@
   (no more unresolved files)
   continue: hg histedit --continue
   $ hg histedit --continue 2>&1 | fixbundle
-  7b4e2f4b7bcd: skipping changeset (no changes)
+  7f3755409b00: skipping changeset (no changes)
 
 log after edit
   $ hg log --graph
-  @  changeset:   5:e7c4f5d4eb75
+  @  changeset:   5:b538bcb461be
   |  tag:         tip
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:06 1970 +0000
   |  summary:     f
   |
-  o  changeset:   4:803d1bb561fc
+  o  changeset:   4:317e37cb6d66
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:04 1970 +0000
   |  summary:     d
   |
-  o  changeset:   3:65a9a84f33fd
+  o  changeset:   3:092e4ce14829
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:03 1970 +0000
   |  summary:     c
   |
-  o  changeset:   2:da6535b52e45
+  o  changeset:   2:40ccdd8beb95
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:02 1970 +0000
   |  summary:     b
   |
-  o  changeset:   1:c1f09da44841
+  o  changeset:   1:cd997a145b29
   |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
+  |  date:        Thu Jan 01 00:00:01 1970 +0000
   |  summary:     a
   |
   o  changeset:   0:1715188a53c7
@@ -316,16 +324,16 @@
 description is taken from rollup target commit
 
   $ hg log --debug --rev 4
-  changeset:   4:803d1bb561fceac3129ec778db9da249a3106fc3
+  changeset:   4:317e37cb6d66c1c84628c00e5bf4c8c292831951
   phase:       draft
-  parent:      3:65a9a84f33fdeb1ad5679b3941ec885d2b24027b
+  parent:      3:092e4ce14829f4974399ce4316d59f64ef0b6725
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    4:b068a323d969f22af1296ec6a5ea9384cef437ac
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:04 1970 +0000
   files:       d e
   extra:       branch=default
-  extra:       histedit_source=00f1c53839651fa5c76d423606811ea5455a79d0,39522b764e3d26103f08bd1fa2ccd3e3d7dbcf4e
+  extra:       histedit_source=ae78f4c9d74ffa4b6cb5045001c303fe9204e890,42abbb61bede6f4366fa1e74a664343e5d558a70
   description:
   d
   
--- a/tests/test-histedit-fold.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-fold.t	Tue Apr 18 12:24:34 2017 -0400
@@ -20,52 +20,60 @@
 
 Simple folding
 --------------------
+  $ addwithdate ()
+  > {
+  >     echo $1 > $1
+  >     hg add $1
+  >     hg ci -m $1 -d "$2 0"
+  > }
+
   $ initrepo ()
   > {
   >     hg init r
   >     cd r
-  >     for x in a b c d e f ; do
-  >         echo $x > $x
-  >         hg add $x
-  >         hg ci -m $x
-  >     done
+  >     addwithdate a 1
+  >     addwithdate b 2
+  >     addwithdate c 3
+  >     addwithdate d 4
+  >     addwithdate e 5
+  >     addwithdate f 6
   > }
 
   $ initrepo
 
 log before edit
   $ hg logt --graph
-  @  5:652413bf663e f
+  @  5:178e35e0ce73 f
   |
-  o  4:e860deea161a e
+  o  4:1ddb6c90f2ee e
   |
-  o  3:055a42cdd887 d
+  o  3:532247a8969b d
   |
-  o  2:177f92b77385 c
+  o  2:ff2c9fa2018b c
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
-  $ hg histedit 177f92b77385 --commands - 2>&1 <<EOF | fixbundle
-  > pick e860deea161a e
-  > pick 652413bf663e f
-  > fold 177f92b77385 c
-  > pick 055a42cdd887 d
+  $ hg histedit ff2c9fa2018b --commands - 2>&1 <<EOF | fixbundle
+  > pick 1ddb6c90f2ee e
+  > pick 178e35e0ce73 f
+  > fold ff2c9fa2018b c
+  > pick 532247a8969b d
   > EOF
 
 log after edit
   $ hg logt --graph
-  @  4:9c277da72c9b d
+  @  4:c4d7f3def76d d
   |
-  o  3:6de59d13424a f
+  o  3:575228819b7e f
   |
-  o  2:ee283cb5f2d5 e
+  o  2:505a591af19e e
   |
-  o  1:d2ae7f538514 b
+  o  1:97d72e5f12c7 b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
 post-fold manifest
@@ -78,19 +86,19 @@
   f
 
 
-check histedit_source
+check histedit_source, including that it uses the later date, taken from the first changeset
 
   $ hg log --debug --rev 3
-  changeset:   3:6de59d13424a8a13acd3e975514aed29dd0d9b2d
+  changeset:   3:575228819b7e6ed69e8c0a6a383ee59a80db7358
   phase:       draft
-  parent:      2:ee283cb5f2d5955443f23a27b697a04339e9a39a
+  parent:      2:505a591af19eed18f560af827b9e03d2076773dc
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    3:81eede616954057198ead0b2c73b41d1f392829a
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:06 1970 +0000
   files+:      c f
   extra:       branch=default
-  extra:       histedit_source=a4f7421b80f79fcc59fff01bcbf4a53d127dd6d3,177f92b773850b59254aa5e923436f921b55483b
+  extra:       histedit_source=7cad1d7030207872dfd1c3a7cb430f24f2884086,ff2c9fa2018b15fa74b33363bda9527323e2a99f
   description:
   f
   ***
@@ -98,43 +106,43 @@
   
   
 
-rollup will fold without preserving the folded commit's message
+rollup will fold without preserving the folded commit's message or date
 
   $ OLDHGEDITOR=$HGEDITOR
   $ HGEDITOR=false
-  $ hg histedit d2ae7f538514 --commands - 2>&1 <<EOF | fixbundle
-  > pick d2ae7f538514 b
-  > roll ee283cb5f2d5 e
-  > pick 6de59d13424a f
-  > pick 9c277da72c9b d
+  $ hg histedit 97d72e5f12c7 --commands - 2>&1 <<EOF | fixbundle
+  > pick 97d72e5f12c7 b
+  > roll 505a591af19e e
+  > pick 575228819b7e f
+  > pick c4d7f3def76d d
   > EOF
 
   $ HGEDITOR=$OLDHGEDITOR
 
 log after edit
   $ hg logt --graph
-  @  3:c4a9eb7989fc d
+  @  3:bab801520cec d
   |
-  o  2:8e03a72b6f83 f
+  o  2:58c8f2bfc151 f
   |
-  o  1:391ee782c689 b
+  o  1:5d939c56c72e b
   |
-  o  0:cb9a9f314b8b a
+  o  0:8580ff50825a a
   
 
 description is taken from rollup target commit
 
   $ hg log --debug --rev 1
-  changeset:   1:391ee782c68930be438ccf4c6a403daedbfbffa5
+  changeset:   1:5d939c56c72e77e29f5167696218e2131a40f5cf
   phase:       draft
-  parent:      0:cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  parent:      0:8580ff50825a50c8f716709acdf8de0deddcd6ab
   parent:      -1:0000000000000000000000000000000000000000
   manifest:    1:b5e112a3a8354e269b1524729f0918662d847c38
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:02 1970 +0000
   files+:      b e
   extra:       branch=default
-  extra:       histedit_source=d2ae7f538514cd87c17547b0de4cea71fe1af9fb,ee283cb5f2d5955443f23a27b697a04339e9a39a
+  extra:       histedit_source=97d72e5f12c7e84f85064aa72e5a297142c36ed9,505a591af19eed18f560af827b9e03d2076773dc
   description:
   b
   
@@ -163,13 +171,13 @@
   > EOF
 
   $ rm -f .hg/last-message.txt
-  $ hg status --rev '8e03a72b6f83^1::c4a9eb7989fc'
+  $ hg status --rev '58c8f2bfc151^1::bab801520cec'
   A c
   A d
   A f
-  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 8e03a72b6f83 --commands - 2>&1 <<EOF
-  > pick 8e03a72b6f83 f
-  > fold c4a9eb7989fc d
+  $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit 58c8f2bfc151 --commands - 2>&1 <<EOF
+  > pick 58c8f2bfc151 f
+  > fold bab801520cec d
   > EOF
   allow non-folding commit
   ==== before editing
@@ -209,37 +217,37 @@
   $ cd ..
   $ rm -r r
 
-folding preserves initial author
---------------------------------
+folding preserves initial author but uses later date
+----------------------------------------------------
 
   $ initrepo
 
-  $ hg ci --user "someone else" --amend --quiet
+  $ hg ci -d '7 0' --user "someone else" --amend --quiet
 
 tip before edit
   $ hg log --rev .
-  changeset:   5:a00ad806cb55
+  changeset:   5:10c36dd37515
   tag:         tip
   user:        someone else
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     f
   
 
   $ hg --config progress.debug=1 --debug \
-  > histedit e860deea161a --commands - 2>&1 <<EOF | \
+  > histedit 1ddb6c90f2ee --commands - 2>&1 <<EOF | \
   > egrep 'editing|unresolved'
-  > pick e860deea161a e
-  > fold a00ad806cb55 f
+  > pick 1ddb6c90f2ee e
+  > fold 10c36dd37515 f
   > EOF
-  editing: pick e860deea161a 4 e 1/2 changes (50.00%)
-  editing: fold a00ad806cb55 5 f 2/2 changes (100.00%)
+  editing: pick 1ddb6c90f2ee 4 e 1/2 changes (50.00%)
+  editing: fold 10c36dd37515 5 f 2/2 changes (100.00%)
 
-tip after edit
+tip after edit, which should use the later date, taken from the second changeset
   $ hg log --rev .
-  changeset:   4:698d4e8040a1
+  changeset:   4:e4f3ec5d0b40
   tag:         tip
   user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
+  date:        Thu Jan 01 00:00:07 1970 +0000
   summary:     e
   
 
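
The histedit date changes above all assert the same two rules: fold keeps both messages (joined with the *** separator visible in the histedit_source checks) and takes the later of the two dates, while roll keeps only the target's message and date; both keep the target's user. A simplified model of those rules, a sketch rather than histedit's implementation:

    def combine(target, folded, action):
        if action == 'fold':
            desc = target['desc'] + '\n***\n' + folded['desc']
            date = max(target['date'], folded['date'])
        elif action == 'roll':
            desc = target['desc']   # folded description discarded
            date = target['date']   # folded date discarded as well
        else:
            raise ValueError('unknown action: %s' % action)
        return {'desc': desc, 'date': date, 'user': target['user']}

    e = {'desc': 'e', 'date': 5, 'user': 'test'}
    f = {'desc': 'f', 'date': 7, 'user': 'someone else'}
    print(combine(e, f, 'fold')['date'])   # 7: fold takes the later date
    print(combine(e, f, 'roll')['date'])   # 5: roll keeps the target's date
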
--- a/tests/test-histedit-obsolete.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-obsolete.t	Tue Apr 18 12:24:34 2017 -0400
@@ -136,7 +136,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ hg histedit 1 --commands - --verbose <<EOF | grep histedit
   > pick 177f92b77385 2 c
@@ -503,3 +503,74 @@
   abort: cannot edit history that contains merges
   [255]
   $ cd ..
+
+Check abort behavior
+-------------------------------------------
+
+We check that abort properly cleans the repository so the same histedit can be
+attempted later.
+
+  $ cp -R base abort
+  $ cd abort
+  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
+  > pick b449568bf7fc 13 f
+  > pick 7395e1ff83bd 15 h
+  > pick 6b70183d2492 14 g
+  > pick b605fb7503f2 16 i
+  > roll 3a6c53ee7f3d 17 j
+  > edit ee118ab9fa44 18 k
+  > EOF
+  Editing (ee118ab9fa44), you may commit or record as needed now.
+  (hg histedit --continue to resume)
+  [1]
+
+  $ hg histedit --abort
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  saved backup bundle to $TESTTMP/abort/.hg/strip-backup/4dc06258baa6-dff4ef05-backup.hg (glob)
+
+  $ hg log -G
+  @  18:ee118ab9fa44 (secret) k
+  |
+  o  17:3a6c53ee7f3d (secret) j
+  |
+  o  16:b605fb7503f2 (secret) i
+  |
+  o  15:7395e1ff83bd (draft) h
+  |
+  o  14:6b70183d2492 (draft) g
+  |
+  o  13:b449568bf7fc (draft) f
+  |
+  o  12:40db8afa467b (public) c
+  |
+  o  0:cb9a9f314b8b (public) a
+  
+  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
+  > pick b449568bf7fc 13 f
+  > pick 7395e1ff83bd 15 h
+  > pick 6b70183d2492 14 g
+  > pick b605fb7503f2 16 i
+  > pick 3a6c53ee7f3d 17 j
+  > edit ee118ab9fa44 18 k
+  > EOF
+  Editing (ee118ab9fa44), you may commit or record as needed now.
+  (hg histedit --continue to resume)
+  [1]
+  $ hg histedit --continue
+  $ hg log -G
+  @  23:175d6b286a22 (secret) k
+  |
+  o  22:44ca09d59ae4 (secret) j
+  |
+  o  21:31747692a644 (secret) i
+  |
+  o  20:9985cd4f21fa (draft) g
+  |
+  o  19:4dc06258baa6 (draft) h
+  |
+  o  13:b449568bf7fc (draft) f
+  |
+  o  12:40db8afa467b (public) c
+  |
+  o  0:cb9a9f314b8b (public) a
+  
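
The abort test above asserts two things: the interrupted edit's temporary commits are stripped into a backup bundle under .hg/strip-backup/, and the original changesets survive unchanged so the same rule list can be replayed. A toy model of that contract, hypothetical rather than the extension's code:

    def abort(original, created):
        # Nodes written by the interrupted edit are stripped; the test
        # shows them saved as a bundle under .hg/strip-backup/.
        backup = list(created)
        # The pre-edit changesets come back exactly as they were, which
        # is why the second run starts from the same hashes.
        return list(original), backup

    orig = ['b449568bf7fc', '7395e1ff83bd', '6b70183d2492',
            'b605fb7503f2', '3a6c53ee7f3d', 'ee118ab9fa44']
    kept, backup = abort(orig, ['4dc06258baa6'])
    print(kept == orig)  # True: ready for the second attempt
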
--- a/tests/test-histedit-outgoing.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-histedit-outgoing.t	Tue Apr 18 12:24:34 2017 -0400
@@ -54,7 +54,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ cd ..
 
@@ -88,7 +88,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
   $ cd ..
 
@@ -114,7 +114,7 @@
   #  p, pick = use commit
   #  d, drop = remove commit from history
   #  f, fold = use commit, but combine it with the one above
-  #  r, roll = like fold, but discard this commit's description
+  #  r, roll = like fold, but discard this commit's description and date
   #
 
 test to check number of roots in outgoing revisions
--- a/tests/test-hook.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-hook.t	Tue Apr 18 12:24:34 2017 -0400
@@ -29,14 +29,14 @@
   $ echo a > a
   $ hg add a
   $ hg commit -m a
-  precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=0000000000000000000000000000000000000000
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
   0:cb9a9f314b8b
-  pretxnclose hook: HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
-  commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
+  pretxnclose hook: HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  txnclose hook: HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  commit hook: HG_HOOKNAME=commit HG_HOOKTYPE=commit HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
+  commit.b hook: HG_HOOKNAME=commit.b HG_HOOKTYPE=commit HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
 
   $ hg clone . ../b
   updating to branch default
@@ -57,65 +57,65 @@
   $ cd ../a
   $ echo b >> a
   $ hg commit -m a1 -d "1 0"
-  precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
   1:ab228980c14d
-  pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
-  commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  pretxnclose hook: HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  txnclose hook: HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  commit hook: HG_HOOKNAME=commit HG_HOOKTYPE=commit HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  commit.b hook: HG_HOOKNAME=commit.b HG_HOOKTYPE=commit HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
   $ hg update -C 0
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo b > b
   $ hg add b
   $ hg commit -m b -d '1 0'
-  precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
   2:ee9deb46ab31
-  pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
+  pretxnclose hook: HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
   created new head
-  txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
-  commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  txnclose hook: HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  commit hook: HG_HOOKNAME=commit HG_HOOKTYPE=commit HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+  commit.b hook: HG_HOOKNAME=commit.b HG_HOOKTYPE=commit HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
   $ hg merge 1
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg commit -m merge -d '2 0'
-  precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
   3:07f3376c1e65
-  pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
-  commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
+  pretxnclose hook: HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  txnclose hook: HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  commit hook: HG_HOOKNAME=commit HG_HOOKTYPE=commit HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
+  commit.b hook: HG_HOOKNAME=commit.b HG_HOOKTYPE=commit HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
 
 test generic hooks
 
   $ hg id
-  pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
+  pre-identify hook: HG_ARGS=id HG_HOOKNAME=pre-identify HG_HOOKTYPE=pre-identify HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
   abort: pre-identify hook exited with status 1
   [255]
   $ hg cat b
-  pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
+  pre-cat hook: HG_ARGS=cat b HG_HOOKNAME=pre-cat HG_HOOKTYPE=pre-cat HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
   b
-  post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
+  post-cat hook: HG_ARGS=cat b HG_HOOKNAME=post-cat HG_HOOKTYPE=post-cat HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
 
   $ cd ../b
   $ hg pull ../a
   pulling from ../a
   searching for changes
-  prechangegroup hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
+  prechangegroup hook: HG_HOOKNAME=prechangegroup HG_HOOKTYPE=prechangegroup HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
   adding changesets
   adding manifests
   adding file changes
   added 3 changesets with 2 changes to 2 files
-  changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
-  incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
-  incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
-  incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
+  incoming hook: HG_HOOKNAME=incoming HG_HOOKTYPE=incoming HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
   (run 'hg update' to get a working copy)
 
 tag hooks can see env vars
@@ -126,19 +126,19 @@
   > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py tag"
   > EOF
   $ hg tag -d '3 0' a
-  pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
-  precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
+  pretag hook: HG_HOOKNAME=pretag HG_HOOKTYPE=pretag HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
   4:539e4b31b6dc
-  pretxnclose hook: HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
-  txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
-  commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
+  pretxnclose hook: HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  tag hook: HG_HOOKNAME=tag HG_HOOKTYPE=tag HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
+  txnclose hook: HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  commit hook: HG_HOOKNAME=commit HG_HOOKTYPE=commit HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
+  commit.b hook: HG_HOOKNAME=commit.b HG_HOOKTYPE=commit HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
   $ hg tag -l la
-  pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
-  tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
+  pretag hook: HG_HOOKNAME=pretag HG_HOOKTYPE=pretag HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
+  tag hook: HG_HOOKNAME=tag HG_HOOKTYPE=tag HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
 
 pretag hook can forbid tagging
 
@@ -146,13 +146,13 @@
   > pretag.forbid = sh -c "printenv.py pretag.forbid 1"
   > EOF
   $ hg tag -d '4 0' fa
-  pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
-  pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
+  pretag hook: HG_HOOKNAME=pretag HG_HOOKTYPE=pretag HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
+  pretag.forbid hook: HG_HOOKNAME=pretag.forbid HG_HOOKTYPE=pretag HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
   abort: pretag.forbid hook exited with status 1
   [255]
   $ hg tag -l fla
-  pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
-  pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
+  pretag hook: HG_HOOKNAME=pretag HG_HOOKTYPE=pretag HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
+  pretag.forbid hook: HG_HOOKNAME=pretag.forbid HG_HOOKTYPE=pretag HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
   abort: pretag.forbid hook exited with status 1
   [255]
 
@@ -168,15 +168,15 @@
   $ hg -q tip
   4:539e4b31b6dc
   $ hg commit -m 'fail' -d '4 0'
-  precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
-  pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
+  pretxncommit hook: HG_HOOKNAME=pretxncommit HG_HOOKTYPE=pretxncommit HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
   5:6f611f8018c1
   5:6f611f8018c1
-  pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
+  pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1 HG_HOOKTYPE=pretxncommit HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
   transaction abort!
   txnabort python hook: txnid,txnname
-  txnabort hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
+  txnabort hook: HG_HOOKNAME=txnabort.1 HG_HOOKTYPE=txnabort HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
   rollback completed
   abort: pretxncommit.forbid1 hook exited with status 1
   [255]
@@ -204,8 +204,8 @@
   > precommit.forbid = sh -c "printenv.py precommit.forbid 1"
   > EOF
   $ hg commit -m 'fail' -d '4 0'
-  precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
-  precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
+  precommit hook: HG_HOOKNAME=precommit HG_HOOKTYPE=precommit HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
+  precommit.forbid hook: HG_HOOKNAME=precommit.forbid HG_HOOKTYPE=precommit HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
   abort: precommit.forbid hook exited with status 1
   [255]
   $ hg -q tip
@@ -217,7 +217,7 @@
   > preupdate = sh -c "printenv.py preupdate"
   > EOF
   $ hg update 1
-  preupdate hook: HG_PARENT1=ab228980c14d
+  preupdate hook: HG_HOOKNAME=preupdate HG_HOOKTYPE=preupdate HG_PARENT1=ab228980c14d
   0 files updated, 0 files merged, 2 files removed, 0 files unresolved
 
 update hook
@@ -226,8 +226,8 @@
   > update = sh -c "printenv.py update"
   > EOF
   $ hg update
-  preupdate hook: HG_PARENT1=539e4b31b6dc
-  update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
+  preupdate hook: HG_HOOKNAME=preupdate HG_HOOKTYPE=preupdate HG_PARENT1=539e4b31b6dc
+  update hook: HG_ERROR=0 HG_HOOKNAME=update HG_HOOKTYPE=update HG_PARENT1=539e4b31b6dc
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
 pushkey hook
@@ -241,10 +241,10 @@
   pushing to ../a
   searching for changes
   no changes found
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=push (glob)
-  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_PENDING=$TESTTMP/a HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=file:$TESTTMP/a (glob)
-  pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=file:$TESTTMP/a (glob)
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=push
+  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/a
+  pushkey hook: HG_HOOKNAME=pushkey HG_HOOKTYPE=pushkey HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/a
   exporting bookmark foo
   [1]
   $ cd ../a
@@ -255,15 +255,15 @@
   > listkeys = sh -c "printenv.py listkeys"
   > EOF
   $ hg bookmark -r null bar
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
-  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
+  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ cd ../b
   $ hg pull -B bar ../a
   pulling from ../a
-  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
+  listkeys hook: HG_HOOKNAME=listkeys HG_HOOKTYPE=listkeys HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
   no changes found
-  listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
+  listkeys hook: HG_HOOKNAME=listkeys HG_HOOKTYPE=listkeys HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
   adding remote bookmark bar
   $ cd ../a
 
@@ -277,11 +277,11 @@
   $ hg push -B baz ../a
   pushing to ../a
   searching for changes
-  listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
-  listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
+  listkeys hook: HG_HOOKNAME=listkeys HG_HOOKTYPE=listkeys HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
+  listkeys hook: HG_HOOKNAME=listkeys HG_HOOKTYPE=listkeys HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
   no changes found
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=push (glob)
-  prepushkey.forbid hook: HG_BUNDLE2=1 HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_SOURCE=push HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=push
+  prepushkey.forbid hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
   pushkey-abort: prepushkey hook exited with status 1
   abort: exporting bookmark baz failed!
   [255]
@@ -293,13 +293,13 @@
   > prelistkeys = sh -c "printenv.py prelistkeys.forbid 1"
   > EOF
   $ hg bookmark -r null quux
-  pretxnopen hook: HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
-  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_PENDING=$TESTTMP/a HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
-  txnclose hook: HG_BOOKMARK_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=bookmark (glob)
+  pretxnopen hook: HG_HOOKNAME=pretxnopen HG_HOOKTYPE=pretxnopen HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
+  pretxnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=pretxnclose HG_HOOKTYPE=pretxnclose HG_PENDING=$TESTTMP/a HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
+  txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
   $ cd ../b
   $ hg pull -B quux ../a
   pulling from ../a
-  prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
+  prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys HG_HOOKTYPE=prelistkeys HG_NAMESPACE=bookmarks
   abort: prelistkeys hook exited with status 1
   [255]
   $ cd ../a
@@ -317,7 +317,7 @@
   $ hg pull ../a
   pulling from ../a
   searching for changes
-  prechangegroup.forbid hook: HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
+  prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid HG_HOOKTYPE=prechangegroup HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
   abort: prechangegroup.forbid hook exited with status 1
   [255]
 
@@ -337,7 +337,7 @@
   adding file changes
   added 1 changesets with 1 changes to 1 files
   4:539e4b31b6dc
-  pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=file:$TESTTMP/a (glob)
+  pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1 HG_HOOKTYPE=pretxnchangegroup HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=file:$TESTTMP/a
   transaction abort!
   rollback completed
   abort: pretxnchangegroup.forbid1 hook exited with status 1
@@ -356,8 +356,8 @@
   $ hg pull ../a
   pulling from ../a
   searching for changes
-  preoutgoing hook: HG_SOURCE=pull
-  outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
+  preoutgoing hook: HG_HOOKNAME=preoutgoing HG_HOOKTYPE=preoutgoing HG_SOURCE=pull
+  outgoing hook: HG_HOOKNAME=outgoing HG_HOOKTYPE=outgoing HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
   adding changesets
   adding manifests
   adding file changes
@@ -375,8 +375,8 @@
   $ hg pull ../a
   pulling from ../a
   searching for changes
-  preoutgoing hook: HG_SOURCE=pull
-  preoutgoing.forbid hook: HG_SOURCE=pull
+  preoutgoing hook: HG_HOOKNAME=preoutgoing HG_HOOKTYPE=preoutgoing HG_SOURCE=pull
+  preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid HG_HOOKTYPE=preoutgoing HG_SOURCE=pull
   abort: preoutgoing.forbid hook exited with status 1
   [255]
 
@@ -389,8 +389,8 @@
   > outgoing = sh -c "printenv.py outgoing"
   > EOF
   $ hg clone a c
-  preoutgoing hook: HG_SOURCE=clone
-  outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
+  preoutgoing hook: HG_HOOKNAME=preoutgoing HG_HOOKTYPE=preoutgoing HG_SOURCE=clone
+  outgoing hook: HG_HOOKNAME=outgoing HG_HOOKTYPE=outgoing HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
   updating to branch default
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ rm -rf c
@@ -401,8 +401,8 @@
   > preoutgoing.forbid = sh -c "printenv.py preoutgoing.forbid 1"
   > EOF
   $ hg clone a zzz
-  preoutgoing hook: HG_SOURCE=clone
-  preoutgoing.forbid hook: HG_SOURCE=clone
+  preoutgoing hook: HG_HOOKNAME=preoutgoing HG_HOOKTYPE=preoutgoing HG_SOURCE=clone
+  preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid HG_HOOKTYPE=preoutgoing HG_SOURCE=clone
   abort: preoutgoing.forbid hook exited with status 1
   [255]
 
@@ -775,7 +775,7 @@
   > post-init = sh -c "printenv.py post-init"
   > EOF
   $ HGRCPATH=hgrc-with-post-init-hook hg init to
-  post-init hook: HG_ARGS=init to HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''} HG_PATS=['to'] HG_RESULT=0
+  post-init hook: HG_ARGS=init to HG_HOOKNAME=post-init HG_HOOKTYPE=post-init HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''} HG_PATS=['to'] HG_RESULT=0
 
 new commits must be visible in pretxnchangegroup (issue3428)
 
@@ -832,6 +832,50 @@
   [1]
   $ cd ..
 
+check that HG_PENDING makes pending changes visible to an external hook
+only in related repositories.
+
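+(for reference, a minimal sketch of an external hook that honors
+HG_PENDING; illustrative only, not executed by this test, and the
+subprocess call is just one way to re-enter hg:)
+
+#   import os, subprocess
+#   pending = os.environ.get('HG_PENDING', '')
+#   # only an hg invocation against the repo named by HG_PENDING can see
+#   # the not-yet-committed revision of the open transaction
+#   if pending:
+#       subprocess.call(['hg', '-R', pending, 'tip', '-q'])
+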
+(emulate a concurrently running transaction by copying
+.hg/store/00changelog.i.a aside here and reusing it in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/00changelog.i.a  .hg/store/00changelog.i.a.saved
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ cd a
+  $ hg tip -q
+  4:539e4b31b6dc
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
+
+(check the (in)visibility of the new changeset while a transaction is
+running in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@a'
+  > hg -R "$TESTTMP/a" tip -q
+  > echo '@a/nested'
+  > hg -R "$TESTTMP/a/nested" tip -q
+  > exit 1 # to avoid adding new revision for subsequent tests
+  > EOF
+  $ hg init nested
+  $ cd nested
+  $ echo a > a
+  $ hg add a
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
+  @a
+  4:539e4b31b6dc
+  @a/nested
+  0:bf5e395ced2c
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+
 Hooks from untrusted hgrc files are reported as failures
 ========================================================
 
@@ -860,7 +904,7 @@
   > txnclose.testing=echo txnclose hook called
   > EOF
   $ touch a && hg commit -Aqm a
-  warning: untrusted hook txnclose not executed
+  warning: untrusted hook txnclose.testing not executed
   $ hg log
   changeset:   0:3903775176ed
   tag:         tip
@@ -879,7 +923,7 @@
   $ touch b && hg commit -Aqm a
   transaction abort!
   rollback completed
-  abort: untrusted hook pretxnclose not executed
+  abort: untrusted hook pretxnclose.testing not executed
   (see 'hg help config.trusted')
   [255]
   $ hg log
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-bad-server.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,896 @@
+#require killdaemons serve zstd
+
+The client version is embedded in the HTTP request and is effectively
+dynamic. Pin the version so behavior is deterministic.
+
+  $ cat > fakeversion.py << EOF
+  > from mercurial import util
+  > util.version = lambda: '4.2'
+  > EOF
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > fakeversion = `pwd`/fakeversion.py
+  > EOF
+
+  $ hg init server0
+  $ cd server0
+  $ touch foo
+  $ hg -q commit -A -m initial
+
+Also disable compression, because zstd is optional and causes output to
+vary, and because debugging partial responses is hard when compression is
+involved.
+
+  $ cat > .hg/hgrc << EOF
+  > [extensions]
+  > badserver = $TESTDIR/badserverext.py
+  > [server]
+  > compressionengines = none
+  > EOF
+
+Failure to accept() the socket should result in a connection-related error message
+
+  $ hg --config badserver.closebeforeaccept=true serve -p $HGPORT -d --pid-file=hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: Connection reset by peer (no-windows !)
+  abort: error: An existing connection was forcibly closed by the remote host (windows !)
+  [255]
+
+(The server exits on its own, but there is a race between that and starting a new server.
+So ensure the process is dead.)
+
+  $ killdaemons.py $DAEMON_PIDS
+
+Failure immediately after accept() should yield a connection-related error message
+
+  $ hg --config badserver.closeafteraccept=true serve -p $HGPORT -d --pid-file=hg.pid
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: Connection reset by peer (no-windows !)
+  abort: error: An existing connection was forcibly closed by the remote host (windows !)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+Failure to read all bytes of the initial HTTP request should yield a connection-related error message
+
+  $ hg --config badserver.closeafterrecvbytes=1 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+TODO this error message is not very good
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: ''
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(1 from 65537) -> (1) G
+  read limit reached; closing socket
+
+  $ rm -f error.log
+
+Same failure, but the server reads the full HTTP request line
+
+  $ hg --config badserver.closeafterrecvbytes=40 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: ''
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(40 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(7 from -1) -> (7) Accept-
+  read limit reached; closing socket
+
+  $ rm -f error.log
+
+Failure on a subsequent HTTP request on the same socket (?cmd=batch)
+
+  $ hg --config badserver.closeafterrecvbytes=210 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: ''
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(210 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(177 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(150 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(115 from -1) -> (23) host: localhost:$HGPORT\r\n
+  readline(92 from -1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(43 from -1) -> (2) \r\n
+  write(36) -> HTTP/1.1 200 Script output follows\r\n
+  write(23) -> Server: badhttpserver\r\n
+  write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41) -> Content-Type: application/mercurial-0.1\r\n
+  write(21) -> Content-Length: 405\r\n
+  write(2) -> \r\n
+  write(405) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(41 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(15 from -1) -> (15) Accept-Encoding
+  read limit reached; closing socket
+  readline(210 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(184 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(157 from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(128 from -1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(87 from -1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(39 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(4 from -1) -> (4) host
+  read limit reached; closing socket
+
+  $ rm -f error.log
+
+Failure to read getbundle HTTP request
+
+  $ hg --config badserver.closeafterrecvbytes=292 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: error: ''
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(292 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(259 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(232 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(197 from -1) -> (23) host: localhost:$HGPORT\r\n
+  readline(174 from -1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(125 from -1) -> (2) \r\n
+  write(36) -> HTTP/1.1 200 Script output follows\r\n
+  write(23) -> Server: badhttpserver\r\n
+  write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41) -> Content-Type: application/mercurial-0.1\r\n
+  write(21) -> Content-Length: 405\r\n
+  write(2) -> \r\n
+  write(405) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(123 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(97 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(70 from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(41 from -1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  read limit reached; closing socket
+  readline(292 from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(266 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(239 from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(210 from -1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(169 from -1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(121 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(86 from -1) -> (23) host: localhost:$HGPORT\r\n
+  readline(63 from -1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(14 from -1) -> (2) \r\n
+  write(36) -> HTTP/1.1 200 Script output follows\r\n
+  write(23) -> Server: badhttpserver\r\n
+  write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41) -> Content-Type: application/mercurial-0.1\r\n
+  write(20) -> Content-Length: 42\r\n
+  write(2) -> \r\n
+  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
+  readline(12 from 65537) -> (12) GET /?cmd=ge
+  read limit reached; closing socket
+  readline(292 from 65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+  readline(262 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(235 from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(206 from -1) -> (206) x-hgarg-1: bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Ali
+  read limit reached; closing socket
+
+  $ rm -f error.log
+
+Now do a variation using POST to send arguments
+
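+(with experimental.httppostargs enabled, the client moves the x-hgarg-*
+query string into the POST body and announces its size via the
+x-hgargs-post header; a rough sketch of the request it builds for this
+argument-only batch command, matching the 28-byte body logged below:)
+
+#   args = b'cmds=heads+%3Bknown+nodes%3D'        # 28 bytes, was x-hgarg-1
+#   headers = {'X-HgArgs-Post': str(len(args)),   # "x-hgargs-post: 28"
+#              'Content-Length': str(len(args))}  # args are the whole body here
+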
+  $ hg --config experimental.httppostargs=true --config badserver.closeafterrecvbytes=315 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: ''
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(315 from 65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(282 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(255 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(220 from -1) -> (23) host: localhost:$HGPORT\r\n
+  readline(197 from -1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(148 from -1) -> (2) \r\n
+  write(36) -> HTTP/1.1 200 Script output follows\r\n
+  write(23) -> Server: badhttpserver\r\n
+  write(37) -> Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41) -> Content-Type: application/mercurial-0.1\r\n
+  write(21) -> Content-Length: 418\r\n
+  write(2) -> \r\n
+  write(418) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httppostargs httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(146 from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n
+  readline(119 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(92 from -1) -> (41) content-type: application/mercurial-0.1\r\n
+  readline(51 from -1) -> (19) vary: X-HgProto-1\r\n
+  readline(32 from -1) -> (19) x-hgargs-post: 28\r\n
+  readline(13 from -1) -> (13) x-hgproto-1: 
+  read limit reached; closing socket
+  readline(315 from 65537) -> (27) POST /?cmd=batch HTTP/1.1\r\n
+  readline(288 from -1) -> (27) Accept-Encoding: identity\r\n
+  readline(261 from -1) -> (41) content-type: application/mercurial-0.1\r\n
+  readline(220 from -1) -> (19) vary: X-HgProto-1\r\n
+  readline(201 from -1) -> (19) x-hgargs-post: 28\r\n
+  readline(182 from -1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(134 from -1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(99 from -1) -> (20) content-length: 28\r\n
+  readline(79 from -1) -> (23) host: localhost:$HGPORT\r\n
+  readline(56 from -1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(7 from -1) -> (2) \r\n
+  read(5 from 28) -> (5) cmds=
+  read limit reached, closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
+Now move on to partial server responses
+
+Server sends a single character from the HTTP response line
+
+  $ hg --config badserver.closeaftersendbytes=1 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: error: H
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(1 from 36) -> (0) H
+  write limit reached; closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends an incomplete capabilities response body
+
+  $ hg --config badserver.closeaftersendbytes=180 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: HTTP request error (incomplete response; expected 385 bytes got 20)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (144) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (121) Server: badhttpserver\r\n
+  write(37 from 37) -> (84) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (22) Content-Length: 405\r\n
+  write(2 from 2) -> (20) \r\n
+  write(20 from 405) -> (0) lookup changegroupsu
+  write limit reached; closing socket
+
+  $ rm -f error.log
+
+Server sends incomplete headers for batch request
+
+  $ hg --config badserver.closeaftersendbytes=695 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+TODO this output is horrible
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  abort: 'http://localhost:$HGPORT/' does not appear to be an hg repository:
+  ---%<--- (application/mercuria)
+  
+  ---%<---
+  !
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (659) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (636) Server: badhttpserver\r\n
+  write(37 from 37) -> (599) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (558) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (537) Content-Length: 405\r\n
+  write(2 from 2) -> (535) \r\n
+  write(405 from 405) -> (130) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (94) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (71) Server: badhttpserver\r\n
+  write(37 from 37) -> (34) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(34 from 41) -> (0) Content-Type: application/mercuria
+  write limit reached; closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends an incomplete HTTP response body to batch request
+
+  $ hg --config badserver.closeaftersendbytes=760 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+TODO client spews a stack due to uncaught ValueError in batch.results()
+  $ hg clone http://localhost:$HGPORT/ clone 2> /dev/null
+  [1]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (724) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (701) Server: badhttpserver\r\n
+  write(37 from 37) -> (664) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (623) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (602) Content-Length: 405\r\n
+  write(2 from 2) -> (600) \r\n
+  write(405 from 405) -> (195) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (159) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (136) Server: badhttpserver\r\n
+  write(37 from 37) -> (99) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (58) Content-Type: application/mercurial-0.1\r\n
+  write(20 from 20) -> (38) Content-Length: 42\r\n
+  write(2 from 2) -> (36) \r\n
+  write(36 from 42) -> (0) 96ee1d7354c4ad7372047672c36a1f561e3a
+  write limit reached; closing socket
+
+  $ rm -f error.log
+
+Server sends incomplete headers for getbundle response
+
+  $ hg --config badserver.closeaftersendbytes=895 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+TODO this output is terrible
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: 'http://localhost:$HGPORT/' does not appear to be an hg repository:
+  ---%<--- (application/mercuri)
+  
+  ---%<---
+  !
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (859) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (836) Server: badhttpserver\r\n
+  write(37 from 37) -> (799) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (758) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (737) Content-Length: 405\r\n
+  write(2 from 2) -> (735) \r\n
+  write(405 from 405) -> (330) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (294) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (271) Server: badhttpserver\r\n
+  write(37 from 37) -> (234) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (193) Content-Type: application/mercurial-0.1\r\n
+  write(20 from 20) -> (173) Content-Length: 42\r\n
+  write(2 from 2) -> (171) \r\n
+  write(42 from 42) -> (129) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
+  readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (396) x-hgarg-1: bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (93) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (70) Server: badhttpserver\r\n
+  write(37 from 37) -> (33) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(33 from 41) -> (0) Content-Type: application/mercuri
+  write limit reached; closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends empty HTTP body for getbundle
+
+  $ hg --config badserver.closeaftersendbytes=933 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (897) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (874) Server: badhttpserver\r\n
+  write(37 from 37) -> (837) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (796) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (775) Content-Length: 405\r\n
+  write(2 from 2) -> (773) \r\n
+  write(405 from 405) -> (368) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (332) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (309) Server: badhttpserver\r\n
+  write(37 from 37) -> (272) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (231) Content-Type: application/mercurial-0.1\r\n
+  write(20 from 20) -> (211) Content-Length: 42\r\n
+  write(2 from 2) -> (209) \r\n
+  write(42 from 42) -> (167) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
+  readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (396) x-hgarg-1: bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (131) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (108) Server: badhttpserver\r\n
+  write(37 from 37) -> (71) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (30) Content-Type: application/mercurial-0.2\r\n
+  write(28 from 28) -> (2) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (0) \r\n
+  write limit reached; closing socket
+  write(36) -> HTTP/1.1 500 Internal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends partial compression string
+
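+(each HTTP chunked transfer-encoding frame is the payload size in hex, a
+CRLF, the payload, and a CRLF; a sketch of that framing, whose output
+matches the writes logged below:)
+
+#   def chunk(data):
+#       return b'%x\r\n%s\r\n' % (len(data), data)
+#   chunk(b'none')  # == b'4\r\nnone\r\n', the compression engine name
+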
+  $ hg --config badserver.closeaftersendbytes=945 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response; expected 1 bytes got 3)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ cat error.log
+  readline(65537) -> (33) GET /?cmd=capabilities HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (909) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (886) Server: badhttpserver\r\n
+  write(37 from 37) -> (849) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (808) Content-Type: application/mercurial-0.1\r\n
+  write(21 from 21) -> (787) Content-Length: 405\r\n
+  write(2 from 2) -> (785) \r\n
+  write(405 from 405) -> (380) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none
+  readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (41) x-hgarg-1: cmds=heads+%3Bknown+nodes%3D\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (344) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (321) Server: badhttpserver\r\n
+  write(37 from 37) -> (284) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (243) Content-Type: application/mercurial-0.1\r\n
+  write(20 from 20) -> (223) Content-Length: 42\r\n
+  write(2 from 2) -> (221) \r\n
+  write(42 from 42) -> (179) 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n;
+  readline(65537) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
+  readline(-1) -> (27) Accept-Encoding: identity\r\n
+  readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
+  readline(-1) -> (396) x-hgarg-1: bundlecaps=HG20%2Cbundle2%3DHG20%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=1&common=0000000000000000000000000000000000000000&heads=96ee1d7354c4ad7372047672c36a1f561e3a6a4c&listkeys=phases%2Cbookmarks\r\n
+  readline(-1) -> (48) x-hgproto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2\r\n
+  readline(-1) -> (35) accept: application/mercurial-0.1\r\n
+  readline(-1) -> (23) host: localhost:$HGPORT\r\n
+  readline(-1) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n
+  readline(-1) -> (2) \r\n
+  write(36 from 36) -> (143) HTTP/1.1 200 Script output follows\r\n
+  write(23 from 23) -> (120) Server: badhttpserver\r\n
+  write(37 from 37) -> (83) Date: Fri, 14 Apr 2017 00:00:00 GMT\r\n
+  write(41 from 41) -> (42) Content-Type: application/mercurial-0.2\r\n
+  write(28 from 28) -> (14) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (12) \r\n
+  write(6 from 6) -> (6) 1\\r\\n\x04\\r\\n (esc)
+  write(6 from 9) -> (0) 4\r\nnon
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends partial bundle2 header magic
+
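+(the bundle2 stream exercised by this and the following tests is: the
+magic "HG20", a 4-byte big-endian stream-params length, then per part a
+4-byte header length, the header, and size-prefixed payload chunks, with
+zero sizes acting as terminators; a rough decoding sketch, ignoring
+interrupt frames:)
+
+#   import struct
+#   def readbundle2(fh):
+#       assert fh.read(4) == b'HG20'                  # stream magic
+#       fh.read(struct.unpack('>i', fh.read(4))[0])   # stream params (empty here)
+#       while True:
+#           hlen = struct.unpack('>i', fh.read(4))[0]
+#           if not hlen:                              # 0: end of bundle2 stream
+#               break
+#           fh.read(hlen)                             # part header
+#           while True:
+#               clen = struct.unpack('>i', fh.read(4))[0]
+#               if not clen:                          # 0: end of this part
+#                   break
+#               fh.read(clen)                         # payload chunk
+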
+  $ hg --config badserver.closeaftersendbytes=954 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response; expected 1 bytes got 3)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -7 error.log
+  write(28 from 28) -> (23) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (21) \r\n
+  write(6 from 6) -> (15) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (6) 4\r\nnone\r\n
+  write(6 from 9) -> (0) 4\r\nHG2
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server sends incomplete bundle2 stream params length
+
+  $ hg --config badserver.closeaftersendbytes=963 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response; expected 1 bytes got 3)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -8 error.log
+  write(28 from 28) -> (32) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (30) \r\n
+  write(6 from 6) -> (24) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (15) 4\r\nnone\r\n
+  write(9 from 9) -> (6) 4\r\nHG20\r\n
+  write(6 from 9) -> (0) 4\\r\\n\x00\x00\x00 (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops after bundle2 stream params header
+
+  $ hg --config badserver.closeaftersendbytes=966 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -8 error.log
+  write(28 from 28) -> (35) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (33) \r\n
+  write(6 from 6) -> (27) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (18) 4\r\nnone\r\n
+  write(9 from 9) -> (9) 4\r\nHG20\r\n
+  write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops sending after bundle2 part header length
+
+  $ hg --config badserver.closeaftersendbytes=975 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -9 error.log
+  write(28 from 28) -> (44) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (42) \r\n
+  write(6 from 6) -> (36) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (27) 4\r\nnone\r\n
+  write(9 from 9) -> (18) 4\r\nHG20\r\n
+  write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops sending after bundle2 part header
+
+  $ hg --config badserver.closeaftersendbytes=1022 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  transaction abort!
+  rollback completed
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -10 error.log
+  write(28 from 28) -> (91) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (89) \r\n
+  write(6 from 6) -> (83) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (74) 4\r\nnone\r\n
+  write(9 from 9) -> (65) 4\r\nHG20\r\n
+  write(9 from 9) -> (56) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (47) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (0) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops after bundle2 part payload chunk size
+
+  $ hg --config badserver.closeaftersendbytes=1031 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  transaction abort!
+  rollback completed
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -11 error.log
+  write(28 from 28) -> (100) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (98) \r\n
+  write(6 from 6) -> (92) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (83) 4\r\nnone\r\n
+  write(9 from 9) -> (74) 4\r\nHG20\r\n
+  write(9 from 9) -> (65) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (56) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (9) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (0) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops sending in middle of bundle2 payload chunk
+
+  $ hg --config badserver.closeaftersendbytes=1504 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  transaction abort!
+  rollback completed
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -12 error.log
+  write(28 from 28) -> (573) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (571) \r\n
+  write(6 from 6) -> (565) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (556) 4\r\nnone\r\n
+  write(9 from 9) -> (547) 4\r\nHG20\r\n
+  write(9 from 9) -> (538) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (529) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (482) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (473) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (0) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops sending after 0 length payload chunk size
+
+  $ hg --config badserver.closeaftersendbytes=1513 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  transaction abort!
+  rollback completed
+  abort: HTTP request error (incomplete response)
+  (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
+  [255]
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -13 error.log
+  write(28 from 28) -> (582) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (580) \r\n
+  write(6 from 6) -> (574) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (565) 4\r\nnone\r\n
+  write(9 from 9) -> (556) 4\r\nHG20\r\n
+  write(9 from 9) -> (547) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (538) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (491) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (482) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (9) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+
+Server stops sending after the 0-length bundle2 part header (indicating the
+end of the bundle2 payload). This is before the size-0 chunked transfer part
+that signals the end of the HTTP response.
+
+  $ hg --config badserver.closeaftersendbytes=1710 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -22 error.log
+  write(28 from 28) -> (779) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (777) \r\n
+  write(6 from 6) -> (771) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (762) 4\r\nnone\r\n
+  write(9 from 9) -> (753) 4\r\nHG20\r\n
+  write(9 from 9) -> (744) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (735) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (688) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (679) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (206) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (197) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (188) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  write(38 from 38) -> (150) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  write(9 from 9) -> (141) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  write(64 from 64) -> (77) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  write(9 from 9) -> (68) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (59) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  write(41 from 41) -> (18) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
+  write(9 from 9) -> (9) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (0) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+  $ rm -rf clone
+
+Server sends the size-0 chunked-transfer chunk without its terminating \r\n
+
+  $ hg --config badserver.closeaftersendbytes=1713 serve -p $HGPORT -d --pid-file=hg.pid -E error.log
+  $ cat hg.pid > $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT/ clone
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ killdaemons.py $DAEMON_PIDS
+
+  $ tail -23 error.log
+  write(28 from 28) -> (782) Transfer-Encoding: chunked\r\n
+  write(2 from 2) -> (780) \r\n
+  write(6 from 6) -> (774) 1\\r\\n\x04\\r\\n (esc)
+  write(9 from 9) -> (765) 4\r\nnone\r\n
+  write(9 from 9) -> (756) 4\r\nHG20\r\n
+  write(9 from 9) -> (747) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (738) 4\\r\\n\x00\x00\x00)\\r\\n (esc)
+  write(47 from 47) -> (691) 29\\r\\n\x0bCHANGEGROUP\x00\x00\x00\x00\x01\x01\x07\x02	\x01version02nbchanges1\\r\\n (esc)
+  write(9 from 9) -> (682) 4\\r\\n\x00\x00\x01\xd2\\r\\n (esc)
+  write(473 from 473) -> (209) 1d2\\r\\n\x00\x00\x00\xb2\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>6a3df4de388f3c4f8e28f4f9a814299a3cbb5f50\\ntest\\n0 0\\nfoo\\n\\ninitial\x00\x00\x00\x00\x00\x00\x00\xa1j=\xf4\xde8\x8f<O\x8e(\xf4\xf9\xa8\x14)\x9a<\xbb_P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00b80de5d138758541c5f05265ad144ab9fa86d1db\\n\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00h\xb8\\r\xe5\xd18u\x85A\xc5\xf0Re\xad\x14J\xb9\xfa\x86\xd1\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL\x00\x00\x00\x00\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (200) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (191) 4\\r\\n\x00\x00\x00 \\r\\n (esc)
+  write(38 from 38) -> (153) 20\\r\\n\x08LISTKEYS\x00\x00\x00\x01\x01\x00	\x06namespacephases\\r\\n (esc)
+  write(9 from 9) -> (144) 4\\r\\n\x00\x00\x00:\\r\\n (esc)
+  write(64 from 64) -> (80) 3a\r\n96ee1d7354c4ad7372047672c36a1f561e3a6a4c	1\npublishing	True\r\n
+  write(9 from 9) -> (71) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (62) 4\\r\\n\x00\x00\x00#\\r\\n (esc)
+  write(41 from 41) -> (21) 23\\r\\n\x08LISTKEYS\x00\x00\x00\x02\x01\x00		namespacebookmarks\\r\\n (esc)
+  write(9 from 9) -> (12) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(9 from 9) -> (3) 4\\r\\n\x00\x00\x00\x00\\r\\n (esc)
+  write(3 from 5) -> (0) 0\r\n
+  write limit reached; closing socket
+  write(27) -> 15\r\nInternal Server Error\r\n
+
+  $ rm -f error.log
+  $ rm -rf clone
--- a/tests/test-http-bundle1.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-http-bundle1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -28,11 +28,11 @@
 
 #if windows
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': * (glob)
+  abort: cannot start server at 'localhost:$HGPORT1': * (glob)
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
@@ -138,7 +138,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=http://localhost:$HGPORT1/ (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
   (run 'hg update' to get a working copy)
   $ cd ..
 
--- a/tests/test-http-protocol.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-http-protocol.t	Tue Apr 18 12:24:34 2017 -0400
@@ -16,9 +16,9 @@
 compression formats are advertised in compression capability
 
 #if zstd
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
 #else
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
 #endif
 
   $ killdaemons.py
@@ -27,7 +27,7 @@
 
   $ hg --config server.compressionengines=none -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
 
   $ killdaemons.py
 
@@ -35,7 +35,7 @@
 
   $ hg --config server.compressionengines=none,zlib -R server serve -p $HGPORT -d --pid-file hg.pid
   $ cat hg.pid > $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
 
   $ killdaemons.py
 
@@ -46,7 +46,7 @@
 
 Server should send application/mercurial-0.1 to clients if no Accept is used
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -55,7 +55,7 @@
 
 Server should send application/mercurial-0.1 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.1' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -64,14 +64,14 @@
 
 Server should send application/mercurial-0.2 when client says it wants it
 
-  $ get-with-headers.py --hgproto '0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
   server: * (glob)
   transfer-encoding: chunked
 
-  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.2
   date: * (glob)
@@ -80,7 +80,7 @@
 
 Requesting a compression format that the server doesn't support will fall back to 0.1
 
-  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
+  $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
   200 Script output follows
   content-type: application/mercurial-0.1
   date: * (glob)
@@ -90,7 +90,7 @@
 #if zstd
 zstd is used if available
 
-  $ get-with-headers.py --hgproto '0.2 comp=zstd' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
   $ f --size --hexdump --bytes 36 --sha1 resp
   resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -101,7 +101,7 @@
 
 application/mercurial-0.2 is not yet used on non-streaming responses
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=heads' -
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
   200 Script output follows
   content-length: 41
   content-type: application/mercurial-0.1
@@ -118,11 +118,11 @@
 
 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
 
-  $ get-with-headers.py --headeronly 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
+  $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
   200 Script output follows
   content-type: application/mercurial-0.1
 
-  $ get-with-headers.py 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -130,7 +130,7 @@
 
 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
 
-  $ get-with-headers.py --hgproto '0.1' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 28 --sha1 resp
   resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -139,7 +139,7 @@
 0.2 with no compression will get "none" because that is the server's preference
 (spec says ZL and UN are implicitly supported)
 
-  $ get-with-headers.py --hgproto '0.2' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -147,7 +147,7 @@
 
 Client receives the server's preference even if the local order doesn't match
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 32 --sha1 resp
   resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
@@ -155,7 +155,7 @@
 
 Client receives the only format it supports even if it is not the server's preferred format (see the negotiation sketch below)
 
-  $ get-with-headers.py --hgproto '0.2 comp=zlib' 127.0.0.1:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
+  $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000'  > resp
   $ f --size --hexdump --bytes 33 --sha1 resp
   resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
   0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
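+
+A sketch of the negotiation these tests exercise, reconstructed from the
+expected outputs above rather than from Mercurial's actual code (the function
+name is made up):
+
+def negotiate(protoline, servercomps):
+    # protoline is the client's X-HgProto-1 value, e.g. "0.2 comp=zlib,none";
+    # servercomps lists the server's engines in preference order.
+    parts = protoline.split()
+    clientcomps = []
+    for p in parts:
+        if p.startswith('comp='):
+            clientcomps = p[len('comp='):].split(',')
+    if '0.2' in parts:
+        # a client that sends no comp list implicitly supports ZL and UN
+        supported = clientcomps or ['zlib', 'none']
+        for comp in servercomps:  # the server's preference order wins
+            if comp in supported:
+                return '0.2', comp
+        # no mutually supported engine: fall back to the 0.1 media type
+    return '0.1', 'zlib'  # 0.1 responses are always zlib-compressed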
--- a/tests/test-http-proxy.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-http-proxy.t	Tue Apr 18 12:24:34 2017 -0400
@@ -87,7 +87,7 @@
 misconfigured hosts)
 
   $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
-  abort: error: (Connection refused|Protocol not supported) (re)
+  abort: error: (Connection refused|Protocol not supported|.* actively refused it) (re)
   [255]
 
 do not use the proxy if it is in the no list
--- a/tests/test-http.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-http.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,4 +1,4 @@
-#require serve
+#require killdaemons serve
 
   $ hg init test
   $ cd test
@@ -19,11 +19,11 @@
 
 #if windows
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': * (glob)
+  abort: cannot start server at 'localhost:$HGPORT1': * (glob)
   [255]
 #else
   $ hg serve -p $HGPORT1 2>&1
-  abort: cannot start server at ':$HGPORT1': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT1': Address already in use
   [255]
 #endif
   $ cd ..
@@ -129,7 +129,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=http://localhost:$HGPORT1/ (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
   (run 'hg update' to get a working copy)
   $ cd ..
 
@@ -333,3 +333,64 @@
   abort: pull failed on remote
   [255]
   $ cat error.log
+
+A corrupt cookies file should yield a warning
+
+  $ cat > $TESTTMP/cookies.txt << EOF
+  > bad format
+  > EOF
+
+  $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
+  (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
+  56f9bc90cce6
+
+  $ killdaemons.py
+
+Create a dummy authentication handler that looks for cookies. It doesn't do
+anything useful. It just raises an HTTP 500 with details about the Cookie
+request header. We use HTTP 500 because its message is printed in the abort
+message.
+
+  $ cat > cookieauth.py << EOF
+  > from mercurial import util
+  > from mercurial.hgweb import common
+  > def perform_authentication(hgweb, req, op):
+  >     cookie = req.env.get('HTTP_COOKIE')
+  >     if not cookie:
+  >         raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'no-cookie')
+  >     raise common.ErrorResponse(common.HTTP_SERVER_ERROR, 'Cookie: %s' % cookie)
+  > def extsetup():
+  >     common.permhooks.insert(0, perform_authentication)
+  > EOF
+
+  $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
+  $ cat pid > $DAEMON_PIDS
+
+A request sent without a cookie should fail due to the missing cookie
+
+  $ hg id http://localhost:$HGPORT
+  abort: HTTP Error 500: no-cookie
+  [255]
+
+Populate a cookies file
+
+  $ cat > cookies.txt << EOF
+  > # HTTP Cookie File
+  > # Expiration is 2030-01-01 at midnight
+  > .example.com	TRUE	/	FALSE	1893456000	hgkey	examplevalue
+  > EOF
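+
+(The tab-separated columns above follow the Netscape cookies.txt format:
+domain, include-subdomains flag, path, secure flag, expiry as a Unix
+timestamp, cookie name, cookie value. A minimal sketch of loading such a file
+with the stdlib -- illustrative only, not necessarily how Mercurial does it:)
+
+import cookielib
+jar = cookielib.MozillaCookieJar('cookies.txt')
+jar.load()  # raises cookielib.LoadError on a malformed file
+for cookie in jar:
+    print cookie.domain, cookie.name, cookie.value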
+
+Should not send a cookie for another domain
+
+  $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
+  abort: HTTP Error 500: no-cookie
+  [255]
+
+Add a cookie entry for our test server and verify it is sent (the entry's
+domain is localhost.local because cookielib matches a dotless request host
+like localhost against the host plus ".local")
+
+  $ cat >> cookies.txt << EOF
+  > localhost.local	FALSE	/	FALSE	1893456000	hgkey	localhostvalue
+  > EOF
+
+  $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
+  abort: HTTP Error 500: Cookie: hgkey=localhostvalue
+  [255]
--- a/tests/test-https.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-https.t	Tue Apr 18 12:24:34 2017 -0400
@@ -36,11 +36,11 @@
 
 #if windows
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT':
+  abort: cannot start server at 'localhost:$HGPORT': * (glob)
   [255]
 #else
   $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
-  abort: cannot start server at ':$HGPORT': Address already in use
+  abort: cannot start server at 'localhost:$HGPORT': Address already in use
   [255]
 #endif
   $ cd ..
@@ -97,11 +97,12 @@
   [255]
 #endif
 
-Specifying a per-host certificate file that doesn't exist will abort
+Specifying a per-host certificate file that doesn't exist will abort.  On
+Windows, the full C:/path/to/msysroot prefix will print as part of the path.
 
   $ hg --config hostsecurity.localhost:verifycertsfile=/does/not/exist clone https://localhost:$HGPORT/
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  abort: path specified by hostsecurity.localhost:verifycertsfile does not exist: /does/not/exist
+  abort: path specified by hostsecurity.localhost:verifycertsfile does not exist: */does/not/exist (glob)
   [255]
 
 A malformed per-host certificate file will raise an error
@@ -223,7 +224,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=https://localhost:$HGPORT/ (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=https://localhost:$HGPORT/
   (run 'hg update' to get a working copy)
   $ cd ..
 
@@ -278,17 +279,17 @@
 cacert mismatch
 
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  abort: 127.0.0.1 certificate error: certificate is for localhost (glob)
-  (set hostsecurity.127.0.0.1:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely) (glob)
+  > https://$LOCALIP:$HGPORT/
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  abort: $LOCALIP certificate error: certificate is for localhost (glob)
+  (set hostsecurity.$LOCALIP:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
   [255]
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
-  > https://127.0.0.1:$HGPORT/ --insecure
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
-  warning: connection security to 127.0.0.1 is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
+  > https://$LOCALIP:$HGPORT/ --insecure
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  warning: connection security to $LOCALIP is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
   searching for changes
   no changes found
   $ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem"
@@ -382,6 +383,7 @@
 - works without cacerts (hostfingerprints)
   $ hg -R copy-pull id https://localhost:$HGPORT/ --insecure --config hostfingerprints.localhost=ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
 - works without cacerts (hostsecurity)
@@ -396,6 +398,7 @@
 - multiple fingerprints specified and first matches
   $ hg --config 'hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
   $ hg --config 'hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
@@ -405,6 +408,7 @@
 - multiple fingerprints specified and last matches
   $ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/ --insecure
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
   $ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/
@@ -434,8 +438,9 @@
 
 
 - ignores that certificate doesn't match hostname
-  $ hg -R copy-pull id https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ hg -R copy-pull id https://$LOCALIP:$HGPORT/ --config hostfingerprints.$LOCALIP=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
+  warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for $LOCALIP found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: $LOCALIP.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   5fed3813f7f5
 
 Ports used by next test. Kill servers.
@@ -571,9 +576,10 @@
   warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
   searching for changes
   no changes found
-  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://127.0.0.1:$HGPORT/ --config hostfingerprints.127.0.0.1=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
-  pulling from https://127.0.0.1:$HGPORT/ (glob)
-  warning: connecting to 127.0.0.1 using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://localhost:$HGPORT/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 --trace
+  pulling from https://*:$HGPORT/ (glob)
+  warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
+  (SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, set the following config value in [hostsecurity] and remove the old one from [hostfingerprints] to upgrade to a more secure SHA-256 fingerprint: localhost.fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
   searching for changes
   no changes found
 
--- a/tests/test-i18n.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-i18n.t	Tue Apr 18 12:24:34 2017 -0400
@@ -29,14 +29,15 @@
 
 Test keyword search in translated help text:
 
-  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k blättern
+  $ HGENCODING=UTF-8 LANGUAGE=de hg help -k Aktualisiert
   Themen:
   
-   extensions Benutzung erweiterter Funktionen
+   subrepos Unterarchive
   
-  Erweiterungen:
+  Befehle:
   
-   pager Verwendet einen externen Pager zum Bl\xc3\xa4ttern in der Ausgabe von Befehlen (esc)
+   pull   Ruft \xc3\x84nderungen von der angegebenen Quelle ab (esc)
+   update Aktualisiert das Arbeitsverzeichnis (oder wechselt die Version)
 
 #endif
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-imports-checker.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,146 @@
+#require test-repo
+
+  $ . "$TESTDIR/helpers-testrepo.sh"
+  $ import_checker="$TESTDIR"/../contrib/import-checker.py
+
+Run the doctests from the import checker, and make sure
+it's working correctly.
+  $ TERM=dumb
+  $ export TERM
+  $ python -m doctest $import_checker
+
+Run additional tests for the import checker
+
+  $ mkdir testpackage
+  $ touch testpackage/__init__.py
+
+  $ cat > testpackage/multiple.py << EOF
+  > from __future__ import absolute_import
+  > import os, sys
+  > EOF
+
+  $ cat > testpackage/unsorted.py << EOF
+  > from __future__ import absolute_import
+  > import sys
+  > import os
+  > EOF
+
+  $ cat > testpackage/stdafterlocal.py << EOF
+  > from __future__ import absolute_import
+  > from . import unsorted
+  > import os
+  > EOF
+
+  $ cat > testpackage/requirerelative.py << EOF
+  > from __future__ import absolute_import
+  > import testpackage.unsorted
+  > EOF
+
+  $ cat > testpackage/importalias.py << EOF
+  > from __future__ import absolute_import
+  > import ui
+  > EOF
+
+  $ cat > testpackage/relativestdlib.py << EOF
+  > from __future__ import absolute_import
+  > from .. import os
+  > EOF
+
+  $ cat > testpackage/symbolimport.py << EOF
+  > from __future__ import absolute_import
+  > from .unsorted import foo
+  > EOF
+
+  $ cat > testpackage/latesymbolimport.py << EOF
+  > from __future__ import absolute_import
+  > from . import unsorted
+  > from mercurial.node import hex
+  > EOF
+
+  $ cat > testpackage/multiplegroups.py << EOF
+  > from __future__ import absolute_import
+  > from . import unsorted
+  > from . import more
+  > EOF
+
+  $ mkdir testpackage/subpackage
+  $ cat > testpackage/subpackage/levelpriority.py << EOF
+  > from __future__ import absolute_import
+  > from . import foo
+  > from .. import parent
+  > EOF
+
+  $ touch testpackage/subpackage/foo.py
+  $ cat > testpackage/subpackage/__init__.py << EOF
+  > from __future__ import absolute_import
+  > from . import levelpriority  # should not cause cycle
+  > EOF
+
+  $ cat > testpackage/subpackage/localimport.py << EOF
+  > from __future__ import absolute_import
+  > from . import foo
+  > def bar():
+  >     # should not cause "higher-level import should come first"
+  >     from .. import unsorted
+  >     # but other errors should be detected
+  >     from .. import more
+  >     import testpackage.subpackage.levelpriority
+  > EOF
+
+  $ cat > testpackage/importmodulefromsub.py << EOF
+  > from __future__ import absolute_import
+  > from .subpackage import foo  # not a "direct symbol import"
+  > EOF
+
+  $ cat > testpackage/importsymbolfromsub.py << EOF
+  > from __future__ import absolute_import
+  > from .subpackage import foo, nonmodule
+  > EOF
+
+  $ cat > testpackage/sortedentries.py << EOF
+  > from __future__ import absolute_import
+  > from . import (
+  >     foo,
+  >     bar,
+  > )
+  > EOF
+
+  $ cat > testpackage/importfromalias.py << EOF
+  > from __future__ import absolute_import
+  > from . import ui
+  > EOF
+
+  $ cat > testpackage/importfromrelative.py << EOF
+  > from __future__ import absolute_import
+  > from testpackage.unsorted import foo
+  > EOF
+
+  $ mkdir testpackage2
+  $ touch testpackage2/__init__.py
+
+  $ cat > testpackage2/latesymbolimport.py << EOF
+  > from __future__ import absolute_import
+  > from testpackage import unsorted
+  > from mercurial.node import hex
+  > EOF
+
+  $ python "$import_checker" testpackage*/*.py testpackage/subpackage/*.py
+  testpackage/importalias.py:2: ui module must be "as" aliased to uimod
+  testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod
+  testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted
+  testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted
+  testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage
+  testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
+  testpackage/multiple.py:2: multiple imported names: os, sys
+  testpackage/multiplegroups.py:3: multiple "from . import" statements
+  testpackage/relativestdlib.py:2: relative import of stdlib module
+  testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted
+  testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo
+  testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage
+  testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage
+  testpackage/subpackage/localimport.py:7: multiple "from .. import" statements
+  testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority
+  testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted
+  testpackage/unsorted.py:3: imports not lexically sorted: os < sys
+  testpackage2/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
+  [1]
--- a/tests/test-largefiles-cache.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-largefiles-cache.t	Tue Apr 18 12:24:34 2017 -0400
@@ -223,7 +223,7 @@
   $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
   pushing to http://localhost:$HGPORT1/
   searching for changes
-  abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
+  abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required (glob)
   [255]
 
   $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
--- a/tests/test-largefiles-misc.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-largefiles-misc.t	Tue Apr 18 12:24:34 2017 -0400
@@ -212,6 +212,18 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     add files
   
+sharing a largefile repo automatically enables largefiles on the share
+
+  $ hg share --config extensions.share= . ../shared_lfrepo
+  updating working directory
+  getting changed largefiles
+  1 largefiles updated, 0 removed
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cat ../shared_lfrepo/.hg/hgrc
+  
+  [extensions]
+  largefiles=
+
 verify that large files in subrepos are handled properly
   $ hg init subrepo
   $ echo "subrepo = subrepo" > .hgsub
--- a/tests/test-largefiles-small-disk.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-largefiles-small-disk.t	Tue Apr 18 12:24:34 2017 -0400
@@ -5,7 +5,11 @@
   > from mercurial import util
   > #
   > # this makes the original largefiles code abort:
+  > _origcopyfileobj = shutil.copyfileobj
   > def copyfileobj(fsrc, fdst, length=16*1024):
+  >     # allow journal files (used by transaction) to be written
+  >     if 'journal.' in fdst.name:
+  >         return _origcopyfileobj(fsrc, fdst, length)
   >     fdst.write(fsrc.read(4))
   >     raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
   > shutil.copyfileobj = copyfileobj
--- a/tests/test-largefiles-wireproto.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-largefiles-wireproto.t	Tue Apr 18 12:24:34 2017 -0400
@@ -347,7 +347,7 @@
   searching 2 changesets for largefiles
   verified existence of 2 revisions of 2 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
   $ hg -R batchverifyclone update
   getting changed largefiles
   2 largefiles updated, 0 removed
@@ -384,7 +384,7 @@
   searching 3 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
   $ tail -1 access.log
-  127.0.0.1 - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
+  $LOCALIP - - [*] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3Dc8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=*zlib,none,bzip2 (glob)
 
   $ killdaemons.py
 
@@ -441,7 +441,7 @@
   1 largefiles updated, 0 removed
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
+  $ killdaemons.py
   $ rm hg.pid access.log
-  $ killdaemons.py
 
 #endif
--- a/tests/test-largefiles.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-largefiles.t	Tue Apr 18 12:24:34 2017 -0400
@@ -192,7 +192,7 @@
 
   $ hg serve -d -p $HGPORT --pid-file ../hg.pid
   $ cat ../hg.pid >> $DAEMON_PIDS
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/?style=raw'
   200 Script output follows
   
   
@@ -201,7 +201,7 @@
   -rw-r--r-- 9 normal3
   
   
-  $ get-with-headers.py 127.0.0.1:$HGPORT 'file/tip/sub/?style=raw'
+  $ get-with-headers.py $LOCALIP:$HGPORT 'file/tip/sub/?style=raw'
   200 Script output follows
   
   
--- a/tests/test-lock.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-lock.py	Tue Apr 18 12:24:34 2017 -0400
@@ -10,7 +10,7 @@
 from mercurial import (
     error,
     lock,
-    scmutil,
+    vfs as vfsmod,
 )
 
 testlockname = 'testlock'
@@ -36,7 +36,7 @@
         self._acquirecalled = False
         self._releasecalled = False
         self._postreleasecalled = False
-        self.vfs = scmutil.vfs(dir, audit=False)
+        self.vfs = vfsmod.vfs(dir, audit=False)
         self._pidoffset = pidoffset
 
     def makelock(self, *args, **kwargs):
--- a/tests/test-log.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-log.t	Tue Apr 18 12:24:34 2017 -0400
@@ -148,7 +148,7 @@
 
   $ hg log -f -l1 --style something
   abort: style 'something' not found
-  (available styles: bisect, changelog, compact, default, phases, status, xml)
+  (available styles: bisect, changelog, compact, default, phases, show, status, xml)
   [255]
 
 -f, phases style
--- a/tests/test-logtoprocess.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-logtoprocess.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,3 +1,7 @@
+ATTENTION: logtoprocess runs commands asynchronously. If you want to test a
+command's output, be sure to append "| cat" to the hg command so that the
+test waits for the output. Otherwise the test will be flaky.
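+
+Why "| cat" works (our reading, not stated by the extension docs): the
+processes spawned by logtoprocess inherit hg's stdout, so when stdout is a
+pipe, cat only sees EOF once every writer has exited and closed its end.
+A minimal shell illustration of the same effect:
+
+(echo fast; (sleep 1; echo slow) &) | cat    # cat waits for both writers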
+
 Test if logtoprocess correctly captures command-related log calls.
 
   $ hg init
@@ -10,6 +14,7 @@
   > def foo(ui, repo):
   >     ui.log('foo', 'a message: %(bar)s\n', bar='spam')
   > EOF
+  $ cp $HGRCPATH $HGRCPATH.bak
   $ cat >> $HGRCPATH << EOF
   > [extensions]
   > logtoprocess=
@@ -33,9 +38,8 @@
 Running a command triggers both a ui.log('command') and a
 ui.log('commandfinish') call. The foo command also uses ui.log.
 
-Use head to ensure we wait for all lines to be produced, and sort to avoid
-ordering issues between the various processes we spawn:
-  $ hg foo | head -n 17 | sort
+Use sort to avoid ordering issues between the various processes we spawn:
+  $ hg foo | cat | sort
   
   
   
@@ -52,3 +56,18 @@
   logtoprocess commandfinish output:
   logtoprocess foo output:
   spam
+
+Confirm that logging blocked time catches stdio properly:
+  $ cp $HGRCPATH.bak $HGRCPATH
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > logtoprocess=
+  > pager=
+  > [logtoprocess]
+  > uiblocked=echo "\$EVENT stdio \$OPT_STDIO_BLOCKED ms command \$OPT_COMMAND_DURATION ms"
+  > [ui]
+  > logblockedtimes=True
+  > EOF
+
+  $ hg log | cat
+  uiblocked stdio [0-9]+.[0-9]* ms command [0-9]+.[0-9]* ms (re)
--- a/tests/test-mac-packages.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-mac-packages.t	Tue Apr 18 12:24:34 2017 -0400
@@ -2,15 +2,14 @@
 
   $ . "$TESTDIR/helpers-testrepo.sh"
 
-  $ OUTPUTDIR=`pwd`
+  $ OUTPUTDIR="`pwd`"
   $ export OUTPUTDIR
   $ KEEPMPKG=yes
   $ export KEEPMPKG
 
   $ cd "$TESTDIR"/..
-  $ rm -rf dist
-  $ make osx > $OUTPUTDIR/build.log 2>&1
-  $ cd $OUTPUTDIR
+  $ make osx > "$OUTPUTDIR/build.log" 2>&1
+  $ cd "$OUTPUTDIR"
   $ ls -d *.pkg
   Mercurial-*-macosx10.*.pkg (glob)
 
@@ -25,6 +24,10 @@
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.py	100644	0/0
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.pyc	100644	0/0
   ./Library/Python/2.7/site-packages/mercurial/pure/bdiff.pyo	100644	0/0
+  $ grep zsh/site-functions/hg boms.txt | cut -d '	' -f 1,2,3
+  ./usr/local/share/zsh/site-functions/hg	100644	0/0
+  $ grep hg-completion.bash boms.txt | cut -d '	' -f 1,2,3
+  ./usr/local/hg/contrib/hg-completion.bash	100644	0/0
   $ egrep 'man[15]' boms.txt | cut -d '	' -f 1,2,3
   ./usr/local/share/man/man1	40755	0/0
   ./usr/local/share/man/man1/hg.1	100644	0/0
@@ -40,7 +43,7 @@
   ./Library/Python/2.7/site-packages/mercurial/localrepo.py	100644	0/0
   ./Library/Python/2.7/site-packages/mercurial/localrepo.pyc	100644	0/0
   ./Library/Python/2.7/site-packages/mercurial/localrepo.pyo	100644	0/0
-  $ grep '/hg	' boms.txt | cut -d '	' -f 1,2,3
+  $ grep 'bin/hg	' boms.txt | cut -d '	' -f 1,2,3
   ./usr/local/bin/hg	100755	0/0
 
 Make sure the built binary uses the system Python interpreter
--- a/tests/test-manifest.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-manifest.py	Tue Apr 18 12:24:34 2017 -0400
@@ -320,7 +320,7 @@
             'bar/baz/qux.py': None,
             'foo': (MISSING, (BIN_HASH_1, '')),
             }
-        self.assertEqual(want, pruned.diff(short, True))
+        self.assertEqual(want, pruned.diff(short, clean=True))
 
     def testReversedLines(self):
         backwards = ''.join(
@@ -467,5 +467,21 @@
     def parsemanifest(self, text):
         return manifestmod.treemanifest('', text)
 
+    def testWalkSubtrees(self):
+        m = self.parsemanifest(A_DEEPER_MANIFEST)
+
+        dirs = [s._dir for s in m.walksubtrees()]
+        self.assertEqual(
+            sorted(['', 'a/', 'a/c/', 'a/d/', 'a/b/', 'a/b/c/', 'a/b/d/']),
+            sorted(dirs)
+        )
+
+        match = matchmod.match('/', '', ['path:a/b/'])
+        dirs = [s._dir for s in m.walksubtrees(matcher=match)]
+        self.assertEqual(
+            sorted(['a/b/', 'a/b/c/', 'a/b/d/']),
+            sorted(dirs)
+        )
+
 if __name__ == '__main__':
     silenttestrunner.main(__name__)
--- a/tests/test-merge-criss-cross.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-merge-criss-cross.t	Tue Apr 18 12:24:34 2017 -0400
@@ -116,11 +116,11 @@
 
   $ f --dump --recurse *
   d2: directory with 2 files
-  d2/f3:
+  d2/f3: (glob)
   >>>
   0 base
   <<<
-  d2/f4:
+  d2/f4: (glob)
   >>>
   0 base
   <<<
@@ -222,11 +222,11 @@
 
   $ f --dump --recurse *
   d2: directory with 2 files
-  d2/f3:
+  d2/f3: (glob)
   >>>
   0 base
   <<<
-  d2/f4:
+  d2/f4: (glob)
   >>>
   0 base
   <<<
@@ -308,11 +308,11 @@
 
   $ f --dump --recurse *
   d2: directory with 2 files
-  d2/f3:
+  d2/f3: (glob)
   >>>
   0 base
   <<<
-  d2/f4:
+  d2/f4: (glob)
   >>>
   0 base
   <<<
--- a/tests/test-minirst.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-minirst.py	Tue Apr 18 12:24:34 2017 -0400
@@ -118,6 +118,13 @@
 | This is the first line.
   The line continues here.
 | This is the second line.
+
+Bullet lists are also detected:
+
+* This is the first bullet
+* This is the second bullet
+  It has 2 lines
+* This is the third bullet
 """
 
 debugformats('lists', lists)
--- a/tests/test-minirst.py.out	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-minirst.py.out	Tue Apr 18 12:24:34 2017 -0400
@@ -187,6 +187,12 @@
 
 This is the first line. The line continues here.
 This is the second line.
+
+Bullet lists are also detected:
+
+* This is the first bullet
+* This is the second bullet It has 2 lines
+* This is the third bullet
 ----------------------------------------------------------------------
 
 30 column format:
@@ -231,6 +237,14 @@
 This is the first line. The
 line continues here.
 This is the second line.
+
+Bullet lists are also
+detected:
+
+* This is the first bullet
+* This is the second bullet It
+  has 2 lines
+* This is the third bullet
 ----------------------------------------------------------------------
 
 html format:
@@ -276,6 +290,14 @@
  <li> This is the first line.   The line continues here.
  <li> This is the second line.
 </ol>
+<p>
+Bullet lists are also detected:
+</p>
+<ul>
+ <li> This is the first bullet
+ <li> This is the second bullet   It has 2 lines
+ <li> This is the third bullet
+</ul>
 ----------------------------------------------------------------------
 
 == options ==
--- a/tests/test-mq-qimport.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-mq-qimport.t	Tue Apr 18 12:24:34 2017 -0400
@@ -247,11 +247,28 @@
   this-name-is-better
   url.diff
 
+import a patch with a bad filename
+
+  $ touch '../ bad.diff'
+  $ hg qimport '../ bad.diff'
+  abort: patch name cannot begin or end with whitespace
+  [255]
+  $ touch '.hg/patches/ bad.diff'
+  $ hg qimport -e ' bad.diff'
+  abort: patch name cannot begin or end with whitespace
+  [255]
+
 qimport with a bad name should abort before reading the file
 
   $ hg qimport non-existent-file --name .hg
   abort: patch name cannot begin with ".hg"
   [255]
+  $ hg qimport non-existent-file --name ' foo'
+  abort: patch name cannot begin or end with whitespace
+  [255]
+  $ hg qimport non-existent-file --name 'foo '
+  abort: patch name cannot begin or end with whitespace
+  [255]
 
 qimport http:// patch with leading slashes in url
 
--- a/tests/test-mq-qnew.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-mq-qnew.t	Tue Apr 18 12:24:34 2017 -0400
@@ -22,6 +22,8 @@
   >     hg qnew 'foo#bar'
   >     hg qnew 'foo:bar'
   >     hg qnew "`echo foo; echo bar`"
+  >     hg qnew ' foo'
+  >     hg qnew 'foo '
   > 
   >     hg qinit -c
   > 
@@ -112,6 +114,8 @@
   abort: '#' cannot be used in the name of a patch
   abort: ':' cannot be used in the name of a patch
   abort: '\n' cannot be used in the name of a patch
+  abort: patch name cannot begin or end with whitespace
+  abort: patch name cannot begin or end with whitespace
   % qnew with name containing slash
   abort: path ends in directory separator: foo/ (glob)
   abort: "foo" already exists as a directory
@@ -180,6 +184,8 @@
   abort: '#' cannot be used in the name of a patch
   abort: ':' cannot be used in the name of a patch
   abort: '\n' cannot be used in the name of a patch
+  abort: patch name cannot begin or end with whitespace
+  abort: patch name cannot begin or end with whitespace
   % qnew with name containing slash
   abort: path ends in directory separator: foo/ (glob)
   abort: "foo" already exists as a directory
@@ -313,36 +319,3 @@
   > [hooks]
   > pretxncommit.unexpectedabort =
   > EOF
-
-#if unix-permissions
-
-Test handling default message with the patch filename with tail whitespaces
-
-  $ cat > $TESTTMP/editor.sh << EOF
-  > echo "==== before editing"
-  > cat \$1
-  > echo "===="
-  > echo "[mq]: patch        " > \$1
-  > EOF
-
-  $ rm -f .hg/last-message.txt
-  $ hg status
-  $ HGEDITOR="sh $TESTTMP/editor.sh" hg qnew -e "patch "
-  ==== before editing
-  
-  
-  HG: Enter commit message.  Lines beginning with 'HG:' are removed.
-  HG: Leave message empty to use default message.
-  HG: --
-  HG: user: test
-  HG: branch 'default'
-  HG: no files changed
-  ====
-  $ cat ".hg/patches/patch "
-  # HG changeset patch
-  # Parent  0000000000000000000000000000000000000000
-  
-
-  $ cd ..
-
-#endif
--- a/tests/test-mq.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-mq.t	Tue Apr 18 12:24:34 2017 -0400
@@ -25,7 +25,7 @@
   Known patches are represented as patch files in the .hg/patches directory.
   Applied patches are both patch files and changesets.
   
-  Common tasks (use 'hg help command' for more details):
+  Common tasks (use 'hg help COMMAND' for more details):
   
     create new patch                          qnew
     import existing patch                     qimport
@@ -365,10 +365,10 @@
 
 setting columns & formatted tests truncating (issue1912)
 
-  $ COLUMNS=4 hg qseries --config ui.formatted=true
+  $ COLUMNS=4 hg qseries --config ui.formatted=true --color=no
   test.patch
   test2.patch
-  $ COLUMNS=20 hg qseries --config ui.formatted=true -vs
+  $ COLUMNS=20 hg qseries --config ui.formatted=true -vs --color=no
   0 A test.patch: f...
   1 A test2.patch: 
   $ hg qpop
--- a/tests/test-obsolete-checkheads.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-obsolete-checkheads.t	Tue Apr 18 12:24:34 2017 -0400
@@ -254,9 +254,27 @@
   @  b4952fcf48cf (public) add base
   
 
-Push should not complain about new heads.
+We do not have enough data to make the right decision, so the push should fail
+
+  $ hg push
+  pushing to $TESTTMP/remote (glob)
+  searching for changes
+  remote has heads on branch 'default' that are not known locally: c70b08862e08
+  abort: push creates new remote head 71e3228bffe1!
+  (pull and merge or see 'hg help push' for details about pushing new heads)
+  [255]
 
-  $ hg push --traceback
+Pulling the missing data makes it work
+
+  $ hg pull
+  pulling from $TESTTMP/remote (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  (run 'hg heads' to see heads)
+  $ hg push
   pushing to $TESTTMP/remote (glob)
   searching for changes
   adding changesets
--- a/tests/test-obsolete.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-obsolete.t	Tue Apr 18 12:24:34 2017 -0400
@@ -3,7 +3,7 @@
   > # public changeset are not obsolete
   > publish=false
   > [ui]
-  > logtemplate="{rev}:{node|short} ({phase}{if(troubles, ' {troubles}')}) [{tags} {bookmarks}] {desc|firstline}\n"
+  > logtemplate="{rev}:{node|short} ({phase}{if(obsolete, ' *{obsolete}*')}{if(troubles, ' {troubles}')}) [{tags} {bookmarks}] {desc|firstline}\n"
   > EOF
   $ mkcommit() {
   >    echo "$1" > "$1"
@@ -155,9 +155,9 @@
   5:5601fb93a350 (draft) [tip ] add new_3_c
   $ hg heads --hidden
   5:5601fb93a350 (draft) [tip ] add new_3_c
-  4:ca819180edb9 (draft) [ ] add new_2_c
-  3:cdbce2fbb163 (draft) [ ] add new_c
-  2:245bde4270cd (draft) [ ] add original_c
+  4:ca819180edb9 (draft *obsolete*) [ ] add new_2_c
+  3:cdbce2fbb163 (draft *obsolete*) [ ] add new_c
+  2:245bde4270cd (draft *obsolete*) [ ] add original_c
 
 
 check that summary does not report them
@@ -392,11 +392,11 @@
   $ hg -R clone-dest log -G --hidden
   @  6:6f9641995072 (draft) [tip ] add n3w_3_c
   |
-  | x  5:5601fb93a350 (draft) [ ] add new_3_c
+  | x  5:5601fb93a350 (draft *obsolete*) [ ] add new_3_c
   |/
-  | x  4:ca819180edb9 (draft) [ ] add new_2_c
+  | x  4:ca819180edb9 (draft *obsolete*) [ ] add new_2_c
   |/
-  | x  3:cdbce2fbb163 (draft) [ ] add new_c
+  | x  3:cdbce2fbb163 (draft *obsolete*) [ ] add new_c
   |/
   | o  2:245bde4270cd (public) [ ] add original_c
   |/
@@ -475,7 +475,7 @@
   $ hg debugobsolete | grep `getid original_d`
   94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
   $ hg log -r 'obsolete()'
-  4:94b33453f93b (draft) [ ] add original_d
+  4:94b33453f93b (draft *obsolete*) [ ] add original_d
   $ hg summary
   parent: 5:cda648ca50f5 tip (unstable)
    add original_e
@@ -487,7 +487,7 @@
   $ hg log -G -r '::unstable()'
   @  5:cda648ca50f5 (draft unstable) [tip ] add original_e
   |
-  x  4:94b33453f93b (draft) [ ] add original_d
+  x  4:94b33453f93b (draft *obsolete*) [ ] add original_d
   |
   o  3:6f9641995072 (draft) [ ] add n3w_3_c
   |
@@ -526,7 +526,7 @@
   1:7c3bad9141dc (public) [ ] add b
   2:245bde4270cd (public) [ ] add original_c
   3:6f9641995072 (draft) [ ] add n3w_3_c
-  4:94b33453f93b (draft) [ ] add original_d
+  4:94b33453f93b (draft *obsolete*) [ ] add original_d
   5:cda648ca50f5 (draft unstable) [tip ] add original_e
   $ hg push ../tmpf -f # -f because we push unstable too
   pushing to ../tmpf
@@ -550,7 +550,7 @@
   $ hg log -G
   @  5:cda648ca50f5 (draft unstable) [tip ] add original_e
   |
-  x  4:94b33453f93b (draft) [ ] add original_d
+  x  4:94b33453f93b (draft *obsolete*) [ ] add original_d
   |
   o  3:6f9641995072 (draft) [ ] add n3w_3_c
   |
@@ -588,9 +588,9 @@
   $ hg log --hidden --graph
   @  6:3de5eca88c00 (draft) [tip ] add obsolete_e
   |
-  | x  5:cda648ca50f5 (draft) [ ] add original_e
+  | x  5:cda648ca50f5 (draft *obsolete*) [ ] add original_e
   | |
-  | x  4:94b33453f93b (draft) [ ] add original_d
+  | x  4:94b33453f93b (draft *obsolete*) [ ] add original_d
   |/
   o  3:6f9641995072 (draft) [ ] add n3w_3_c
   |
@@ -811,6 +811,11 @@
   summary:     add babar
   
 
+test the "obsolete" templatekw
+
+  $ hg log -r 'obsolete()'
+  6:3de5eca88c00 (draft *obsolete*) [ ] add obsolete_e
+
 test the "troubles" templatekw
 
   $ hg log -r 'bumped() and unstable()'
@@ -825,6 +830,13 @@
   trouble:     unstable, bumped
   summary:     add babar
   
+  $ hg log -T default -r 'obsolete()'
+  changeset:   6:3de5eca88c00
+  parent:      3:6f9641995072
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     add obsolete_e
+  
 
 test summary output
 
@@ -839,6 +851,17 @@
   phases: 4 draft
   unstable: 2 changesets
   bumped: 1 changesets
+  $ hg up -r 'obsolete()'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg summary
+  parent: 6:3de5eca88c00  (obsolete)
+   add obsolete_e
+  branch: default
+  commit: (clean)
+  update: 3 new changesets (update)
+  phases: 4 draft
+  unstable: 2 changesets
+  bumped: 1 changesets
 
 Test incoming/outcoming with changesets obsoleted remotely, known locally
 ===============================================================================
@@ -927,7 +950,7 @@
   $ hg log -G
   @  3:323a9c3ddd91 (draft) [tip ] A
   |
-  | x  1:29f0c6921ddd (draft) [visible ] A
+  | x  1:29f0c6921ddd (draft *obsolete*) [visible ] A
   |/
   o  0:d20a80d4def3 (draft) [ ] base
   
@@ -980,9 +1003,9 @@
   $ hg log -G --hidden
   @  3:b7d587542d40 (draft) [tip ] B+
   |
-  | x  2:eb95e9297e18 (draft) [ ] temporary amend commit for 44526ebb0f98
+  | x  2:eb95e9297e18 (draft *obsolete*) [ ] temporary amend commit for 44526ebb0f98
   | |
-  | x  1:44526ebb0f98 (draft) [ ] B
+  | x  1:44526ebb0f98 (draft *obsolete*) [ ] B
   |/
   o  0:4b34ecfb0d56 (draft) [ ] A
   
@@ -1122,7 +1145,7 @@
   $ hg commit --amend -m "message"
   $ hg book bookb -r 13bedc178fce --hidden
   $ hg log -r 13bedc178fce
-  5:13bedc178fce (draft) [ bookb] add b
+  5:13bedc178fce (draft *obsolete*) [ bookb] add b
   $ hg book -d bookb
   $ hg log -r 13bedc178fce
   abort: hidden revision '13bedc178fce'!
@@ -1162,9 +1185,9 @@
   $ hg log -G --hidden
   @  4:b0551702f918 (draft) [tip ] 2
   |
-  | x  3:f27abbcc1f77 (draft) [ ] temporary amend commit for e008cf283490
+  | x  3:f27abbcc1f77 (draft *obsolete*) [ ] temporary amend commit for e008cf283490
   | |
-  | x  2:e008cf283490 (draft) [ ] 2
+  | x  2:e008cf283490 (draft *obsolete*) [ ] 2
   |/
   o  1:e016b03fd86f (draft) [ ] 1
   |
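
The test-obsolete.t hunks above all trace back to one change: obsolete
changesets now render a "*obsolete*" flag next to their phase, the
obsolete() revset selects them, and hg summary labels an obsolete parent.
A hedged sketch of the data model behind that flag, with illustrative node
names only (not Mercurial's API):

    # An obsolescence marker records a precursor and its successors; a
    # changeset is obsolete once some marker names it as precursor.
    markers = [('oldnode', ['newnode'])]  # precursor -> successors
    obsolete = set(prec for prec, succs in markers)

    def phasebanner(node, phase='draft'):
        # mirrors the "(draft *obsolete*)" rendering in the tests above
        flags = [phase] + (['*obsolete*'] if node in obsolete else [])
        return '(%s)' % ' '.join(flags)

    assert phasebanner('oldnode') == '(draft *obsolete*)'
    assert phasebanner('newnode') == '(draft)'
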
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-pager-legacy.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,233 @@
+  $ cat >> fakepager.py <<EOF
+  > import sys
+  > for line in sys.stdin:
+  >     sys.stdout.write('paged! %r\n' % line)
+  > EOF
+
+Enable ui.formatted because the pager won't fire without it, and set
+up the pager extension to use our fake pager, which lets us see when
+the pager was running.
+  $ cat >> $HGRCPATH <<EOF
+  > [ui]
+  > formatted = yes
+  > color = no
+  > [extensions]
+  > pager=
+  > [pager]
+  > pager = python $TESTTMP/fakepager.py
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+  $ echo a >> a
+  $ hg add a
+  $ hg ci -m 'add a'
+  $ for x in `python $TESTDIR/seq.py 1 10`; do
+  >   echo a $x >> a
+  >   hg ci -m "modify a $x"
+  > done
+
+By default diff and log are paged, but summary is not:
+
+  $ hg diff -c 2 --pager=yes
+  paged! 'diff -r f4be7687d414 -r bce265549556 a\n'
+  paged! '--- a/a\tThu Jan 01 00:00:00 1970 +0000\n'
+  paged! '+++ b/a\tThu Jan 01 00:00:00 1970 +0000\n'
+  paged! '@@ -1,2 +1,3 @@\n'
+  paged! ' a\n'
+  paged! ' a 1\n'
+  paged! '+a 2\n'
+
+  $ hg log --limit 2
+  paged! 'changeset:   10:46106edeeb38\n'
+  paged! 'tag:         tip\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 10\n'
+  paged! '\n'
+  paged! 'changeset:   9:6dd8ea7dd621\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 9\n'
+  paged! '\n'
+
+  $ hg summary
+  parent: 10:46106edeeb38 tip
+   modify a 10
+  branch: default
+  commit: (clean)
+  update: (current)
+  phases: 11 draft
+
+We can enable the pager on summary:
+
+  $ hg --config pager.attend-summary=yes summary
+  paged! 'parent: 10:46106edeeb38 tip\n'
+  paged! ' modify a 10\n'
+  paged! 'branch: default\n'
+  paged! 'commit: (clean)\n'
+  paged! 'update: (current)\n'
+  paged! 'phases: 11 draft\n'
+
+  $ hg --config pager.attend-diff=no diff -c 2
+  diff -r f4be7687d414 -r bce265549556 a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,3 @@
+   a
+   a 1
+  +a 2
+
+If we completely change the attend list, that's respected:
+  $ hg --config pager.attend=summary diff -c 2
+  diff -r f4be7687d414 -r bce265549556 a
+  --- a/a	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
+  @@ -1,2 +1,3 @@
+   a
+   a 1
+  +a 2
+
+If 'log' is in attend, then 'history' should also be paged:
+  $ hg history --limit 2 --config pager.attend=log
+  paged! 'changeset:   10:46106edeeb38\n'
+  paged! 'tag:         tip\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 10\n'
+  paged! '\n'
+  paged! 'changeset:   9:6dd8ea7dd621\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 9\n'
+  paged! '\n'
+
+Possible bug: history is explicitly ignored in pager config, but
+because log is in the attend list it still gets pager treatment.
+
+  $ hg history --limit 2 --config pager.attend=log \
+  >   --config pager.ignore=history
+  paged! 'changeset:   10:46106edeeb38\n'
+  paged! 'tag:         tip\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 10\n'
+  paged! '\n'
+  paged! 'changeset:   9:6dd8ea7dd621\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 9\n'
+  paged! '\n'
+
+Possible bug: history is explicitly marked attend-history=no, but it
+still gets paged because log is still in the attend list.
+
+  $ hg history --limit 2 --config pager.attend-history=no
+  paged! 'changeset:   10:46106edeeb38\n'
+  paged! 'tag:         tip\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 10\n'
+  paged! '\n'
+  paged! 'changeset:   9:6dd8ea7dd621\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 9\n'
+  paged! '\n'
+
+Possible bug: disabling pager for log but enabling it for history
+doesn't result in history being paged.
+
+  $ hg history --limit 2 --config pager.attend-log=no \
+  > --config pager.attend-history=yes
+  changeset:   10:46106edeeb38
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     modify a 10
+  
+  changeset:   9:6dd8ea7dd621
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     modify a 9
+  
+Pager should not start if stdout is not a tty.
+
+  $ hg log -l1 -q --config ui.formatted=False
+  10:46106edeeb38
+
+Pager with color enabled allows colors to come through by default,
+even though stdout is no longer a tty.
+  $ cat >> $HGRCPATH <<EOF
+  > [ui]
+  > color = yes
+  > [color]
+  > mode = ansi
+  > EOF
+  $ hg log --limit 3
+  paged! '\x1b[0;33mchangeset:   10:46106edeeb38\x1b[0m\n'
+  paged! 'tag:         tip\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 10\n'
+  paged! '\n'
+  paged! '\x1b[0;33mchangeset:   9:6dd8ea7dd621\x1b[0m\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 9\n'
+  paged! '\n'
+  paged! '\x1b[0;33mchangeset:   8:cff05a6312fe\x1b[0m\n'
+  paged! 'user:        test\n'
+  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
+  paged! 'summary:     modify a 8\n'
+  paged! '\n'
+
+Pager works with shell aliases.
+
+  $ cat >> $HGRCPATH <<EOF
+  > [alias]
+  > echoa = !echo a
+  > EOF
+
+  $ hg echoa
+  a
+  $ hg --config pager.attend-echoa=yes echoa
+  paged! 'a\n'
+
+Pager works with hg aliases including environment variables.
+
+  $ cat >> $HGRCPATH <<'EOF'
+  > [alias]
+  > printa = log -T "$A\n" -r 0
+  > EOF
+
+  $ A=1 hg --config pager.attend-printa=yes printa
+  paged! '1\n'
+  $ A=2 hg --config pager.attend-printa=yes printa
+  paged! '2\n'
+
+Something that's explicitly attended is still not paginated if the
+pager is globally set to off using a flag:
+  $ A=2 hg --config pager.attend-printa=yes printa --pager=no
+  2
+
+Pager should not override the exit code of other commands
+
+  $ cat >> $TESTTMP/fortytwo.py <<'EOF'
+  > from mercurial import cmdutil, commands
+  > cmdtable = {}
+  > command = cmdutil.command(cmdtable)
+  > @command('fortytwo', [], 'fortytwo', norepo=True)
+  > def fortytwo(ui, *opts):
+  >     ui.write('42\n')
+  >     return 42
+  > EOF
+
+  $ cat >> $HGRCPATH <<'EOF'
+  > [extensions]
+  > fortytwo = $TESTTMP/fortytwo.py
+  > EOF
+
+  $ hg fortytwo --pager=on
+  paged! '42\n'
+  [42]
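
tests/test-pager-legacy.t above freezes the pager extension's behavior as
pager support moves into core. A minimal sketch of the mechanism the fake
pager makes observable, assuming nothing beyond the standard library (an
illustration, not Mercurial's ui code):

    import subprocess

    def run_paged(pagercmd, produce):
        # Spawn the configured pager and point the command's output at
        # its stdin; fakepager.py above then tags each line "paged!".
        pager = subprocess.Popen(pagercmd, shell=True,
                                 stdin=subprocess.PIPE)
        try:
            produce(pager.stdin)
        finally:
            pager.stdin.close()
            pager.wait()  # drain the pager, keep the command's exit code

    run_paged('cat', lambda out: out.write('hello\n'))
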
--- a/tests/test-pager.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-pager.t	Tue Apr 18 12:24:34 2017 -0400
@@ -10,8 +10,7 @@
   $ cat >> $HGRCPATH <<EOF
   > [ui]
   > formatted = yes
-  > [extensions]
-  > pager=
+  > color = no
   > [pager]
   > pager = python $TESTTMP/fakepager.py
   > EOF
@@ -26,7 +25,7 @@
   >   hg ci -m "modify a $x"
   > done
 
-By default diff and log are paged, but summary is not:
+By default diff and log are paged, but id is not:
 
   $ hg diff -c 2 --pager=yes
   paged! 'diff -r f4be7687d414 -r bce265549556 a\n'
@@ -50,26 +49,17 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-  $ hg summary
-  parent: 10:46106edeeb38 tip
-   modify a 10
-  branch: default
-  commit: (clean)
-  update: (current)
-  phases: 11 draft
+  $ hg id
+  46106edeeb38 tip
 
-We can enable the pager on summary:
+We can enable the pager on id:
 
-  $ hg --config pager.attend-summary=yes summary
-  paged! 'parent: 10:46106edeeb38 tip\n'
-  paged! ' modify a 10\n'
-  paged! 'branch: default\n'
-  paged! 'commit: (clean)\n'
-  paged! 'update: (current)\n'
-  paged! 'phases: 11 draft\n'
-
-If we completely change the attend list that's respected:
+BROKEN: should be paged
+  $ hg --config pager.attend-id=yes id
+  46106edeeb38 tip
 
+Setting attend-$COMMAND to a false value works, even with pager in
+core:
   $ hg --config pager.attend-diff=no diff -c 2
   diff -r f4be7687d414 -r bce265549556 a
   --- a/a	Thu Jan 01 00:00:00 1970 +0000
@@ -79,15 +69,6 @@
    a 1
   +a 2
 
-  $ hg --config pager.attend=summary diff -c 2
-  diff -r f4be7687d414 -r bce265549556 a
-  --- a/a	Thu Jan 01 00:00:00 1970 +0000
-  +++ b/a	Thu Jan 01 00:00:00 1970 +0000
-  @@ -1,2 +1,3 @@
-   a
-   a 1
-  +a 2
-
 If 'log' is in attend, then 'history' should also be paged:
   $ hg history --limit 2 --config pager.attend=log
   paged! 'changeset:   10:46106edeeb38\n'
@@ -102,66 +83,22 @@
   paged! 'summary:     modify a 9\n'
   paged! '\n'
 
-Possible bug: history is explicitly ignored in pager config, but
-because log is in the attend list it still gets pager treatment.
-
-  $ hg history --limit 2 --config pager.attend=log \
-  >   --config pager.ignore=history
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: history is explicitly marked as attend-history=no, but
-it doesn't fail to get paged because log is still in the attend list.
-
-  $ hg history --limit 2 --config pager.attend-history=no
-  paged! 'changeset:   10:46106edeeb38\n'
-  paged! 'tag:         tip\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 10\n'
-  paged! '\n'
-  paged! 'changeset:   9:6dd8ea7dd621\n'
-  paged! 'user:        test\n'
-  paged! 'date:        Thu Jan 01 00:00:00 1970 +0000\n'
-  paged! 'summary:     modify a 9\n'
-  paged! '\n'
-
-Possible bug: disabling pager for log but enabling it for history
-doesn't result in history being paged.
-
-  $ hg history --limit 2 --config pager.attend-log=no \
-  > --config pager.attend-history=yes
-  changeset:   10:46106edeeb38
-  tag:         tip
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 10
-  
-  changeset:   9:6dd8ea7dd621
-  user:        test
-  date:        Thu Jan 01 00:00:00 1970 +0000
-  summary:     modify a 9
-  
-
 Pager should not start if stdout is not a tty.
 
   $ hg log -l1 -q --config ui.formatted=False
   10:46106edeeb38
 
+Pager should be disabled if pager.pager is empty (otherwise the output
+would be silently lost).
+
+  $ hg log -l1 -q --config pager.pager=
+  10:46106edeeb38
+
 Pager with color enabled allows colors to come through by default,
 even though stdout is no longer a tty.
   $ cat >> $HGRCPATH <<EOF
-  > [extensions]
-  > color=
+  > [ui]
+  > color = yes
   > [color]
   > mode = ansi
   > EOF
@@ -183,6 +120,33 @@
   paged! 'summary:     modify a 8\n'
   paged! '\n'
 
+An invalid pager command name is reported sensibly if we don't have to
+use shell=True in the subprocess call:
+  $ hg log --limit 3 --config pager.pager=this-command-better-never-exist
+  missing pager command 'this-command-better-never-exist', skipping pager
+  \x1b[0;33mchangeset:   10:46106edeeb38\x1b[0m (esc)
+  tag:         tip
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     modify a 10
+  
+  \x1b[0;33mchangeset:   9:6dd8ea7dd621\x1b[0m (esc)
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     modify a 9
+  
+  \x1b[0;33mchangeset:   8:cff05a6312fe\x1b[0m (esc)
+  user:        test
+  date:        Thu Jan 01 00:00:00 1970 +0000
+  summary:     modify a 8
+  
+
+A complicated pager command gets worse behavior. Bonus points if you can
+improve this.
+  $ hg log --limit 3 \
+  >   --config pager.pager='this-command-better-never-exist --seriously' \
+  >  2>/dev/null || true
+
 Pager works with shell aliases.
 
   $ cat >> $HGRCPATH <<EOF
@@ -192,8 +156,9 @@
 
   $ hg echoa
   a
+BROKEN: should be paged
   $ hg --config pager.attend-echoa=yes echoa
-  paged! 'a\n'
+  a
 
 Pager works with hg aliases including environment variables.
 
@@ -207,6 +172,11 @@
   $ A=2 hg --config pager.attend-printa=yes printa
   paged! '2\n'
 
+Something that's explicitly attended is still not paginated if the
+pager is globally set to off using a flag:
+  $ A=2 hg --config pager.attend-printa=yes printa --pager=no
+  2
+
 Pager should not override the exit code of other commands
 
   $ cat >> $TESTTMP/fortytwo.py <<'EOF'
@@ -227,3 +197,87 @@
   $ hg fortytwo --pager=on
   paged! '42\n'
   [42]
+
+A command that asks for paging using ui.pager() directly works:
+  $ hg blame a
+  paged! ' 0: a\n'
+  paged! ' 1: a 1\n'
+  paged! ' 2: a 2\n'
+  paged! ' 3: a 3\n'
+  paged! ' 4: a 4\n'
+  paged! ' 5: a 5\n'
+  paged! ' 6: a 6\n'
+  paged! ' 7: a 7\n'
+  paged! ' 8: a 8\n'
+  paged! ' 9: a 9\n'
+  paged! '10: a 10\n'
+but not with HGPLAIN
+  $ HGPLAIN=1 hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+explicit flags work too:
+  $ hg blame --pager=no a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+
+Put annotate in the ignore list for pager:
+  $ cat >> $HGRCPATH <<EOF
+  > [pager]
+  > ignore = annotate
+  > EOF
+  $ hg blame a
+   0: a
+   1: a 1
+   2: a 2
+   3: a 3
+   4: a 4
+   5: a 5
+   6: a 6
+   7: a 7
+   8: a 8
+   9: a 9
+  10: a 10
+
+Environment variables like LESS and LV are set automatically:
+  $ cat > $TESTTMP/printlesslv.py <<EOF
+  > import os, sys
+  > sys.stdin.read()
+  > for name in ['LESS', 'LV']:
+  >     sys.stdout.write(('%s=%s\n') % (name, os.environ.get(name, '-')))
+  > sys.stdout.flush()
+  > EOF
+
+  $ cat >> $HGRCPATH <<EOF
+  > [alias]
+  > noop = log -r 0 -T ''
+  > [ui]
+  > formatted=1
+  > [pager]
+  > pager = $PYTHON $TESTTMP/printlesslv.py
+  > EOF
+  $ unset LESS
+  $ unset LV
+  $ hg noop --pager=on
+  LESS=FRX
+  LV=-c
+  $ LESS=EFGH hg noop --pager=on
+  LESS=EFGH
+  LV=-c
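
The final hunk above pins down the environment the pager child sees:
LESS=FRX and LV=-c are supplied only when the user has not already set
them. A small sketch of that defaulting rule (illustrative, not the
in-core implementation):

    import os

    def pagerenv(base=None):
        env = dict(os.environ if base is None else base)
        env.setdefault('LESS', 'FRX')  # quit if one screen, keep colors
        env.setdefault('LV', '-c')     # let lv pass ANSI colors through
        return env

    # A user-supplied value wins, matching the LESS=EFGH case above:
    assert pagerenv({'LESS': 'EFGH'})['LESS'] == 'EFGH'
    assert pagerenv({})['LV'] == '-c'
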
--- a/tests/test-parseindex.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-parseindex.t	Tue Apr 18 12:24:34 2017 -0400
@@ -26,7 +26,7 @@
   summary:     change foo
   
   $ cat >> test.py << EOF
-  > from mercurial import changelog, scmutil
+  > from mercurial import changelog, vfs
   > from mercurial.node import *
   > 
   > class singlebyteread(object):
@@ -42,7 +42,7 @@
   >         return getattr(self.real, key)
   > 
   > def opener(*args):
-  >     o = scmutil.opener(*args)
+  >     o = vfs.vfs(*args)
   >     def wrapper(*a):
   >         f = o(*a)
   >         return singlebyteread(f)
@@ -67,8 +67,8 @@
   $ cd a
 
   $ python <<EOF
-  > from mercurial import changelog, scmutil
-  > cl = changelog.changelog(scmutil.vfs('.hg/store'))
+  > from mercurial import changelog, vfs
+  > cl = changelog.changelog(vfs.vfs('.hg/store'))
   > print 'good heads:'
   > for head in [0, len(cl) - 1, -1]:
   >     print'%s: %r' % (head, cl.reachableroots(0, [head], [0]))
@@ -147,8 +147,8 @@
 
   $ cat <<EOF > test.py
   > import sys
-  > from mercurial import changelog, scmutil
-  > cl = changelog.changelog(scmutil.vfs(sys.argv[1]))
+  > from mercurial import changelog, vfs
+  > cl = changelog.changelog(vfs.vfs(sys.argv[1]))
   > n0, n1 = cl.node(0), cl.node(1)
   > ops = [
   >     ('reachableroots',
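
The test-parseindex.t hunks track an internal rename: the vfs/opener
classes moved from mercurial.scmutil into a new mercurial.vfs module. For
an out-of-tree script that must run against releases on both sides of the
move, a hedged compatibility import might look like:

    try:
        from mercurial.vfs import vfs as makevfs      # 4.2 and later
    except ImportError:
        from mercurial.scmutil import vfs as makevfs  # older releases

    from mercurial import changelog
    # Same construction as in the tests above, on either hg version:
    cl = changelog.changelog(makevfs('.hg/store'))
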
--- a/tests/test-patchbomb-tls.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-patchbomb-tls.t	Tue Apr 18 12:24:34 2017 -0400
@@ -7,13 +7,9 @@
 
   $ python "$TESTDIR/dummysmtpd.py" -p $HGPORT --pid-file a.pid -d \
   > --tls smtps --certificate `pwd`/server.pem
-  listening at localhost:$HGPORT
+  listening at localhost:$HGPORT (?)
   $ cat a.pid >> $DAEMON_PIDS
 
-Ensure hg email output is sent to stdout:
-
-  $ unset PAGER
-
 Set up repository:
 
   $ hg init t
--- a/tests/test-patchbomb.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-patchbomb.t	Tue Apr 18 12:24:34 2017 -0400
@@ -28,9 +28,6 @@
   $ echo "[extensions]" >> $HGRCPATH
   $ echo "patchbomb=" >> $HGRCPATH
 
-Ensure hg email output is sent to stdout
-  $ unset PAGER
-
   $ hg init t
   $ cd t
   $ echo a > a
@@ -2371,6 +2368,128 @@
   
   
 
+test flag template:
+  $ echo foo > intro.text
+  $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -r 0:1 \
+  > --desc intro.text --subject test \
+  > --config patchbomb.flagtemplate='R{rev}'
+  this patch series consists of 2 patches.
+  
+  Cc: 
+  
+  displaying [PATCH 0 of 2 R1] test ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 0 of 2 R1] test
+  Message-Id: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:00 +0000
+  From: quux
+  To: foo
+  
+  foo
+  
+  displaying [PATCH 1 of 2 R0] a ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 1 of 2 R0] a
+  X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  X-Mercurial-Series-Index: 1
+  X-Mercurial-Series-Total: 2
+  Message-Id: <8580ff50825a50c8f716.61@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
+  In-Reply-To: <patchbomb.60@*> (glob)
+  References: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:01 +0000
+  From: quux
+  To: foo
+  
+  # HG changeset patch
+  # User test
+  # Date 1 0
+  #      Thu Jan 01 00:00:01 1970 +0000
+  # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  # Parent  0000000000000000000000000000000000000000
+  a
+  
+  diff -r 000000000000 -r 8580ff50825a a
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:01 1970 +0000
+  @@ -0,0 +1,1 @@
+  +a
+  
+  displaying [PATCH 2 of 2 R1] b ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH 2 of 2 R1] b
+  X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9
+  X-Mercurial-Series-Index: 2
+  X-Mercurial-Series-Total: 2
+  Message-Id: <97d72e5f12c7e84f8506.62@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob)
+  In-Reply-To: <patchbomb.60@*> (glob)
+  References: <patchbomb.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:02 +0000
+  From: quux
+  To: foo
+  
+  # HG changeset patch
+  # User test
+  # Date 2 0
+  #      Thu Jan 01 00:00:02 1970 +0000
+  # Node ID 97d72e5f12c7e84f85064aa72e5a297142c36ed9
+  # Parent  8580ff50825a50c8f716709acdf8de0deddcd6ab
+  b
+  
+  diff -r 8580ff50825a -r 97d72e5f12c7 b
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/b	Thu Jan 01 00:00:02 1970 +0000
+  @@ -0,0 +1,1 @@
+  +b
+  
+
+test flag template plus --flag:
+  $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -r 0 --flag 'V2' \
+  > --config patchbomb.flagtemplate='{branch} {flags}'
+  this patch series consists of 1 patches.
+  
+  Cc: 
+  
+  displaying [PATCH default V2] a ...
+  Content-Type: text/plain; charset="us-ascii"
+  MIME-Version: 1.0
+  Content-Transfer-Encoding: 7bit
+  Subject: [PATCH default V2] a
+  X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  X-Mercurial-Series-Index: 1
+  X-Mercurial-Series-Total: 1
+  Message-Id: <8580ff50825a50c8f716.60@*> (glob)
+  X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob)
+  User-Agent: Mercurial-patchbomb/* (glob)
+  Date: Thu, 01 Jan 1970 00:01:00 +0000
+  From: quux
+  To: foo
+  
+  # HG changeset patch
+  # User test
+  # Date 1 0
+  #      Thu Jan 01 00:00:01 1970 +0000
+  # Node ID 8580ff50825a50c8f716709acdf8de0deddcd6ab
+  # Parent  0000000000000000000000000000000000000000
+  a
+  
+  diff -r 000000000000 -r 8580ff50825a a
+  --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+  +++ b/a	Thu Jan 01 00:00:01 1970 +0000
+  @@ -0,0 +1,1 @@
+  +a
+  
+
 test multi-byte domain parsing:
   $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'`
   $ HGENCODING=iso-8859-1
@@ -2727,6 +2846,7 @@
   @@ -0,0 +1,1 @@
   +d
   
+#if no-windows
 
 Set up a fake sendmail program
 
@@ -2907,3 +3027,4 @@
   (use 'hg push $TESTTMP/t3 -r ff2c9fa2018b -r 3b6f1ec9dde9')
   [255]
 
+#endif
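
The new flagtemplate tests above fix the subject-line grammar: the
evaluated patchbomb.flagtemplate and any --flag values land between the
PATCH counter and the patch title. A hedged sketch of that composition
(patchbomb's real code also handles threading and intro messages):

    def subject_prefix(idx, total, flags):
        # idx 0 is the introductory message, as in "[PATCH 0 of 2 R1]"
        parts = ['PATCH']
        if total > 1:
            parts.append('%d of %d' % (idx, total))
        parts.extend(f for f in flags if f)
        return '[%s]' % ' '.join(parts)

    assert subject_prefix(1, 2, ['R0']) == '[PATCH 1 of 2 R0]'
    assert subject_prefix(1, 1, ['default', 'V2']) == '[PATCH default V2]'
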
--- a/tests/test-phases.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-phases.t	Tue Apr 18 12:24:34 2017 -0400
@@ -479,12 +479,8 @@
   o  0 public A
   
 
-move changeset forward and backward and test kill switch
+move changeset forward and backward
 
-  $ cat <<EOF >> $HGRCPATH
-  > [experimental]
-  > nativephaseskillswitch = true
-  > EOF
   $ hg phase --draft --force 1::4
   $ hg log -G --template "{rev} {phase} {desc}\n"
   @    7 secret merge B' and E
@@ -505,10 +501,6 @@
   
 test partial failure
 
-  $ cat <<EOF >> $HGRCPATH
-  > [experimental]
-  > nativephaseskillswitch = false
-  > EOF
   $ hg phase --public 7
   $ hg phase --draft '5 or 7'
   cannot move 1 changesets to a higher phase, use --force
@@ -590,3 +582,47 @@
   crosschecking files in changesets and manifests
   checking files
   7 files, 8 changesets, 7 total revisions
+
+  $ cd ..
+
+check that HG_PENDING makes pending changes visible to an external
+hook, but only in the related repository.
+
+(emulate a concurrently running transaction by copying
+.hg/phaseroots.pending for reuse in the subsequent test)
+
+  $ cat > $TESTTMP/savepending.sh <<EOF
+  > cp .hg/store/phaseroots.pending  .hg/store/phaseroots.pending.saved
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd push-dest
+  $ hg phase 6
+  6: draft
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" phase -f -s 6
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
+
+(check (in)visibility of phaseroots while a transaction is running in the repo)
+
+  $ cat > $TESTTMP/checkpending.sh <<EOF
+  > echo '@initialrepo'
+  > hg -R "$TESTTMP/initialrepo" phase 7
+  > echo '@push-dest'
+  > hg -R "$TESTTMP/push-dest" phase 6
+  > exit 1 # to avoid changing phase for subsequent tests
+  > EOF
+  $ cd ../initialrepo
+  $ hg phase 7
+  7: public
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" phase -f -s 7
+  @initialrepo
+  7: secret
+  @push-dest
+  6: draft
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
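
The new test-phases.t checks rely on HG_PENDING: while a transaction is
open, external hooks run with HG_PENDING set to the root of the repository
whose not-yet-committed state (here phaseroots.pending) should be visible.
A hedged standalone sketch of the check a hook could perform (an
illustration, not hg's hook machinery):

    import os
    import sys

    root = os.environ.get('HG_PENDING', '')
    if root and os.path.realpath(root) == os.path.realpath(os.getcwd()):
        print 'pending phase moves of this repo are visible here'
    else:
        print 'unrelated repo: only committed phaseroots are visible'
    sys.exit(1)  # abort the transaction, as savepending.sh does above
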
--- a/tests/test-pull-update.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-pull-update.t	Tue Apr 18 12:24:34 2017 -0400
@@ -16,6 +16,21 @@
   $ echo 1.2 > foo
   $ hg ci -Am m
 
+Should respect config to disable dirty update
+  $ hg co -qC 0
+  $ echo 2 > foo
+  $ hg --config experimental.updatecheck=abort pull -u ../tt
+  pulling from ../tt
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  abort: uncommitted changes
+  [255]
+  $ hg --config extensions.strip= strip --no-backup tip
+  $ hg co -qC tip
+
 Should not update to the other topological branch:
 
   $ hg pull -u ../tt
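
The new test-pull-update.t stanza exercises the experimental.updatecheck
knob. A hedged sketch of the gating it implies (names here are
illustrative, not Mercurial's internals):

    def postpull_update(wd_dirty, updatecheck):
        # updatecheck mirrors the experimental.updatecheck config value
        if wd_dirty and updatecheck == 'abort':
            raise Exception('abort: uncommitted changes')
        return 'updating working directory'

    assert postpull_update(False, 'abort') == 'updating working directory'
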
--- a/tests/test-pull.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-pull.t	Tue Apr 18 12:24:34 2017 -0400
@@ -88,7 +88,11 @@
   abort: file:// URLs can only refer to localhost
   [255]
 
+MSYS changes 'file:' into 'file;'
+
+#if no-msys
   $ hg pull -q file:../test  # no-msys
+#endif
 
 It's tricky to make file:// URLs work on every platform with
 regular shell commands.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-partial-C2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,82 @@
+====================================
+Testing head checking code: Case C-2
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category C: case where the branch is only partially obsoleted
+TestCase 2: 2 changeset branch, only the base is rewritten
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * 1 new changeset branch superseding only the base of the old one
+.. * The old branch is still alive (base is obsolete, head is alive)
+..
+.. expected-result:
+..
+.. * push denied
+..
+.. graph-summary:
+..
+..   B ○
+..     |
+..   A ø⇠◔ A'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir C2
+  $ cd C2
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/C2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg log -G --hidden
+  @  f6082bc4ffef (draft): A1
+  |
+  | o  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(A1)'
+  pushing to $TESTTMP/C2/server (glob)
+  searching for changes
+  abort: push creates new remote head f6082bc4ffef!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-partial-C3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,82 @@
+====================================
+Testing head checking code: Case C-3
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category C: case where the branch is only partially obsoleted
+TestCase 3: 2 changeset branch, only the head is pruned
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old head is pruned
+.. * 1 new unrelated branch
+..
+.. expected-result:
+..
+.. * push denied
+..
+.. graph-summary:
+..
+..   B ⊗
+..     |
+..   A ◔ ◔ C
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir C3
+  $ cd C3
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/C3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | o  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/C3/server (glob)
+  searching for changes
+  abort: push creates new remote head 0f88766e02d6!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-partial-C4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,82 @@
+====================================
+Testing head checking code: Case C-4
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category C: case where the branch is only partially obsoleted
+TestCase 4: 2 changeset branch, only the base is pruned
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old base is pruned
+.. * 1 new unrelated branch
+..
+.. expected-result:
+..
+.. * push denied
+..
+.. graph-summary:
+..
+..   B ◔
+..     |
+..   A ⊗ ◔ C
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir C4
+  $ cd C4
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/C4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | o  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(C0)'
+  pushing to $TESTTMP/C4/server (glob)
+  searching for changes
+  abort: push creates new remote head 0f88766e02d6!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,72 @@
+====================================
+Testing head checking code: Case B-1
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 1: single pruned changeset
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * old branch is pruned
+.. * 1 new unrelated branch
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..       ◔ B
+..       |
+..   A ⊗ |
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B1
+  $ cd B1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  $ hg log -G --hidden
+  @  74ff5441d343 (draft): B0
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B1/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  1 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,85 @@
+====================================
+Testing head checking code: Case B-2
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 2: multi-changeset branch, head is pruned, rest is superseded
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old head is pruned
+.. * 1 new branch succeeding the other changeset in the old branch
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ⊗
+..     |
+..   A ø⇠◔ A'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B2
+  $ cd B2
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/B2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  $ hg log -G --hidden
+  @  f6082bc4ffef (draft): A1
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,86 @@
+====================================
+Testing head checking code: Case B-3
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 3: multi-changeset branch, other is pruned, rest is superseded
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old head is superseded
+.. * old other is pruned
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ø⇠◔ B'
+..     | |
+..   A ⊗ |
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B3
+  $ cd B3
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/B3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  25c56d33e4c4 (draft): B1
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,86 @@
+====================================
+Testing head checking code: Case B-4
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 4: multi-changeset branch, all are pruned
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old branch is pruned
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ⊗
+..     |
+..   A ⊗
+..     |
+..     | ◔ C
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B4
+  $ cd B4
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/B4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B5.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,92 @@
+====================================
+Testing head checking code: Case B-5
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 5: multi-changeset branch, mix of pruned and superseded
+
+.. old-state:
+..
+.. * 3 changeset branch
+..
+.. new-state:
+..
+.. * old head is pruned
+.. * old mid is superseded
+.. * old root is pruned
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   C ⊗
+..     |
+..   B ø⇠◔ B'
+..     | |
+..   A ⊗ |
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B5
+  $ cd B5
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ mkcommit C0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/B5/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete --record-parents `getid "desc(A0)"`
+  $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+  $ hg debugobsolete --record-parents `getid "desc(C0)"`
+  $ hg log -G --hidden
+  @  25c56d33e4c4 (draft): B1
+  |
+  | x  821fb21d0dd2 (draft): C0
+  | |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B5/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  3 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B6.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,78 @@
+====================================
+Testing head checking code: Case B-6
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 6: single changeset, superseded then pruned (on a new changeset)
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * old branch is rewritten onto another one,
+.. * the new version is then pruned.
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   A ø⇠⊗ A'
+..     | |
+..     | ◔ B
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B6
+  $ cd B6
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ mkcommit A1
+  $ hg up 'desc(B0)'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  $ hg log -G --hidden
+  x  ba93660aff8d (draft): A1
+  |
+  @  74ff5441d343 (draft): B0
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B6/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B7.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,77 @@
+====================================
+Testing head checking code: Case B-7
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 7: single changeset, superseded then pruned (on an existing changeset)
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * old branch is rewritten onto the common set,
+.. * the new version is then pruned.
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   A ø⇠⊗ A'
+.. B ◔ | |
+..    \|/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B7
+  $ cd B7
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ mkcommit A1
+  $ hg up 'desc(B0)'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  $ hg log -G --hidden
+  x  ba93660aff8d (draft): A1
+  |
+  @  74ff5441d343 (draft): B0
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B7/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-pruned-B8.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,98 @@
+====================================
+Testing head checking code: Case B-8
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category B: simple case involving pruned changesets
+TestCase 8: multi-changeset branch, head is pruned, rest is superseded, through other
+
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * old head is rewritten then pruned
+.. * 1 new branch succeeding the other changeset in the old branch (through another obsolete branch)
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ø⇠⊗ B'
+..     | | A'
+..   A ø⇠ø⇠◔ A''
+..     |/ /
+..     | /
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir B8
+  $ cd B8
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/B8/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ mkcommit B1
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit A2
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg debugobsolete --record-parents `getid "desc(B1)"`
+  $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  $ hg log -G --hidden
+  @  c1f8d089020f (draft): A2
+  |
+  | x  262c8c798096 (draft): B1
+  | |
+  | x  f6082bc4ffef (draft): A1
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/B8/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  4 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,69 @@
+====================================
+Testing head checking code: Case A-1
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 1: single-changeset branch
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * 1 changeset branch succeeding A
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   A ø⇠◔ A'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A1
+  $ cd A1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg log -G --hidden
+  @  f6082bc4ffef (draft): A1
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A1/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  1 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,87 @@
+====================================
+Testing head checking code: Case A-2
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 2: multi-changeset branch
+
+.. old-state:
+..
+.. * 1 branch with 2 changesets
+..
+.. new-state:
+..
+.. * another 2-changeset branch succeeding the old one
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ø⇠◔ B'
+..     | |
+..   A ø⇠◔ A'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A2
+  $ cd A2
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/A2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ mkcommit B1
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  262c8c798096 (draft): B1
+  |
+  o  f6082bc4ffef (draft): A1
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,90 @@
+====================================
+Testing head checking code: Case A-3
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 3: multi-changeset branch with reordering
+
+Push should be allowed
+.. old-state:
+..
+.. * 2 changeset branch
+..
+.. new-state:
+..
+.. * 2 changeset branch succeeding the old one with reordering
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ø⇠⇠
+..     | ⇡
+..   A ø⇠⇠⇠○ A'
+..     | ⇡/
+..     | ○ B'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A3
+  $ cd A3
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/A3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ mkcommit A1
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  c1c7524e9488 (draft): A1
+  |
+  o  25c56d33e4c4 (draft): B1
+  |
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,74 @@
+====================================
+Testing head checking code: Case A-4
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 4: New changeset as child of the successor
+
+.. old-state:
+..
+.. * 1-changeset branch
+..
+.. new-state:
+..
+.. * 2-changeset branch, first is a successor, but head is new
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..       ◔ B
+..       |
+..   A ø⇠◔ A'
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A4
+  $ cd A4
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ mkcommit B0
+  $ hg log -G --hidden
+  @  f40ded968333 (draft): B0
+  |
+  o  f6082bc4ffef (draft): A1
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  1 new obsolescence markers
+
+  $ cd ../../
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A5.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,75 @@
+====================================
+Testing head checking code: Case A-5
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 5: New changeset as parent of the successor
+
+.. old-state:
+..
+.. * 1-changeset branch
+..
+.. new-state:
+..
+.. * 2-changeset branch, head is a successor, but the other is new
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   A ø⇠◔ A'
+..     | |
+..     | ◔ B
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A5
+  $ cd A5
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ mkcommit A1
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg log -G --hidden
+  @  ba93660aff8d (draft): A1
+  |
+  o  74ff5441d343 (draft): B0
+  |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A5/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  1 new obsolescence markers
+
+  $ cd ../..
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A6.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,98 @@
+====================================
+Testing head checking code: Case A-6
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 6: multi-changeset branch, split on multiple others (base on its own branch), same number of heads
+
+.. old-state:
+..
+.. * 2 branches (1-changeset and 2-changeset)
+..
+.. new-state:
+..
+.. * 1 new branch superseding the base of the old-2-changesets-branch,
+.. * 1 new changeset on the old-1-changeset-branch superseding the head of the other
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+.. B'◔⇢ø B
+..   | |
+.. A | ø⇠◔ A'
+..   | |/
+.. C ● |
+..    \|
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A6
+  $ cd A6
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/A6/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg up 'desc(C0)'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  d70a1f75a020 (draft): B1
+  |
+  | o  f6082bc4ffef (draft): A1
+  | |
+  o |  0f88766e02d6 (draft): C0
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A6/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A7.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,98 @@
+====================================
+Testing head checking code: Case A-7
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 7: multi-changeset branch, split on multiple others (head on its own branch), same number of heads
+
+.. old-state:
+..
+.. * 2 branches (1-changeset and 2-changeset)
+..
+.. new-state:
+..
+.. * 1 new branch superseding the head of the old-2-changesets-branch,
+.. * 1 new changeset on the old-1-changeset-branch superseding the base of the other
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..   B ø⇠◔ B'
+..     | |
+.. A'◔⇢ø |
+..   | |/
+.. C ● |
+..    \|
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A7
+  $ cd A7
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/A7/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up 'desc(C0)'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  25c56d33e4c4 (draft): B1
+  |
+  | o  a0802eb7fc1b (draft): A1
+  | |
+  | o  0f88766e02d6 (draft): C0
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A7/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-superceed-A8.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,79 @@
+====================================
+Testing head checking code: Case A-8
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category A: simple case involving a branch being superseded by another.
+TestCase 8: single-changeset branch indirect rewrite
+
+.. old-state:
+..
+.. * 1-changeset branch
+..
+.. new-state:
+..
+.. * 1-changeset branch succeeding A, through another unpushed changeset
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..       A'
+..   A ø⇠ø⇠◔ A''
+..     |/ /
+..     | /
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir A8
+  $ cd A8
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A2
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"`
+  $ hg log -G --hidden
+  @  c1f8d089020f (draft): A2
+  |
+  | x  f6082bc4ffef (draft): A1
+  |/
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/A8/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  2 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,77 @@
+====================================
+Testing head checking code: Case D-1
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 1: remote head is rewritten, but its successor is not part of the push
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * 1 changeset branch succeeding the old branch
+.. * 1 new unrelated branch
+..
+.. expected-result:
+..
+.. * pushing only the unrelated branch: denied
+..
+.. graph-summary:
+..
+..   A ø⇠○ A'
+..     |/
+..     | ◔ B
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D1
+  $ cd D1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ hg log -G --hidden
+  @  74ff5441d343 (draft): B0
+  |
+  | o  f6082bc4ffef (draft): A1
+  |/
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push -r 'desc(B0)'
+  pushing to $TESTTMP/D1/server (glob)
+  searching for changes
+  abort: push creates new remote head 74ff5441d343!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D2.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,93 @@
+====================================
+Testing head checking code: Case D-2
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 2: remote branch has 2 changesets, its head is pruned, the other is rewritten but the result is not pushed
+
+.. old-state:
+..
+.. * 2-changeset branch
+..
+.. new-state:
+..
+.. * old head is pruned
+.. * 1 new branch succeeding the other changeset in the old branch
+.. * 1 new unrelated branch
+..
+.. expected-result:
+..
+.. * push allowed
+.. * pushing only the unrelated branch: denied
+..
+.. graph-summary:
+..
+..   B ⊗
+..     |
+..   A ø⇠○ A'
+..     |/
+..     | ◔ C
+..     |/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D2
+  $ cd D2
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/D2/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete --record-parents `getid "desc(B0)"`
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | o  f6082bc4ffef (draft): A1
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(C0)'
+  pushing to $TESTTMP/D2/server (glob)
+  searching for changes
+  abort: push creates new remote head 0f88766e02d6!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D3.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,110 @@
+====================================
+Testing head checking code: Case D-3
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 3: multi-changeset branch, split on multiple new others, only one of them is pushed
+
+.. old-state:
+..
+.. * 2-changeset branch
+..
+.. new-state:
+..
+.. * 2 new branches, each superseding one changeset in the old one.
+..
+.. expected-result:
+..
+.. * pushing only one of the resulting branches (either of them)
+.. * push denied
+..
+.. graph-summary:
+..
+.. B'◔⇢ø B
+..   | |
+.. A | ø⇠◔ A'
+..   | |/
+..    \|
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D3
+  $ cd D3
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/D3/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  (run 'hg update' to get a working copy)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg up '0'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  25c56d33e4c4 (draft): B1
+  |
+  | o  f6082bc4ffef (draft): A1
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(A1)'
+  pushing to $TESTTMP/D3/server (glob)
+  searching for changes
+  abort: push creates new remote head f6082bc4ffef!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+  $ hg push --rev 'desc(B1)'
+  pushing to $TESTTMP/D3/server (glob)
+  searching for changes
+  abort: push creates new remote head 25c56d33e4c4!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+Extra testing
+-------------
+
+In this case, even a bare push creates more heads
+
+  $ hg push
+  pushing to $TESTTMP/D3/server (glob)
+  searching for changes
+  abort: push creates new remote head 25c56d33e4c4!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D4.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,122 @@
+====================================
+Testing head checking code: Case D-4
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 4: multi-changeset branch, split on multiple others (base on its own new branch)
+
+.. old-state:
+..
+.. * 2 branches (1-changeset and 2-changeset)
+..
+.. new-state:
+..
+.. * 1 new branch superseding the base of the old-2-changesets-branch,
+.. * 1 new changeset on the old-1-changeset-branch superseding the head of the other
+..
+.. expected-result:
+..
+.. * push the new branch only -> push denied (variant a)
+.. * push the existing branch only -> push allowed (variant b)
+.. (pushing all is tested as case A-7)
+..
+.. graph-summary:
+..
+.. (variant a)
+..
+.. B'○⇢ø B
+..   | |
+.. A | ø⇠◔ A'
+..   | |/
+.. C ● |
+..    \|
+..     ●
+..
+.. or (variant b)
+..
+.. B'◔⇢ø B
+..   | |
+.. A | ø⇠○ A'
+..   | |/
+.. C ● |
+..    \|
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D4
+  $ cd D4
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/D4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ hg up 'desc(C0)'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B1
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  d70a1f75a020 (draft): B1
+  |
+  | o  f6082bc4ffef (draft): A1
+  | |
+  o |  0f88766e02d6 (draft): C0
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing (new branch only)
+--------------------------------
+
+  $ hg push --rev 'desc(A1)'
+  pushing to $TESTTMP/D4/server (glob)
+  searching for changes
+  abort: push creates new remote head f6082bc4ffef!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+Actual testing (existing branch only)
+------------------------------------
+
+  $ hg push --rev 'desc(B1)'
+  pushing to $TESTTMP/D4/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D5.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,107 @@
+====================================
+Testing head checking code: Case D-5
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 5: multi-changeset branch, split on multiple others (head on its own new branch)
+
+.. old-state:
+..
+.. * 2 branches (1-changeset and 2-changeset)
+..
+.. new-state:
+..
+.. * 1 new branch superseding the head of the old-2-changesets-branch,
+.. * 1 new changeset on the old-1-changeset-branch superseding the base of the other
+..
+.. expected-result:
+..
+.. * push the new branch only -> push denied
+.. * push the existing branch only -> push allowed
+..   /!\ This push creates instability/orphaning, however, and we should
+..  probably detect/warn against that.
+..
+.. graph-summary:
+..
+..   B ø⇠◔ B'
+..     | |
+.. A'◔⇢ø |
+..   | |/
+.. C ● |
+..    \|
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D5
+  $ cd D5
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd server
+  $ mkcommit B0
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ cd ../client
+  $ hg pull
+  pulling from $TESTTMP/D5/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+  $ hg up 'desc(C0)'
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit A1
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
+  $ hg log -G --hidden
+  @  25c56d33e4c4 (draft): B1
+  |
+  | o  a0802eb7fc1b (draft): A1
+  | |
+  | o  0f88766e02d6 (draft): C0
+  |/
+  | x  d73caddc5533 (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(B1)'
+  pushing to $TESTTMP/D5/server (glob)
+  searching for changes
+  abort: push creates new remote head 25c56d33e4c4!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+  $ hg push --rev 'desc(A1)'
+  pushing to $TESTTMP/D5/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  1 new obsolescence markers
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D6.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,82 @@
+====================================
+Testing head checking code: Case D-6
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 6: single changeset, superseded then pruned (on top of a new unpushed changeset)
+
+This is a partial push variation of case B-6
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * old branch is rewritten onto another one,
+.. * the new version is then pruned.
+..
+.. expected-result:
+..
+.. * push denied
+..
+.. graph-summary:
+..
+..   A ø⇠⊗ A'
+..     | |
+.. C ◔ | ○ B
+..    \|/
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D6
+  $ cd D6
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ mkcommit A1
+  $ hg up '0'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  $ hg debugobsolete --record-parents `getid "desc(A1)"`
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | x  ba93660aff8d (draft): A1
+  | |
+  | o  74ff5441d343 (draft): B0
+  |/
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(C0)'
+  pushing to $TESTTMP/D6/server (glob)
+  searching for changes
+  abort: push creates new remote head 0f88766e02d6!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-push-checkheads-unpushed-D7.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,96 @@
+====================================
+Testing head checking code: Case D-7
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category D: remote head is "obs-affected" locally, but result is not part of the push
+TestCase 7: single changeset, superseded multiple times then pruned (on top of a new unpushed changeset)
+
+This is a partial push variation of case B-6
+
+.. old-state:
+..
+.. * 1 changeset branch
+..
+.. new-state:
+..
+.. * old branch is rewritten onto another one,
+.. * the rewriting is then rewritten again onto the root,
+.. * the new version is then pruned.
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+..       A'
+..   A ø⇠ø⇠⊗ A''
+..     | | |
+.. C ◔ | ◔ | B
+..    \|/ /
+..     | /
+..     |/
+..     |
+..     ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir D7
+  $ cd D7
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit B0
+  created new head
+  $ mkcommit A1
+  $ hg up '0'
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ mkcommit A2
+  created new head
+  $ hg up '0'
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ mkcommit C0
+  created new head
+  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  $ hg debugobsolete `getid "desc(A1)"` `getid "desc(A2)"`
+  $ hg debugobsolete --record-parents `getid "desc(A2)"`
+  $ hg log -G --hidden
+  @  0f88766e02d6 (draft): C0
+  |
+  | x  c1f8d089020f (draft): A2
+  |/
+  | x  ba93660aff8d (draft): A1
+  | |
+  | o  74ff5441d343 (draft): B0
+  |/
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+  
+
+Actual testing
+--------------
+
+  $ hg push --rev 'desc(C0)'
+  pushing to $TESTTMP/D7/server (glob)
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  3 new obsolescence markers
+
+  $ cd ../..
--- a/tests/test-push-http-bundle1.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-push-http-bundle1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -79,7 +79,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -95,7 +95,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -111,7 +111,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
--- a/tests/test-push-http.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-push-http.t	Tue Apr 18 12:24:34 2017 -0400
@@ -69,8 +69,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: pushkey hook: HG_HOOKNAME=pushkey HG_HOOKTYPE=pushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
+  remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -86,8 +86,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: pushkey hook: HG_HOOKNAME=pushkey HG_HOOKTYPE=pushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
+  remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -103,8 +103,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: pushkey hook: HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
-  remote: changegroup hook: HG_BUNDLE2=1 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: pushkey hook: HG_HOOKNAME=pushkey HG_HOOKTYPE=pushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
+  remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
@@ -125,7 +125,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   remote: pushkey-abort: prepushkey hook exited with status 1
   remote: transaction abort!
   remote: rollback completed
@@ -145,7 +145,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files
-  remote: prepushkey hook: HG_BUNDLE2=1 HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:http:127.0.0.1: (glob)
+  remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob)
   % serve errors
   $ hg rollback
   repository tip rolled back to revision 0 (undo serve)
--- a/tests/test-qrecord.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-qrecord.t	Tue Apr 18 12:24:34 2017 -0400
@@ -239,6 +239,12 @@
   $ hg qrecord .hg
   abort: patch name cannot begin with ".hg"
   [255]
+  $ hg qrecord ' foo'
+  abort: patch name cannot begin or end with whitespace
+  [255]
+  $ hg qrecord 'foo '
+  abort: patch name cannot begin or end with whitespace
+  [255]
 
 qrecord a.patch
 
--- a/tests/test-rebase-abort.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-abort.t	Tue Apr 18 12:24:34 2017 -0400
@@ -374,10 +374,11 @@
   $ hg --config extensions.n=$TESTDIR/failfilemerge.py rebase -s 3 -d tip
   rebasing 3:3a71550954f1 "b"
   rebasing 4:e80b69427d80 "c"
+  transaction abort!
+  rollback completed
   abort: ^C
   [255]
   $ hg rebase --abort
-  saved backup bundle to $TESTTMP/interrupted/.hg/strip-backup/3d8812cf300d-93041a90-backup.hg (glob)
   rebase aborted
   $ hg log -G --template "{rev} {desc} {bookmarks}"
   o  6 no-a
@@ -398,7 +399,7 @@
   parent: 0:df4f53cec30a 
    base
   branch: default
-  commit: (clean)
+  commit: 1 unknown (clean)
   update: 6 new changesets (update)
   phases: 7 draft
 
--- a/tests/test-rebase-collapse.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-collapse.t	Tue Apr 18 12:24:34 2017 -0400
@@ -572,6 +572,8 @@
   o  0: 'A'
   
   $ hg rebase --keepbranches --collapse -s 1 -d 3
+  transaction abort!
+  rollback completed
   abort: cannot collapse multiple named branches
   [255]
 
--- a/tests/test-rebase-conflicts.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-conflicts.t	Tue Apr 18 12:24:34 2017 -0400
@@ -218,13 +218,13 @@
   
   $ hg rebase -s9 -d2 --debug # use debug to really check merge base used
   rebase onto 4bc80088dc6b starting from e31216eec445
+  rebase status stored
   ignoring null merge rebase of 3
   ignoring null merge rebase of 4
   ignoring null merge rebase of 6
   ignoring null merge rebase of 8
   rebasing 9:e31216eec445 "more changes to f1"
    future parents are 2 and -1
-  rebase status stored
    update to 2:4bc80088dc6b
   resolving manifests
    branchmerge: False, force: True, partial: False
@@ -250,7 +250,6 @@
   rebased as 19c888675e13
   rebasing 10:2f2496ddf49d "merge" (tip)
    future parents are 11 and 7
-  rebase status stored
    already in target
    merge against 10:2f2496ddf49d
      detach base 9:e31216eec445
@@ -268,6 +267,7 @@
   committing changelog
   rebased as 2a7f09cac94c
   rebase merging completed
+  rebase status stored
   update back to initial working directory parent
   resolving manifests
    branchmerge: False, force: False, partial: False
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-dest.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,78 @@
+Require a destination
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > rebase =
+  > [commands]
+  > rebase.requiredest = True
+  > EOF
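+
+With requiredest set, a bare 'hg rebase' should abort, while HGPLAIN mode and
+an explicit 'commands.rebase.requiredest=False' override both bypass the
+requirement, as exercised below.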
+  $ hg init repo
+  $ cd repo
+  $ echo a >> a
+  $ hg commit -qAm aa
+  $ echo b >> b
+  $ hg commit -qAm bb
+  $ hg up ".^"
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo c >> c
+  $ hg commit -qAm cc
+  $ hg rebase
+  abort: you must specify a destination
+  (use: hg rebase -d REV)
+  [255]
+  $ hg rebase -d 1
+  rebasing 2:5db65b93a12b "cc" (tip)
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/5db65b93a12b-4fb789ec-backup.hg (glob)
+  $ hg rebase -d 0 -r . -q
+  $ HGPLAIN=1 hg rebase
+  rebasing 2:889b0bc6a730 "cc" (tip)
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/889b0bc6a730-41ec4f81-backup.hg (glob)
+  $ hg rebase -d 0 -r . -q
+  $ hg --config commands.rebase.requiredest=False rebase
+  rebasing 2:279de9495438 "cc" (tip)
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/279de9495438-ab0a5128-backup.hg (glob)
+
+Requiring dest should not break continue or other rebase options
+  $ hg up 1 -q
+  $ echo d >> c
+  $ hg commit -qAm dc
+  $ hg log -G -T '{rev} {desc}'
+  @  3 dc
+  |
+  | o  2 cc
+  |/
+  o  1 bb
+  |
+  o  0 aa
+  
+  $ hg rebase -d 2
+  rebasing 3:0537f6b50def "dc" (tip)
+  merging c
+  warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
+  unresolved conflicts (see hg resolve, then hg rebase --continue)
+  [1]
+  $ echo d > c
+  $ hg resolve --mark --all
+  (no more unresolved files)
+  continue: hg rebase --continue
+  $ hg rebase --continue
+  rebasing 3:0537f6b50def "dc" (tip)
+  saved backup bundle to $TESTTMP/repo/.hg/strip-backup/0537f6b50def-be4c7386-backup.hg (glob)
+
+  $ cd ..
+
+Check rebase.requiredest interaction with pull --rebase
+  $ hg clone repo clone
+  updating to branch default
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd repo
+  $ echo e > e
+  $ hg commit -qAm ee
+  $ cd ..
+  $ cd clone
+  $ echo f > f
+  $ hg commit -qAm ff
+  $ hg pull --rebase
+  abort: rebase destination required by configuration
+  (use hg pull followed by hg rebase -d DEST)
+  [255]
+
--- a/tests/test-rebase-named-branches.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-named-branches.t	Tue Apr 18 12:24:34 2017 -0400
@@ -387,4 +387,23 @@
   o  0: '0'
   
 
+  $ hg up -cr 1
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg branch x
+  marked working directory as branch x
+  $ hg rebase -r 3:: -d .
+  rebasing 3:76abc1c6f8c7 "b1"
+  rebasing 4:8427af5d86f2 "c2 closed" (tip)
+  note: rebase of 4:8427af5d86f2 created no changes to commit
+  saved backup bundle to $TESTTMP/case2/.hg/strip-backup/76abc1c6f8c7-cd698d13-backup.hg (glob)
+  $ hg tglog
+  o  3: 'b1' x
+  |
+  | o  2: 'c1' c
+  | |
+  @ |  1: 'b2' b
+  |/
+  o  0: '0'
+  
+
   $ cd ..
--- a/tests/test-rebase-obsolete.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-obsolete.t	Tue Apr 18 12:24:34 2017 -0400
@@ -175,7 +175,7 @@
   32af7686d403cf45b5d95f2d70cebea587ac806a 0 {5fddd98957c8a54a4d436dfe1da9d87f21a1b97b} (*) {'user': 'test'} (glob)
 
 
-More complex case were part of the rebase set were already rebased
+More complex case where part of the rebase set were already rebased
 
   $ hg rebase --rev 'desc(D)' --dest 'desc(H)'
   rebasing 9:08483444fef9 "D"
@@ -272,6 +272,35 @@
   D
   
   
+Start rebase from a commit that is obsolete and remains visible only because
+it is a working copy parent. We should be moved back to the starting commit
+as usual, even though it is hidden (until we're moved there).
+
+  $ hg --hidden up -qr 'first(hidden())'
+  $ hg rebase --rev 13 --dest 15
+  rebasing 13:98f6af4ee953 "C"
+  $ hg log -G
+  o  16:294a2b93eb4d C
+  |
+  o  15:627d46148090 D
+  |
+  | o  12:462a34d07e59 B
+  | |
+  | o  11:4596109a6a43 D
+  | |
+  | o  7:02de42196ebe H
+  | |
+  +---o  6:eea13746799a G
+  | |/
+  | o  5:24b6387c8c8c F
+  | |
+  o |  4:9520eea781bc E
+  |/
+  | @  1:42ccdea3bb16 B
+  |/
+  o  0:cd010b8cd998 A
+  
+
   $ cd ..
 
 collapse rebase
--- a/tests/test-rebase-scenario-global.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rebase-scenario-global.t	Tue Apr 18 12:24:34 2017 -0400
@@ -270,9 +270,12 @@
 
   $ hg rebase -s 6 -d 1
   rebasing 6:eea13746799a "G"
+  transaction abort!
+  rollback completed
   abort: cannot use revision 6 as base, result would have 3 parents
   [255]
-
+  $ hg rebase --abort
+  rebase aborted
 
 These will abort gracefully (using --base):
 
@@ -328,6 +331,10 @@
   abort: can't rebase public changeset e1c4361dd923
   (see 'hg help phases' for details)
   [255]
+  $ hg rebase -d 5 -r '1 + (6::)'
+  abort: can't rebase public changeset e1c4361dd923
+  (see 'hg help phases' for details)
+  [255]
 
   $ hg rebase -d 5 -b 6 --keep
   rebasing 6:e1c4361dd923 "C"
@@ -773,7 +780,7 @@
 Get back to the root of cwd-vanish. Note that even though `cd ..`
 works on most systems, it does not work on FreeBSD 10, so we use an
 absolute path to get back to the repository.
-  $ cd $TESTTMP/cwd-vanish
+  $ cd $TESTTMP
 
 Test that rebase is done in topo order (issue5370)
 
@@ -819,7 +826,7 @@
   rebasing 4:82ae8dc7a9b7 "E"
   rebasing 3:ab709c9f7171 "D"
   rebasing 5:412b391de760 "F"
-  saved backup bundle to $TESTTMP/cwd-vanish/order/.hg/strip-backup/76035bbd54bd-e341bc99-backup.hg (glob)
+  saved backup bundle to $TESTTMP/order/.hg/strip-backup/76035bbd54bd-e341bc99-backup.hg (glob)
 
   $ hg tglog
   o  6: 'F'
@@ -840,7 +847,7 @@
 Test experimental revset
 ========================
 
-  $ cd ..
+  $ cd ../cwd-vanish
 
 Make the repo a bit more interesting
 
--- a/tests/test-revert-interactive.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-revert-interactive.t	Tue Apr 18 12:24:34 2017 -0400
@@ -380,29 +380,29 @@
   3 hunks, 3 lines changed
   examine changes to 'folder1/g'? [Ynesfdaq?] y
   
-  @@ -1,5 +1,4 @@
-  -firstline
+  @@ -1,4 +1,5 @@
+  +firstline
    c
    1
    2
    3
   discard change 1/3 to 'folder1/g'? [Ynesfdaq?] y
   
-  @@ -2,7 +1,7 @@
+  @@ -1,7 +2,7 @@
    c
    1
    2
    3
-  - 3
-  +4
+  -4
+  + 3
    5
    d
   discard change 2/3 to 'folder1/g'? [Ynesfdaq?] y
   
-  @@ -7,3 +6,2 @@
+  @@ -6,2 +7,3 @@
    5
    d
-  -lastline
+  +lastline
   discard change 3/3 to 'folder1/g'? [Ynesfdaq?] n
   
   $ hg diff --nodates
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revlog-raw.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,293 @@
+# test revlog interactions with raw data (flagprocessor)
+
+from __future__ import absolute_import, print_function
+
+import sys
+
+from mercurial import (
+    encoding,
+    node,
+    revlog,
+    transaction,
+    vfs,
+)
+
+# TESTTMP is optional. This makes it convenient to run without run-tests.py
+tvfs = vfs.vfs(encoding.environ.get('TESTTMP', b'/tmp'))
+
+# Enable generaldelta, otherwise revlog won't use deltas as expected by the test
+tvfs.options = {'generaldelta': True, 'revlogv1': True}
+
+# The test wants to control whether to use delta explicitly, based on
+# "storedeltachains".
+revlog.revlog._isgooddelta = lambda self, d, textlen: self.storedeltachains
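+# (appendrev below toggles rlog.storedeltachains to force either a delta or
+# a full text for each revision it writes)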
+
+def abort(msg):
+    print('abort: %s' % msg)
+    # Return 0 so run-tests.py can compare the output.
+    sys.exit()
+
+# Register a revlog processor for flag EXTSTORED.
+#
+# It simply prepends a fixed header, and replaces '1' with 'i'. So it involves
+# both insertion and replacement, which may be interesting for testing revlog's
+# line-based deltas.
+_extheader = b'E\n'
+
+def readprocessor(self, rawtext):
+    # True: the returned text could be used to verify hash
+    text = rawtext[len(_extheader):].replace(b'i', b'1')
+    return text, True
+
+def writeprocessor(self, text):
+    # False: the returned rawtext shouldn't be used to verify hash
+    rawtext = _extheader + text.replace(b'1', b'i')
+    return rawtext, False
+
+def rawprocessor(self, rawtext):
+    # False: do not verify hash. Only the content returned by "readprocessor"
+    # can be used to verify hash.
+    return False
+
+revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
+                        (readprocessor, writeprocessor, rawprocessor))
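+
+# For example, the round-trip for text b'112' looks like:
+#   writeprocessor(None, b'112') -> (b'E\nii2', False)
+#   readprocessor(None, b'E\nii2') -> (b'112', True)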
+
+# Utilities about reading and appending revlog
+
+def newtransaction():
+    # A transaction is required to write revlogs
+    report = lambda msg: None
+    return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
+
+def newrevlog(name=b'_testrevlog.i', recreate=False):
+    if recreate:
+        tvfs.tryunlink(name)
+    rlog = revlog.revlog(tvfs, name)
+    return rlog
+
+def appendrev(rlog, text, tr, isext=False, isdelta=True):
+    '''Append a revision. If isext is True, set the EXTSTORED flag so flag
+    processor will be used (and rawtext is different from text). If isdelta is
+    True, force the revision to be a delta, otherwise it's full text.
+    '''
+    nextrev = len(rlog)
+    p1 = rlog.node(nextrev - 1)
+    p2 = node.nullid
+    if isext:
+        flags = revlog.REVIDX_EXTSTORED
+    else:
+        flags = revlog.REVIDX_DEFAULT_FLAGS
+    # Change storedeltachains temporarily, to override revlog's delta decision
+    rlog.storedeltachains = isdelta
+    try:
+        rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
+        return nextrev
+    except Exception as ex:
+        abort('rev %d: failed to append: %s' % (nextrev, ex))
+    finally:
+        # Restore storedeltachains. It is always True, see revlog.__init__
+        rlog.storedeltachains = True
+
+def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
+    '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
+
+    This emulates a push or pull. They use changegroups, and a changegroup
+    requires a repo to work. We don't have a repo, so a dummy changegroup is
+    used.
+
+    If optimaldelta is True, use an optimized delta parent, so the destination
+    revlog can probably reuse it. Otherwise it builds a sub-optimal delta, and
+    the destination revlog needs more work to use it.
+
+    This exercises the revlog.addgroup (and revlog._addrevision(text=None))
+    code path, which is not covered by "appendrev" alone.
+    '''
+    class dummychangegroup(object):
+        @staticmethod
+        def deltachunk(pnode):
+            pnode = pnode or node.nullid
+            parentrev = rlog.rev(pnode)
+            r = parentrev + 1
+            if r >= len(rlog):
+                return {}
+            if optimaldelta:
+                deltaparent = parentrev
+            else:
+                # suboptimal deltaparent
+                deltaparent = min(0, parentrev)
+            return {'node': rlog.node(r), 'p1': pnode, 'p2': node.nullid,
+                    'cs': rlog.node(rlog.linkrev(r)), 'flags': rlog.flags(r),
+                    'deltabase': rlog.node(deltaparent),
+                    'delta': rlog.revdiff(deltaparent, r)}
+
+    def linkmap(lnode):
+        return rlog.rev(lnode)
+
+    dlog = newrevlog(destname, recreate=True)
+    dlog.addgroup(dummychangegroup(), linkmap, tr)
+    return dlog
+
+def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
+    '''Like addgroupcopy, but use the low level revlog._addrevision directly.
+
+    It exercises some code paths that are hard to reach otherwise.
+    '''
+    dlog = newrevlog(destname, recreate=True)
+    for r in rlog:
+        p1 = rlog.node(r - 1)
+        p2 = node.nullid
+        if r == 0:
+            text = rlog.revision(r, raw=True)
+            cachedelta = None
+        else:
+            # deltaparent is more interesting if it has the EXTSTORED flag.
+            deltaparent = max([0] + [p for p in range(r - 2) if rlog.flags(p)])
+            text = None
+            cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
+        flags = rlog.flags(r)
+        ifh = dlog.opener(dlog.indexfile, 'a+')
+        dfh = None
+        if not dlog._inline:
+            dfh = dlog.opener(dlog.datafile, 'a+')
+        dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags, cachedelta,
+                          ifh, dfh)
+    return dlog
+
+# Utilities to generate revisions for testing
+
+def genbits(n):
+    '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
+    i.e. the generated numbers have a width of n bits.
+
+    The combination of two adjacent numbers will cover all possible cases.
+    That is to say, given any x, y where both x and y are in range(2 ** n),
+    there is an x followed immediately by y in the generated sequence.
+    '''
+    m = 2 ** n
+
+    # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
+    gray = lambda x: x ^ (x >> 1)
+    reversegray = dict((gray(i), i) for i in range(m))
+
+    # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
+    # the next unused gray code where higher n bits equal to X.
+
+    # For gray codes whose higher bits are X, a[X] of them have been used.
+    a = [0] * m
+
+    # Iterate from 0.
+    x = 0
+    yield x
+    for i in range(m * m):
+        x = reversegray[x]
+        y = gray(a[x] + x * m) & (m - 1)
+        assert a[x] < m
+        a[x] += 1
+        x = y
+        yield x
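+
+# For example, genbits(1) yields 0, 0, 1, 1, 0: the adjacent pairs (0,0),
+# (0,1), (1,1) and (1,0) cover all 4 combinations of two 1-bit numbers.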
+
+def gentext(rev):
+    '''Given a revision number, generate dummy text'''
+    return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
+
+def writecases(rlog, tr):
+    '''Write some revisions of interest to the test.
+
+    The test is interested in 3 properties of a revision:
+
+        - Is it a delta or a full text? (isdelta)
+          This is to catch some delta application issues.
+        - Does it have a flag of EXTSTORED? (isext)
+          This is to catch some flag processor issues. Especially when
+          interacted with revlog deltas.
+        - Is its text empty? (isempty)
+          This is less important. It is intended to catch careless checks
+          like "if text" instead of "if text is None". Note: if a flag
+          processor is involved, the raw text may not be empty.
+
+    Write 65 revisions, so that all combinations of the above flags for
+    adjacent revisions are covered. That is to say,
+
+        len(set(
+            (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty)
+            for r in range(len(rlog) - 1)
+           )) is 64.
+
+    Where "r.delta", "r.ext", and "r.empty" are booleans matching properties
+    mentioned above.
+
+    Return expected [(text, rawtext)].
+    '''
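+    # Each x from genbits(3) encodes (isdelta, isext, isempty) in its 3 bits,
+    # and every ordered pair of combinations appears in adjacent revisions,
+    # hence the 65 (2 ** 6 + 1) revisions written below.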
+    result = []
+    for i, x in enumerate(genbits(3)):
+        isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4)
+        if isempty:
+            text = b''
+        else:
+            text = gentext(i)
+        rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta)
+
+        # Verify text, rawtext, and rawsize
+        if isext:
+            rawtext = writeprocessor(None, text)[0]
+        else:
+            rawtext = text
+        if rlog.rawsize(rev) != len(rawtext):
+            abort('rev %d: wrong rawsize' % rev)
+        if rlog.revision(rev, raw=False) != text:
+            abort('rev %d: wrong text' % rev)
+        if rlog.revision(rev, raw=True) != rawtext:
+            abort('rev %d: wrong rawtext' % rev)
+        result.append((text, rawtext))
+
+        # Verify flags like isdelta, isext work as expected
+        if bool(rlog.deltaparent(rev) > -1) != isdelta:
+            abort('rev %d: isdelta is ineffective' % rev)
+        if bool(rlog.flags(rev)) != isext:
+            abort('rev %d: isext is ineffective' % rev)
+    return result
+
+# Main test and checking
+
+def checkrevlog(rlog, expected):
+    '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
+    # Test using different access orders. This could expose some issues
+    # depending on revlog caching (see revlog._cache).
+    for r0 in range(len(rlog) - 1):
+        r1 = r0 + 1
+        for revorder in [[r0, r1], [r1, r0]]:
+            for raworder in [[True], [False], [True, False], [False, True]]:
+                nlog = newrevlog()
+                for rev in revorder:
+                    for raw in raworder:
+                        t = nlog.revision(rev, raw=raw)
+                        if t != expected[rev][int(raw)]:
+                            abort('rev %d: corrupted %stext'
+                                  % (rev, raw and 'raw' or ''))
+
+def maintest():
+    expected = rl = None
+    with newtransaction() as tr:
+        rl = newrevlog(recreate=True)
+        expected = writecases(rl, tr)
+        checkrevlog(rl, expected)
+        print('local test passed')
+        # Copy via revlog.addgroup
+        rl1 = addgroupcopy(rl, tr)
+        checkrevlog(rl1, expected)
+        rl2 = addgroupcopy(rl, tr, optimaldelta=False)
+        checkrevlog(rl2, expected)
+        print('addgroupcopy test passed')
+        # Copy via revlog.clone
+        rl3 = newrevlog(name='_destrevlog3.i', recreate=True)
+        rl.clone(tr, rl3)
+        checkrevlog(rl3, expected)
+        print('clone test passed')
+        # Copy via low-level revlog._addrevision
+        rl4 = lowlevelcopy(rl, tr)
+        checkrevlog(rl4, expected)
+        print('lowlevelcopy test passed')
+
+try:
+    maintest()
+except Exception as ex:
+    abort('crashed: %s' % ex)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revlog-raw.py.out	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,4 @@
+local test passed
+addgroupcopy test passed
+clone test passed
+lowlevelcopy test passed
--- a/tests/test-revset.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-revset.t	Tue Apr 18 12:24:34 2017 -0400
@@ -40,6 +40,8 @@
   >     cmdutil,
   >     node as nodemod,
   >     revset,
+  >     revsetlang,
+  >     smartset,
   > )
   > cmdtable = {}
   > command = cmdutil.command(cmdtable)
@@ -49,17 +51,18 @@
   > def debugrevlistspec(ui, repo, fmt, *args, **opts):
   >     if opts['bin']:
   >         args = map(nodemod.bin, args)
-  >     expr = revset.formatspec(fmt, list(args))
+  >     expr = revsetlang.formatspec(fmt, list(args))
   >     if ui.verbose:
-  >         tree = revset.parse(expr, lookup=repo.__contains__)
-  >         ui.note(revset.prettyformat(tree), "\n")
+  >         tree = revsetlang.parse(expr, lookup=repo.__contains__)
+  >         ui.note(revsetlang.prettyformat(tree), "\n")
   >         if opts["optimize"]:
-  >             opttree = revset.optimize(revset.analyze(tree))
-  >             ui.note("* optimized:\n", revset.prettyformat(opttree), "\n")
+  >             opttree = revsetlang.optimize(revsetlang.analyze(tree))
+  >             ui.note("* optimized:\n", revsetlang.prettyformat(opttree),
+  >                     "\n")
   >     func = revset.match(ui, expr, repo)
   >     revs = func(repo)
   >     if ui.verbose:
-  >         ui.note("* set:\n", revset.prettyformatset(revs), "\n")
+  >         ui.note("* set:\n", smartset.prettyformat(revs), "\n")
   >     for c in revs:
   >         ui.write("%s\n" % c)
   > EOF
@@ -451,7 +454,7 @@
   0
 
   $ log 'extra(branch, a, b)'
-  hg: parse error: extra takes at most 2 arguments
+  hg: parse error: extra takes at most 2 positional arguments
   [255]
   $ log 'extra(a, label=b)'
   hg: parse error: extra got multiple values for keyword argument 'label'
@@ -1417,19 +1420,19 @@
       define)
     (or
       (list
+        ('symbol', '2')
         (range
           ('symbol', '0')
           ('symbol', '1')
-          follow)
-        ('symbol', '2'))
+          follow))
       follow)
     define)
   * set:
   <filteredset
     <spanset- 0:2>,
     <addset
-      <spanset+ 0:1>,
-      <baseset [2]>>>
+      <baseset [2]>,
+      <spanset+ 0:1>>>
   2
   1
   0
@@ -1914,6 +1917,69 @@
   1
   0
 
+ 'A + B' can be rewritten to 'B + A' by weight only when the order doesn't
+ matter (e.g. 'X & (A + B)' can be 'X & (B + A)', but '(A + B) & X' can't):
+
+  $ try -p optimized '0:2 & (reverse(contains("a")) + 2)'
+  * optimized:
+  (and
+    (range
+      ('symbol', '0')
+      ('symbol', '2')
+      define)
+    (or
+      (list
+        ('symbol', '2')
+        (func
+          ('symbol', 'reverse')
+          (func
+            ('symbol', 'contains')
+            ('string', 'a')
+            define)
+          follow))
+      follow)
+    define)
+  * set:
+  <filteredset
+    <spanset+ 0:2>,
+    <addset
+      <baseset [2]>,
+      <filteredset
+        <fullreposet+ 0:9>,
+        <contains 'a'>>>>
+  0
+  1
+  2
+
+  $ try -p optimized '(reverse(contains("a")) + 2) & 0:2'
+  * optimized:
+  (and
+    (range
+      ('symbol', '0')
+      ('symbol', '2')
+      follow)
+    (or
+      (list
+        (func
+          ('symbol', 'reverse')
+          (func
+            ('symbol', 'contains')
+            ('string', 'a')
+            define)
+          define)
+        ('symbol', '2'))
+      define)
+    define)
+  * set:
+  <addset
+    <filteredset
+      <spanset- 0:2>,
+      <contains 'a'>>,
+    <baseset [2]>>
+  1
+  0
+  2
+
 test sort revset
 --------------------------------------------
 
--- a/tests/test-rollback.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-rollback.t	Tue Apr 18 12:24:34 2017 -0400
@@ -148,6 +148,8 @@
   working directory now based on revision 0
   $ hg id default
   791dd2169706
+
+  $ killdaemons.py
 #endif
 
 update to older changeset and then refuse rollback, because
--- a/tests/test-run-tests.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-run-tests.t	Tue Apr 18 12:24:34 2017 -0400
@@ -39,6 +39,19 @@
   $ rm hg
 #endif
 
+Features for testing optional lines
+===================================
+
+  $ cat > hghaveaddon.py <<EOF
+  > import hghave
+  > @hghave.check("custom", "custom hghave feature")
+  > def has_custom():
+  >     return True
+  > @hghave.check("missing", "missing hghave feature")
+  > def has_missing():
+  >     return False
+  > EOF
+
 an empty test
 =======================
 
@@ -55,7 +68,10 @@
   >   $ echo babar
   >   babar
   >   $ echo xyzzy
+  >   dont_print (?)
+  >   nothing[42]line (re) (?)
   >   never*happens (glob) (?)
+  >   more_nothing (?)
   >   xyzzy
   >   nor this (?)
   >   $ printf 'abc\ndef\nxyz\n'
@@ -64,6 +80,13 @@
   >   def (?)
   >   456 (?)
   >   xyz
+  >   $ printf 'zyx\nwvu\ntsr\n'
+  >   abc (?)
+  >   zyx (custom !)
+  >   wvu
+  >   no_print (no-custom !)
+  >   tsr (no-missing !)
+  >   missing (missing !)
   > EOF
 
   $ rt
@@ -104,6 +127,10 @@
   > this test is still more bytes than success.
   > pad pad pad pad............................................................
   > pad pad pad pad............................................................
+  > pad pad pad pad............................................................
+  > pad pad pad pad............................................................
+  > pad pad pad pad............................................................
+  > pad pad pad pad............................................................
   > EOF
 
   >>> fh = open('test-failure-unicode.t', 'wb')
@@ -316,8 +343,8 @@
   *SALT* 0 0 (glob)
   + echo babar
   babar
-  + echo *SALT* 6 0 (glob)
-  *SALT* 6 0 (glob)
+  + echo *SALT* 10 0 (glob)
+  *SALT* 10 0 (glob)
   *+ echo *SALT* 0 0 (glob)
   *SALT* 0 0 (glob)
   + echo babar
@@ -326,14 +353,20 @@
   *SALT* 2 0 (glob)
   + echo xyzzy
   xyzzy
-  + echo *SALT* 6 0 (glob)
-  *SALT* 6 0 (glob)
+  + echo *SALT* 9 0 (glob)
+  *SALT* 9 0 (glob)
   + printf *abc\ndef\nxyz\n* (glob)
   abc
   def
   xyz
-  + echo *SALT* 12 0 (glob)
-  *SALT* 12 0 (glob)
+  + echo *SALT* 15 0 (glob)
+  *SALT* 15 0 (glob)
+  + printf *zyx\nwvu\ntsr\n* (glob)
+  zyx
+  wvu
+  tsr
+  + echo *SALT* 22 0 (glob)
+  *SALT* 22 0 (glob)
   .
   # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
 
@@ -412,6 +445,10 @@
   this test is still more bytes than success.
   pad pad pad pad............................................................
   pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
 
 Interactive with custom view
 
@@ -449,13 +486,15 @@
   
   --- $TESTTMP/test-failure.t
   +++ $TESTTMP/test-failure.t.err
-  @@ -1,11 +1,11 @@
+  @@ -1,5 +1,5 @@
      $ echo babar
   -  rataxes
   +  babar
    This is a noop statement so that
    this test is still more bytes than success.
    pad pad pad pad............................................................
+  @@ -9,7 +9,7 @@
+   pad pad pad pad............................................................
    pad pad pad pad............................................................
      $ echo 'saved backup bundle to $TESTTMP/foo.hg'
   -  saved backup bundle to $TESTTMP/foo.hg
@@ -473,6 +512,10 @@
   this test is still more bytes than success.
   pad pad pad pad............................................................
   pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
+  pad pad pad pad............................................................
     $ echo 'saved backup bundle to $TESTTMP/foo.hg'
     saved backup bundle to $TESTTMP/foo.hg (glob)<
     $ echo 'saved backup bundle to $TESTTMP/foo.hg'
@@ -735,9 +778,11 @@
 
   $ rm -f test-glob-backslash.t
 
-Test globbing of 127.0.0.1
+Test globbing of local IP addresses
   $ echo 172.16.18.1
-  127.0.0.1 (glob)
+  $LOCALIP (glob)
+  $ echo dead:beef::1
+  $LOCALIP (glob)
 
 Test reusability for third party tools
 ======================================
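
The new run-tests cases above rely on two kinds of output annotations: '(?)' marks an expected line as optional, and '(feature !)' gates a line on an hghave feature (a 'no-' prefix negates it). A simplified model of how such a line could be classified (an assumption for illustration; the real matcher lives in run-tests.py):

    def classify(expected, features):
        """Return (text, optional) for one expected-output line."""
        if expected.endswith(' (?)'):
            return expected[:-4], True
        if expected.endswith(' !)'):
            text, _, feat = expected[:-3].rpartition(' (')
            if feat.startswith('no-'):
                present = feat[3:] not in features
            else:
                present = feat in features
            # Feature-gated lines are required when the feature matches
            # and silently dropped otherwise.
            return text, not present
        return expected, False

    features = {'custom'}
    assert classify('zyx (custom !)', features) == ('zyx', False)
    assert classify('missing (missing !)', features) == ('missing', True)
    assert classify('dont_print (?)', features) == ('dont_print', True)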
--- a/tests/test-serve.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-serve.t	Tue Apr 18 12:24:34 2017 -0400
@@ -34,18 +34,18 @@
 With -v
 
   $ hgserve
-  listening at http://localhost/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
 With -v and -p HGPORT2
 
   $ hgserve -p "$HGPORT2"
-  listening at http://localhost/ (bound to 127.0.0.1:HGPORT2) (glob)
+  listening at http://localhost/ (bound to *$LOCALIP*:HGPORT2) (glob) (?)
   % errors
 
 With -v and -p daytime (should fail because low port)
 
-#if no-root
+#if no-root no-windows
   $ KILLQUIETLY=Y
   $ hgserve -p daytime
   abort: cannot start server at 'localhost:13': Permission denied
@@ -57,25 +57,25 @@
 With --prefix foo
 
   $ hgserve --prefix foo
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
 With --prefix /foo
 
   $ hgserve --prefix /foo
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
 With --prefix foo/
 
   $ hgserve --prefix foo/
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
 With --prefix /foo/
 
   $ hgserve --prefix /foo/
-  listening at http://localhost/foo/ (bound to 127.0.0.1:HGPORT1) (glob)
+  listening at http://localhost/foo/ (bound to *$LOCALIP*:HGPORT1) (glob) (?)
   % errors
 
   $ cd ..
--- a/tests/test-share.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-share.t	Tue Apr 18 12:24:34 2017 -0400
@@ -114,6 +114,8 @@
   $ test -d .hg/store
   $ test -f .hg/sharedpath
   [1]
+  $ grep shared .hg/requires
+  [1]
   $ hg unshare
   abort: this is not a shared repo
   [255]
@@ -154,6 +156,67 @@
    * bm1                       2:c2e0ac586386
      bm3                       2:c2e0ac586386
 
+check whether HG_PENDING makes pending changes only in related
+repositories visible to an external hook.
+
+In "hg share" case, another transaction can't run in other
+repositories sharing same source repository, because starting
+transaction requires locking store of source repository.
+
+Therefore, this test scenario ignores checking visibility of
+.hg/bookmarks.pending in repo2, which shares repo1 without bookmarks.
+
+  $ cat > $TESTTMP/checkbookmarks.sh <<EOF
+  > echo "@repo1"
+  > hg -R "$TESTTMP/repo1" bookmarks
+  > echo "@repo2"
+  > hg -R "$TESTTMP/repo2" bookmarks
+  > echo "@repo3"
+  > hg -R "$TESTTMP/repo3" bookmarks
+  > exit 1 # to avoid adding new bookmark for subsequent tests
+  > EOF
+
+  $ cd ../repo1
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+     bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+   * bmX                       2:c2e0ac586386
+  @repo2
+   * bm2                       3:0e6e70d1d5f1
+  @repo3
+     bm1                       2:c2e0ac586386
+   * bm3                       2:c2e0ac586386
+     bmX                       2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm1
+
+FYI, in contrast to the test above, bmX is invisible in repo1 (= shared
+src), because (1) HG_PENDING refers only to repo3 and (2)
+"bookmarks.pending" is written only into repo3.
+
+  $ cd ../repo3
+  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
+  @repo1
+   * bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+  @repo2
+   * bm2                       3:0e6e70d1d5f1
+  @repo3
+     bm1                       2:c2e0ac586386
+     bm3                       2:c2e0ac586386
+   * bmX                       2:c2e0ac586386
+  transaction abort!
+  rollback completed
+  abort: pretxnclose hook exited with status 1
+  [255]
+  $ hg book bm3
+
+  $ cd ../repo1
+
 test that commits work
 
   $ echo 'shared bookmarks' > a
@@ -177,6 +240,14 @@
      bm3                       4:62f4ded848e4
   $ cd ..
 
+non-largefiles repos won't enable largefiles
+
+  $ hg share --config extensions.largefiles= repo3 sharedrepo
+  updating working directory
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ [ -f sharedrepo/.hg/hgrc ]
+  [1]
+
 test pushing bookmarks works
 
   $ hg clone repo3 repo4
@@ -297,6 +368,56 @@
      bm4                       5:92793bfc8cad
   $ cd ..
 
+test that shared clones using relative paths work
+
+  $ mkdir thisdir
+  $ hg init thisdir/orig
+  $ hg share -U thisdir/orig thisdir/abs
+  $ hg share -U --relative thisdir/abs thisdir/rel
+  $ cat thisdir/rel/.hg/sharedpath
+  ../../orig/.hg (no-eol) (glob)
+  $ grep shared thisdir/*/.hg/requires
+  thisdir/abs/.hg/requires:shared
+  thisdir/rel/.hg/requires:shared
+  thisdir/rel/.hg/requires:relshared
+
+test that relative shared paths aren't relative to $PWD
+
+  $ cd thisdir
+  $ hg -R rel root
+  $TESTTMP/thisdir/rel (glob)
+  $ cd ..
+
+now test that relative paths really are relative and survive across
+renames and changes of PWD
+
+  $ hg -R thisdir/abs root
+  $TESTTMP/thisdir/abs (glob)
+  $ hg -R thisdir/rel root
+  $TESTTMP/thisdir/rel (glob)
+  $ mv thisdir thatdir
+  $ hg -R thatdir/abs root
+  abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg! (glob)
+  [255]
+  $ hg -R thatdir/rel root
+  $TESTTMP/thatdir/rel (glob)
+
+test unshare relshared repo
+
+  $ cd thatdir/rel
+  $ hg unshare
+  $ test -d .hg/store
+  $ test -f .hg/sharedpath
+  [1]
+  $ grep shared .hg/requires
+  [1]
+  $ hg unshare
+  abort: this is not a shared repo
+  [255]
+  $ cd ../..
+
+  $ rm -r thatdir
+
 Explicitly kill daemons to let the test exit on Windows
 
   $ killdaemons.py
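
The relative-share tests above hinge on one resolution rule: with 'relshared' in .hg/requires, the path stored in .hg/sharedpath is interpreted relative to the sharing repo's own .hg directory, so source and share survive being moved together. A sketch of that lookup (a simplified assumption, not hg's actual localrepo code):

    import os

    def resolvesharedpath(hgdir, requires):
        with open(os.path.join(hgdir, 'sharedpath')) as fp:
            shared = fp.read().rstrip('\n')
        if 'relshared' in requires:
            # e.g. '../../orig/.hg' joined against thisdir/rel/.hg
            return os.path.normpath(os.path.join(hgdir, shared))
        return shared  # absolute: breaks when the source repo moves

This matches the behavior tested above: after 'mv thisdir thatdir' the absolute share aborts while the relative one still finds its store.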
--- a/tests/test-shelve.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-shelve.t	Tue Apr 18 12:24:34 2017 -0400
@@ -493,7 +493,7 @@
   $ ln -s foo a/a
   $ hg shelve -q -n symlink a/a
   $ hg status a/a
-  $ hg unshelve -q symlink
+  $ hg unshelve -q -n symlink
   $ hg status a/a
   M a/a
   $ hg revert a/a
@@ -1692,7 +1692,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo 3 >> file && hg ci -Am 13
   $ hg shelve --list
-  default         (1s ago)    changes to: 1
+  default         (*s ago)    changes to: 1 (glob)
   $ hg unshelve --keep
   unshelving change 'default'
   rebasing shelved changes
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-show-underway.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,168 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > show =
+  > EOF
+
+  $ hg init repo0
+  $ cd repo0
+
+Command works on an empty repo
+
+  $ hg show underway
+
+Single draft changeset shown
+
+  $ echo 0 > foo
+  $ hg -q commit -A -m 'commit 0'
+
+  $ hg show underway
+  @  9f171 commit 0
+
+Even when it isn't the wdir
+
+  $ hg -q up null
+
+  $ hg show underway
+  o  9f171 commit 0
+
+Single changeset is still there when public because it is a head
+
+  $ hg phase --public -r 0
+  $ hg show underway
+  o  9f171 commit 0
+
+A draft child will show both it and public parent
+
+  $ hg -q up 0
+  $ echo 1 > foo
+  $ hg commit -m 'commit 1'
+
+  $ hg show underway
+  @  181cc commit 1
+  o  9f171 commit 0
+
+Multiple draft children will be shown
+
+  $ echo 2 > foo
+  $ hg commit -m 'commit 2'
+
+  $ hg show underway
+  @  128c8 commit 2
+  o  181cc commit 1
+  o  9f171 commit 0
+
+Bumping first draft changeset to public will hide its parent
+
+  $ hg phase --public -r 1
+  $ hg show underway
+  @  128c8 commit 2
+  o  181cc commit 1
+  |
+  ~
+
+Multiple DAG heads will be shown
+
+  $ hg -q up -r 1
+  $ echo 3 > foo
+  $ hg commit -m 'commit 3'
+  created new head
+
+  $ hg show underway
+  @  f0abc commit 3
+  | o  128c8 commit 2
+  |/
+  o  181cc commit 1
+  |
+  ~
+
+Even when wdir is something else
+
+  $ hg -q up null
+
+  $ hg show underway
+  o  f0abc commit 3
+  | o  128c8 commit 2
+  |/
+  o  181cc commit 1
+  |
+  ~
+
+Draft child shows public head (multiple heads)
+
+  $ hg -q up 0
+  $ echo 4 > foo
+  $ hg commit -m 'commit 4'
+  created new head
+
+  $ hg show underway
+  @  668ca commit 4
+  | o  f0abc commit 3
+  | | o  128c8 commit 2
+  | |/
+  | o  181cc commit 1
+  |/
+  o  9f171 commit 0
+
+  $ cd ..
+
+Branch name appears in output
+
+  $ hg init branches
+  $ cd branches
+  $ echo 0 > foo
+  $ hg -q commit -A -m 'commit 0'
+  $ echo 1 > foo
+  $ hg commit -m 'commit 1'
+  $ echo 2 > foo
+  $ hg commit -m 'commit 2'
+  $ hg phase --public -r .
+  $ hg -q up -r 1
+  $ hg branch mybranch
+  marked working directory as branch mybranch
+  (branches are permanent and global, did you want a bookmark?)
+  $ echo 3 > foo
+  $ hg commit -m 'commit 3'
+  $ echo 4 > foo
+  $ hg commit -m 'commit 4'
+
+  $ hg show underway
+  @  f8dd3 (mybranch) commit 4
+  o  90cfc (mybranch) commit 3
+  | o  128c8 commit 2
+  |/
+  o  181cc commit 1
+  |
+  ~
+
+  $ cd ..
+
+Bookmark name appears in output
+
+  $ hg init bookmarks
+  $ cd bookmarks
+  $ echo 0 > foo
+  $ hg -q commit -A -m 'commit 0'
+  $ echo 1 > foo
+  $ hg commit -m 'commit 1'
+  $ echo 2 > foo
+  $ hg commit -m 'commit 2'
+  $ hg phase --public -r .
+  $ hg bookmark @
+  $ hg -q up -r 1
+  $ echo 3 > foo
+  $ hg commit -m 'commit 3'
+  created new head
+  $ echo 4 > foo
+  $ hg commit -m 'commit 4'
+  $ hg bookmark mybook
+
+  $ hg show underway
+  @  cac82 (mybook) commit 4
+  o  f0abc commit 3
+  | o  128c8 (@) commit 2
+  |/
+  o  181cc commit 1
+  |
+  ~
+
+  $ cd ..
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-show.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,130 @@
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > show =
+  > EOF
+
+No arguments shows available views
+
+  $ hg init empty
+  $ cd empty
+  $ hg show
+  available views:
+  
+  bookmarks -- bookmarks and their associated changeset
+  underway -- changesets that aren't finished
+  
+  abort: no view requested
+  (use "hg show VIEW" to choose a view)
+  [255]
+
+`hg help show` prints available views
+
+  $ hg help show
+  hg show VIEW
+  
+  show various repository information
+  
+      A requested view of repository data is displayed.
+  
+      If no view is requested, the list of available views is shown and the
+      command aborts.
+  
+      Note:
+         There are no backwards compatibility guarantees for the output of this
+         command. Output may change in any future Mercurial release.
+  
+         Consumers wanting stable command output should specify a template via
+         "-T/--template".
+  
+      List of available views:
+  
+      bookmarks   bookmarks and their associated changeset
+  
+      underway    changesets that aren't finished
+  
+  (use 'hg help -e show' to show help for the show extension)
+  
+  options:
+  
+   -T --template TEMPLATE display with template
+  
+  (some details hidden, use --verbose to show complete help)
+
+Unknown view prints error
+
+  $ hg show badview
+  abort: unknown view: badview
+  (run "hg show" to see available views)
+  [255]
+
+HGPLAIN results in abort
+
+  $ HGPLAIN=1 hg show bookmarks
+  abort: must specify a template in plain mode
+  (invoke with -T/--template to control output format)
+  [255]
+
+But not if a template is specified
+
+  $ HGPLAIN=1 hg show bookmarks -T '{bookmark}\n'
+  (no bookmarks set)
+
+  $ cd ..
+
+bookmarks view with no bookmarks prints empty message
+
+  $ hg init books
+  $ cd books
+  $ touch f0
+  $ hg -q commit -A -m initial
+
+  $ hg show bookmarks
+  (no bookmarks set)
+
+bookmarks view shows bookmarks in an aligned table
+
+  $ echo book1 > f0
+  $ hg commit -m 'commit for book1'
+  $ echo book2 > f0
+  $ hg commit -m 'commit for book2'
+
+  $ hg bookmark -r 1 book1
+  $ hg bookmark a-longer-bookmark
+
+  $ hg show bookmarks
+  * a-longer-bookmark    7b570
+    book1                b757f
+
+A custom bookmarks template works
+
+  $ hg show bookmarks -T '{node} {bookmark} {active}\n'
+  7b5709ab64cbc34da9b4367b64afff47f2c4ee83 a-longer-bookmark True
+  b757f780b8ffd71267c6ccb32e0882d9d32a8cc0 book1 False
+
+bookmarks JSON works
+
+  $ hg show bookmarks -T json
+  [
+   {
+    "active": true,
+    "bookmark": "a-longer-bookmark",
+    "longestbookmarklen": 17,
+    "node": "7b5709ab64cbc34da9b4367b64afff47f2c4ee83"
+   },
+   {
+    "active": false,
+    "bookmark": "book1",
+    "longestbookmarklen": 17,
+    "node": "b757f780b8ffd71267c6ccb32e0882d9d32a8cc0"
+   }
+  ]
+
+JSON works with no bookmarks
+
+  $ hg book -d a-longer-bookmark
+  $ hg book -d book1
+  $ hg show bookmarks -T json
+  [
+  ]
+
+  $ cd ..
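
Because the '-T json' output shown above is machine-readable, the view is easy to consume from scripts; a sketch of picking out the active bookmark (hypothetical wrapper, assuming the show extension is enabled):

    import json
    import subprocess

    def activebookmark(repo):
        out = subprocess.check_output(
            ['hg', '-R', repo, 'show', 'bookmarks', '-T', 'json'])
        for entry in json.loads(out.decode('utf-8')):
            if entry['active']:
                return entry['bookmark']
        return None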
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-simplekeyvaluefile.py	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+
+import unittest
+import silenttestrunner
+
+from mercurial import (
+    error,
+    scmutil,
+)
+
+class mockfile(object):
+    def __init__(self, name, fs):
+        self.name = name
+        self.fs = fs
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        pass
+
+    def write(self, text):
+        self.fs.contents[self.name] = text
+
+    def read(self):
+        return self.fs.contents[self.name]
+
+class mockvfs(object):
+    def __init__(self):
+        self.contents = {}
+
+    def read(self, path):
+        return mockfile(path, self).read()
+
+    def readlines(self, path):
+        return mockfile(path, self).read().split('\n')
+
+    def __call__(self, path, mode, atomictemp):
+        return mockfile(path, self)
+
+class testsimplekeyvaluefile(unittest.TestCase):
+    def setUp(self):
+        self.vfs = mockvfs()
+
+    def testbasicwriting(self):
+        d = {'key1': 'value1', 'Key2': 'value2'}
+        scmutil.simplekeyvaluefile(self.vfs, 'kvfile').write(d)
+        self.assertEqual(sorted(self.vfs.read('kvfile').split('\n')),
+                         ['', 'Key2=value2', 'key1=value1'])
+
+    def testinvalidkeys(self):
+        d = {'0key1': 'value1', 'Key2': 'value2'}
+        self.assertRaises(error.ProgrammingError,
+                          scmutil.simplekeyvaluefile(self.vfs, 'kvfile').write,
+                          d)
+        d = {'key1@': 'value1', 'Key2': 'value2'}
+        self.assertRaises(error.ProgrammingError,
+                          scmutil.simplekeyvaluefile(self.vfs, 'kvfile').write,
+                          d)
+
+    def testinvalidvalues(self):
+        d = {'key1': 'value1', 'Key2': 'value2\n'}
+        self.assertRaises(error.ProgrammingError,
+                          scmutil.simplekeyvaluefile(self.vfs, 'kvfile').write,
+                          d)
+
+    def testcorruptedfile(self):
+        self.vfs.contents['badfile'] = 'ababagalamaga\n'
+        self.assertRaises(error.CorruptedState,
+                          scmutil.simplekeyvaluefile(self.vfs, 'badfile').read)
+
+if __name__ == "__main__":
+    silenttestrunner.main(__name__)
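
Read together, these cases pin down the format: one 'key=value' line per entry, keys beginning with a letter, values free of newlines, and any line without '=' treated as corruption. A toy reimplementation under those inferred rules (illustrative only; the real class is scmutil.simplekeyvaluefile and raises ProgrammingError/CorruptedState rather than ValueError):

    import re

    def writekv(d):
        out = []
        for k, v in sorted(d.items()):
            if not re.match(r'[A-Za-z][A-Za-z0-9]*$', k):
                raise ValueError('invalid key %r' % k)
            if '\n' in v:
                raise ValueError('invalid value %r' % v)
            out.append('%s=%s\n' % (k, v))
        return ''.join(out)

    def readkv(text):
        d = {}
        for line in text.splitlines():
            if '=' not in line:
                raise ValueError('corrupted kv file: %r' % line)
            k, v = line.split('=', 1)
            d[k] = v
        return d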
--- a/tests/test-ssh-bundle1.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-ssh-bundle1.t	Tue Apr 18 12:24:34 2017 -0400
@@ -494,7 +494,7 @@
   Got arguments 1:user@dummy 2:hg -R local serve --stdio
   Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
@@ -504,7 +504,7 @@
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg init 'a repo'
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
@@ -512,7 +512,7 @@
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
 
 remote hook failure is attributed to remote
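
The expected output above reflects two changes: hooks now also see HG_HOOKNAME and HG_HOOKTYPE, and the expected lines use the $ID$/$LOCALIP test placeholders instead of '(glob)' wildcards. For reference, a sketch of an external changegroup hook reading that environment (a hypothetical script; wire it up as e.g. 'changegroup = python loghook.py'):

    import os
    import sys

    def main():
        env = os.environ
        sys.stdout.write('%s hook (%s): %s..%s via %s\n' % (
            env.get('HG_HOOKNAME', '?'),   # e.g. 'changegroup'
            env.get('HG_HOOKTYPE', '?'),   # the generic hook type
            env.get('HG_NODE', ''),        # first incoming changeset
            env.get('HG_NODE_LAST', ''),   # last incoming changeset
            env.get('HG_URL', ''),         # e.g. 'remote:ssh:...'
        ))
        return 0

    if __name__ == '__main__':
        sys.exit(main())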
--- a/tests/test-ssh.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-ssh.t	Tue Apr 18 12:24:34 2017 -0400
@@ -511,7 +511,7 @@
   Got arguments 1:user@dummy 2:hg -R local serve --stdio
   Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_NODE_LAST=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
@@ -521,7 +521,7 @@
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_NODE_LAST=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
   Got arguments 1:user@dummy 2:hg init 'a repo'
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
@@ -529,7 +529,7 @@
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
-  changegroup-in-remote hook: HG_BUNDLE2=1 HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:* HG_URL=remote:ssh:127.0.0.1 (glob)
+  changegroup-in-remote hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_NODE_LAST=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:ssh:$LOCALIP
   Got arguments 1:user@dummy 2:hg -R remote serve --stdio
 
 remote hook failure is attributed to remote
--- a/tests/test-static-http.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-static-http.t	Tue Apr 18 12:24:34 2017 -0400
@@ -64,7 +64,7 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  changegroup hook: HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_TXNID=TXN:* HG_URL=http://localhost:$HGPORT/remote (glob)
+  changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT/remote
   (run 'hg update' to get a working copy)
 
 trying to push
--- a/tests/test-status-color.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-status-color.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,6 +1,6 @@
   $ cat <<EOF >> $HGRCPATH
-  > [extensions]
-  > color =
+  > [ui]
+  > color = always
   > [color]
   > mode = ansi
   > EOF
@@ -14,7 +14,7 @@
 
 hg status in repo root:
 
-  $ hg status --color=always
+  $ hg status
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
@@ -41,7 +41,7 @@
 
 hg status . in repo root:
 
-  $ hg status --color=always .
+  $ hg status .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
@@ -49,17 +49,17 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd a
+  $ hg status --cwd a
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd a .
+  $ hg status --cwd a .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
-  $ hg status --color=always --cwd a ..
+  $ hg status --cwd a ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc)
@@ -67,18 +67,18 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b
+  $ hg status --cwd b
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b .
+  $ hg status --cwd b .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
-  $ hg status --color=always --cwd b ..
+  $ hg status --cwd b ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
@@ -86,43 +86,43 @@
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
 
-  $ hg status --color=always --cwd a/1
+  $ hg status --cwd a/1
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd a/1 .
+  $ hg status --cwd a/1 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
-  $ hg status --color=always --cwd a/1 ..
+  $ hg status --cwd a/1 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b/1
+  $ hg status --cwd b/1
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b/1 .
+  $ hg status --cwd b/1 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
-  $ hg status --color=always --cwd b/1 ..
+  $ hg status --cwd b/1 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
 
-  $ hg status --color=always --cwd b/2
+  $ hg status --cwd b/2
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
-  $ hg status --color=always --cwd b/2 .
+  $ hg status --cwd b/2 .
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
-  $ hg status --color=always --cwd b/2 ..
+  $ hg status --cwd b/2 ..
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
   \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
@@ -137,7 +137,7 @@
   ? in_root
 
 Make sure ui.formatted=False works
-  $ hg status --config ui.formatted=False
+  $ hg status --color=auto --config ui.formatted=False
   ? a/1/in_a_1
   ? a/in_a
   ? b/1/in_b_1
@@ -179,7 +179,7 @@
 
 hg status:
 
-  $ hg status --color=always
+  $ hg status
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
   \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
@@ -187,7 +187,7 @@
 
 hg status modified added removed deleted unknown never-existed ignored:
 
-  $ hg status --color=always modified added removed deleted unknown never-existed ignored
+  $ hg status modified added removed deleted unknown never-existed ignored
   never-existed: * (glob)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
@@ -198,7 +198,7 @@
 
 hg status -C:
 
-  $ hg status --color=always -C
+  $ hg status -C
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
@@ -208,7 +208,7 @@
 
 hg status -A:
 
-  $ hg status --color=always -A
+  $ hg status -A
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
   \x1b[0;0m  modified\x1b[0m (esc)
@@ -226,7 +226,7 @@
 
   $ mkdir "$TESTTMP/terminfo"
   $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
-  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A
+  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
   \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
   \x1b[30m\x1b[30m  modified\x1b[30m (esc)
@@ -245,7 +245,7 @@
   > # We can override what's in the terminfo database, too
   > terminfo.bold = \E[2m
   > EOF
-  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim --color=always -A
+  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim -A
   \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2madded\x1b[30m (esc)
   \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2mcopied\x1b[30m (esc)
   \x1b[30m\x1b[30m  modified\x1b[30m (esc)
@@ -265,11 +265,11 @@
 
 hg status ignoreddir/file:
 
-  $ hg status --color=always ignoreddir/file
+  $ hg status ignoreddir/file
 
 hg status -i ignoreddir/file:
 
-  $ hg status --color=always -i ignoreddir/file
+  $ hg status -i ignoreddir/file
   \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc)
   $ cd ..
 
@@ -293,7 +293,9 @@
 
 test unknown color
 
-  $ hg --config color.status.modified=periwinkle status --color=always
+  $ hg --config color.status.modified=periwinkle status
+  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
+  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
   ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
   M modified
   \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
@@ -307,8 +309,8 @@
 If result is not as expected, raise error
 
   $ assert() {
-  >     hg status --color=always $1 > ../a
-  >     hg status --color=always $2 > ../b
+  >     hg status $1 > ../a
+  >     hg status $2 > ../b
   >     if diff ../a ../b > /dev/null; then
   >         out=0
   >     else
@@ -367,7 +369,7 @@
 
 hg resolve with one unresolved, one resolved:
 
-  $ hg resolve --color=always -l
+  $ hg resolve -l
   \x1b[0;31;1mU \x1b[0m\x1b[0;31;1ma\x1b[0m (esc)
   \x1b[0;32;1mR \x1b[0m\x1b[0;32;1mb\x1b[0m (esc)
 
--- a/tests/test-status.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-status.t	Tue Apr 18 12:24:34 2017 -0400
@@ -107,6 +107,27 @@
   ? a/in_a
   ? b/in_b
 
+relative paths can be requested
+
+  $ cat >> $HGRCPATH <<EOF
+  > [commands]
+  > status.relative = True
+  > EOF
+  $ hg status --cwd a
+  ? 1/in_a_1
+  ? in_a
+  ? ../b/1/in_b_1
+  ? ../b/2/in_b_2
+  ? ../b/in_b
+  ? ../in_root
+  $ HGPLAIN=1 hg status --cwd a
+  ? a/1/in_a_1 (glob)
+  ? a/in_a (glob)
+  ? b/1/in_b_1 (glob)
+  ? b/2/in_b_2 (glob)
+  ? b/in_b (glob)
+  ? in_root
+
   $ cd ..
 
   $ hg init repo2
@@ -402,6 +423,8 @@
 
 #endif
 
+  $ cd ..
+
 hg status of binary file starting with '\1\n', a separator for metadata:
 
   $ hg init repo5
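
The test above shows the intended split: commands.status.relative makes interactive 'hg status' print cwd-relative paths, while HGPLAIN restores root-relative output so scripts stay stable. A sketch of exercising both modes from Python (hypothetical paths):

    import os
    import subprocess

    def hgstatus(repo, cwd, plain=False):
        env = dict(os.environ)
        if plain:
            env['HGPLAIN'] = '1'  # disables commands.status.relative
        return subprocess.check_output(
            ['hg', '-R', repo, 'status'], cwd=cwd, env=env)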
--- a/tests/test-subrepo-deep-nested-change.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-subrepo-deep-nested-change.t	Tue Apr 18 12:24:34 2017 -0400
@@ -73,6 +73,43 @@
   adding main/main (glob)
   $ hg commit -R main -m "main import"
 
+#if serve
+
+Unfortunately, subrepos not at their nominal location cannot be cloned.  But
+they are still served from their location within the local repository.  The only
+reason why 'main' can be cloned via the filesystem is that 'sub1' and 'sub2'
+are also available as siblings of 'main'.
+
+  $ hg serve -R main --debug -S -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log
+  adding  = $TESTTMP/main (glob)
+  adding sub1 = $TESTTMP/main/sub1 (glob)
+  adding sub1/sub2 = $TESTTMP/main/sub1/sub2 (glob)
+  listening at http://*:$HGPORT/ (bound to *:$HGPORT) (glob) (?)
+  adding  = $TESTTMP/main (glob) (?)
+  adding sub1 = $TESTTMP/main/sub1 (glob) (?)
+  adding sub1/sub2 = $TESTTMP/main/sub1/sub2 (glob) (?)
+  $ cat hg1.pid >> $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT httpclone --config progress.disable=True
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 3 changes to 3 files
+  updating to branch default
+  abort: HTTP Error 404: Not Found
+  [255]
+
+  $ cat access.log
+  * "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
+  * "GET /?cmd=batch HTTP/1.1" 200 - * (glob)
+  * "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob)
+  * "GET /../sub1?cmd=capabilities HTTP/1.1" 404 - (glob)
+
+  $ killdaemons.py
+  $ rm hg1.pid error.log access.log
+#endif
+
 Cleaning both repositories, just as a clone -U
 
   $ hg up -C -R sub2 null
--- a/tests/test-subrepo-recursion.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-subrepo-recursion.t	Tue Apr 18 12:24:34 2017 -0400
@@ -251,6 +251,60 @@
    z1
   +z2
 
+#if serve
+  $ cd ..
+  $ hg serve -R repo --debug -S -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log
+  adding  = $TESTTMP/repo (glob)
+  adding foo = $TESTTMP/repo/foo (glob)
+  adding foo/bar = $TESTTMP/repo/foo/bar (glob)
+  listening at http://*:$HGPORT/ (bound to *:$HGPORT) (glob) (?)
+  adding  = $TESTTMP/repo (glob) (?)
+  adding foo = $TESTTMP/repo/foo (glob) (?)
+  adding foo/bar = $TESTTMP/repo/foo/bar (glob) (?)
+  $ cat hg1.pid >> $DAEMON_PIDS
+
+  $ hg clone http://localhost:$HGPORT clone  --config progress.disable=True
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 5 changes to 3 files
+  updating to branch default
+  cloning subrepo foo from http://localhost:$HGPORT/foo
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 4 changesets with 7 changes to 3 files
+  cloning subrepo foo/bar from http://localhost:$HGPORT/foo/bar (glob)
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 1 files
+  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cat clone/foo/bar/z.txt
+  z1
+  z2
+  z3
+
+  $ cat access.log
+  * "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
+  * "GET /?cmd=batch HTTP/1.1" 200 - * (glob)
+  * "GET /?cmd=getbundle HTTP/1.1" 200 - * (glob)
+  * "GET /foo?cmd=capabilities HTTP/1.1" 200 - (glob)
+  * "GET /foo?cmd=batch HTTP/1.1" 200 - * (glob)
+  * "GET /foo?cmd=getbundle HTTP/1.1" 200 - * (glob)
+  * "GET /foo/bar?cmd=capabilities HTTP/1.1" 200 - (glob)
+  * "GET /foo/bar?cmd=batch HTTP/1.1" 200 - * (glob)
+  * "GET /foo/bar?cmd=getbundle HTTP/1.1" 200 - * (glob)
+
+  $ killdaemons.py
+  $ rm hg1.pid error.log access.log
+  $ cd repo
+#endif
+
 Enable progress extension for archive tests:
 
   $ cp $HGRCPATH $HGRCPATH.no-progress
--- a/tests/test-subrepo.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-subrepo.t	Tue Apr 18 12:24:34 2017 -0400
@@ -311,6 +311,7 @@
    branchmerge: True, force: False, partial: False
    ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198
    preserving t for resolve of t
+  starting 4 threads for background file closing (?)
    t: versions differ -> m (premerge)
   picked tool ':merge' for t (binary False symlink False changedelete False)
   merging t
--- a/tests/test-tag.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-tag.t	Tue Apr 18 12:24:34 2017 -0400
@@ -1,3 +1,19 @@
+  $ cat >> $HGRCPATH << EOF
+  > [experimental]
+  > hook-track-tags=1
+  > [hooks]
+  > txnclose.track-tag=sh ${TESTTMP}/taghook.sh
+  > EOF
+
+  $ cat << EOF > taghook.sh
+  > #!/bin/sh
+  > # escape the "$" otherwise the test runner interprets it when writing the
+  > # file...
+  > if [ -n "\$HG_TAG_MOVED" ]; then
+  >     echo 'hook: tag changes detected'
+  >     sed 's/^/hook: /' .hg/changes/tags.changes
+  > fi
+  > EOF
   $ hg init test
   $ cd test
 
@@ -20,6 +36,8 @@
 specified)
 
   $ HGEDITOR=cat hg tag "bleah"
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
   $ hg history
   changeset:   1:d4f0d2909abc
   tag:         tip
@@ -68,10 +86,22 @@
   [255]
 
   $ hg tag -r 0 "bleah0"
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
   $ hg tag -l -r 1 "bleah1"
   $ hg tag gack gawk gorp
+  hook: tag changes detected
+  hook: +A 336fccc858a4eb69609a291105009e484a6b6b8d gack
+  hook: +A 336fccc858a4eb69609a291105009e484a6b6b8d gawk
+  hook: +A 336fccc858a4eb69609a291105009e484a6b6b8d gorp
   $ hg tag -f gack
+  hook: tag changes detected
+  hook: -M 336fccc858a4eb69609a291105009e484a6b6b8d gack
+  hook: +M 799667b6f2d9b957f73fa644a918c2df22bab58f gack
   $ hg tag --remove gack gorp
+  hook: tag changes detected
+  hook: -R 799667b6f2d9b957f73fa644a918c2df22bab58f gack
+  hook: -R 336fccc858a4eb69609a291105009e484a6b6b8d gorp
 
   $ hg tag "bleah "
   abort: tag 'bleah' already exists (use -f to force)
@@ -83,7 +113,11 @@
   abort: tag 'bleah' already exists (use -f to force)
   [255]
   $ hg tag -r 0 "  bleahbleah  "
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
   $ hg tag -r 0 " bleah bleah "
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
 
   $ cat .hgtags
   acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
@@ -112,6 +146,8 @@
   abort: working directory is not at a branch head (use -f to force)
   [255]
   $ hg tag -f "foobar"
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
   $ cat .hgtags
   acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
   $ cat .hg/localtags
@@ -169,16 +205,24 @@
   summary:     Removed tag gack, gorp
   
   $ hg clone -q -rbleah1 test test1
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
   $ hg -R test1 parents --style=compact
   1[tip]   d4f0d2909abc   1970-01-01 00:00 +0000   test
     Added tag bleah for changeset acb14030fe0a
   
   $ hg clone -q -r5 test#bleah1 test2
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
+  hook: +A 336fccc858a4eb69609a291105009e484a6b6b8d gawk
   $ hg -R test2 parents --style=compact
   5[tip]   b4bb47aaff09   1970-01-01 00:00 +0000   test
     Removed tag gack, gorp
   
   $ hg clone -q -U test#bleah1 test3
+  hook: tag changes detected
+  hook: +A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
   $ hg -R test3 parents --style=compact
 
   $ cd test
@@ -206,6 +250,8 @@
   $ cat .hgtags; echo
   acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
   $ hg tag newline
+  hook: tag changes detected
+  hook: +A a0eea09de1eeec777b46f2085260a373b2fbc293 newline
   $ cat .hgtags; echo
   acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
   a0eea09de1eeec777b46f2085260a373b2fbc293 newline
@@ -219,6 +265,8 @@
   $ hg ci -m"discouraged"
   $ hg tag tag-and-branch-same-name
   warning: tag tag-and-branch-same-name conflicts with existing branch name
+  hook: tag changes detected
+  hook: +A fc93d2ea1cd78e91216c6cfbbf26747c10ce11ae tag-and-branch-same-name
 
 test custom commit messages
 
@@ -303,6 +351,8 @@
   HG: branch 'tag-and-branch-same-name'
   HG: changed .hgtags
   ====
+  hook: tag changes detected
+  hook: +A 75a534207be6b03576e0c7a4fa5708d045f1c876 custom-tag
   $ hg log -l1 --template "{desc}\n"
   custom tag message
   second line
@@ -311,6 +361,8 @@
 local tag with .hgtags modified
 
   $ hg tag hgtags-modified
+  hook: tag changes detected
+  hook: +A 0f26aaea6f74c3ed6c4aad8844403c9ba128d23a hgtags-modified
   $ hg rollback
   repository tip rolled back to revision 13 (undo commit)
   working directory now based on revision 13
@@ -330,9 +382,17 @@
   0 files updated, 1 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg ci -m 'merge named branch'
+  hook: tag changes detected
+  hook: -R acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
+  hook: -R acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
+  hook: -R acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
+  hook: -R acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
+  hook: -R 336fccc858a4eb69609a291105009e484a6b6b8d gawk
   $ hg up 13
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg tag new-topo-head
+  hook: tag changes detected
+  hook: +A 0f26aaea6f74c3ed6c4aad8844403c9ba128d23a new-topo-head
 
 tagging on null rev
 
@@ -399,12 +459,16 @@
   > hg push "$TESTTMP/repo-tag-target"
   > EOF
   $ hg -R repo-tag --config hooks.commit="sh ../issue3344.sh" tag tag
+  hook: tag changes detected
+  hook: +A be090ea6625635128e90f7d89df8beeb2bcc1653 tag
   pushing to $TESTTMP/repo-tag-target (glob)
   searching for changes
   adding changesets
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
+  hook: tag changes detected
+  hook: +A be090ea6625635128e90f7d89df8beeb2bcc1653 tag
 
 automatically merge resolvable tag conflicts (i.e. tags that differ in rank)
 create two clones with some different tags as well as some common tags
@@ -416,6 +480,8 @@
   $ hg ci -A -m0
   adding f0
   $ hg tag tbase
+  hook: tag changes detected
+  hook: +A 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
   $ hg up -qr '.^'
   $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
   1
@@ -431,18 +497,33 @@
   $ hg ci -A -m1
   adding f1
   $ hg tag t1 t2 t3
+  hook: tag changes detected
+  hook: +A 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
+  hook: +A 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
+  hook: +A 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
   $ hg tag --remove t2
+  hook: tag changes detected
+  hook: -R 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
   $ hg tag t5
+  hook: tag changes detected
+  hook: +A 875517b4806a848f942811a315a5bce30804ae85 t5
   $ echo c2 > f2
   $ hg ci -A -m2
   adding f2
   $ hg tag -f t3
+  hook: tag changes detected
+  hook: -M 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
+  hook: +M 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
 
   $ cd ../repo-automatic-tag-merge
   $ echo c3 > f3
   $ hg ci -A -m3
   adding f3
   $ hg tag -f t4 t5 t6
+  hook: tag changes detected
+  hook: +A 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
+  hook: +A 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
+  hook: +A 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
 
   $ hg up -q '.^'
   $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
@@ -455,6 +536,8 @@
   $ hg up -qC
 
   $ hg tag --remove t5
+  hook: tag changes detected
+  hook: -R 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
   $ echo c4 > f4
   $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
   2 changes since t4:t6
@@ -473,7 +556,12 @@
   $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
   4 changes since t4:t6
   $ hg tag t2
+  hook: tag changes detected
+  hook: +A 929bca7b18d067cbf3844c3896319a940059d748 t2
   $ hg tag -f t6
+  hook: tag changes detected
+  hook: -M 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
+  hook: +M 09af2ce14077a94effef208b49a718f4836d4338 t6
 
   $ cd ../repo-automatic-tag-merge-clone
   $ hg pull
@@ -483,6 +571,11 @@
   adding manifests
   adding file changes
   added 6 changesets with 6 changes to 3 files (+1 heads)
+  hook: tag changes detected
+  hook: +A 929bca7b18d067cbf3844c3896319a940059d748 t2
+  hook: +A 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
+  hook: -R 875517b4806a848f942811a315a5bce30804ae85 t5
+  hook: +A 09af2ce14077a94effef208b49a718f4836d4338 t6
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hg merge --tool internal:tagmerge
   merging .hgtags
@@ -543,10 +636,17 @@
   $ hg update -C -r tip
   3 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg tag t7
+  hook: tag changes detected
+  hook: +A b325cc5b642c5b465bdbe8c09627cb372de3d47d t7
   $ hg update -C -r 'first(sort(head()))'
   3 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ printf "%s %s\n" `hg log -r . --template "{node} t7"` >> .hgtags
   $ hg commit -m "manually add conflicting t7 tag"
+  hook: tag changes detected
+  hook: -R 929bca7b18d067cbf3844c3896319a940059d748 t2
+  hook: +A 875517b4806a848f942811a315a5bce30804ae85 t5
+  hook: -M b325cc5b642c5b465bdbe8c09627cb372de3d47d t7
+  hook: +M ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
   $ hg merge --tool internal:tagmerge
   merging .hgtags
   automatic .hgtags merge failed
@@ -581,6 +681,9 @@
   $ hg ci -A -m5
   adding f5
   $ hg tag -f t7
+  hook: tag changes detected
+  hook: -M ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
+  hook: +M fd3a9e394ce3afb354a496323bf68ac1755a30de t7
   $ hg update -r 'p1(t7)'
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ printf '' > .hgtags
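
The hook lines above are read back from .hg/changes/tags.changes, where each line pairs an action with a node and a tag name: +A (added), -R (removed), and a -M/+M pair giving a moved tag's old and new node. A parsing sketch (format inferred from the expected output in this test):

    def parsetagchanges(text):
        changes = []
        for line in text.splitlines():
            action, node, tag = line.split(' ', 2)  # tags may contain spaces
            changes.append((action, node, tag))
        return changes

    sample = '+A acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah'
    assert parsetagchanges(sample) == [
        ('+A', 'acb14030fe0a21b60322c440ad2d20cf7685a376', 'bleah bleah')]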
--- a/tests/test-tags.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-tags.t	Tue Apr 18 12:24:34 2017 -0400
@@ -672,9 +672,9 @@
 
   $ ls tagsclient/.hg/cache
   branch2-served
-  checkisexec
-  checklink
-  checklink-target
+  checkisexec (execbit !)
+  checklink (symlink !)
+  checklink-target (symlink !)
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
@@ -699,9 +699,9 @@
 
   $ ls tagsclient/.hg/cache
   branch2-served
-  checkisexec
-  checklink
-  checklink-target
+  checkisexec (execbit !)
+  checklink (symlink !)
+  checklink-target (symlink !)
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-transplant.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-transplant.t	Tue Apr 18 12:24:34 2017 -0400
@@ -94,6 +94,15 @@
   1  r2
   0  r1
 
+test format of transplant_source
+
+  $ hg log -r7 --debug | grep transplant_source
+  extra:       transplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
+  $ hg log -r7 -T '{extras}\n'
+  branch=defaulttransplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
+  $ hg log -r7 -T '{join(extras, " ")}\n'
+  branch=default transplant_source=\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z
+
 test transplanted revset
 
   $ hg log -r 'transplanted()' --template '{rev} {parents} {desc}\n'
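
transplant_source is stored in the changeset extras as the raw 20-byte binary node, which is why the template output above shows \x.. escapes; converting it back to the familiar hex form is direct:

    from binascii import hexlify

    raw = b'\xa52Q\xcd\xf7\x17g\x9d\x19\x07\xb2\x89\xf9\x91SK\xe0\\\x99z'
    assert len(raw) == 20
    print(hexlify(raw).decode('ascii'))
    # a53251cdf717679d1907b289f991534be05c997a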
--- a/tests/test-treemanifest.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-treemanifest.t	Tue Apr 18 12:24:34 2017 -0400
@@ -437,6 +437,16 @@
 
   $ hg ci -Aqm 'initial'
 
+  $ echo >> .A/one.txt
+  $ echo >> .A/two.txt
+  $ echo >> b/bar/fruits.txt
+  $ echo >> b/bar/orange/fly/gnat.py
+  $ echo >> b/bar/orange/fly/housefly.txt
+  $ echo >> b/foo/apple/bees/flower.py
+  $ echo >> c.txt
+  $ echo >> d.py
+  $ hg ci -Aqm 'second'
+
 We'll see that visitdir works by removing some treemanifest revlogs and running
 the files command with various parameters.
 
@@ -468,6 +478,12 @@
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
   b/foo/apple/bees/flower.py (glob)
+  $ hg diff -r '.^' -r . --stat b
+   b/bar/fruits.txt              |  1 +
+   b/bar/orange/fly/gnat.py      |  1 +
+   b/bar/orange/fly/housefly.txt |  1 +
+   b/foo/apple/bees/flower.py    |  1 +
+   4 files changed, 4 insertions(+), 0 deletions(-)
   $ cp -R .hg/store-copy/. .hg/store
 
 Test files with just includes and excludes.
@@ -477,6 +493,9 @@
   $ rm -r .hg/store/meta/b/foo/apple/bees
   $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
   b/bar/fruits.txt (glob)
+  $ hg diff -r '.^' -r . --stat -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
+   b/bar/fruits.txt |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
   $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a subdirectory, excluding a directory within it.
@@ -487,6 +506,11 @@
   b/bar/fruits.txt (glob)
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
+  $ hg diff -r '.^' -r . --stat -X path:b/foo b
+   b/bar/fruits.txt              |  1 +
+   b/bar/orange/fly/gnat.py      |  1 +
+   b/bar/orange/fly/housefly.txt |  1 +
+   3 files changed, 3 insertions(+), 0 deletions(-)
   $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a sub directory, including only a directory within it, and
@@ -497,6 +521,10 @@
   $ hg files -r . -I path:b/bar/orange -I path:a b
   b/bar/orange/fly/gnat.py (glob)
   b/bar/orange/fly/housefly.txt (glob)
+  $ hg diff -r '.^' -r . --stat -I path:b/bar/orange -I path:a b
+   b/bar/orange/fly/gnat.py      |  1 +
+   b/bar/orange/fly/housefly.txt |  1 +
+   2 files changed, 2 insertions(+), 0 deletions(-)
   $ cp -R .hg/store-copy/. .hg/store
 
 Test files for a pattern, including a directory, and excluding a directory
@@ -507,6 +535,9 @@
   $ rm -r .hg/store/meta/b/bar/orange
   $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
   b/bar/fruits.txt (glob)
+  $ hg diff -r '.^' -r . --stat glob:**.txt -I path:b/bar -X path:b/bar/orange
+   b/bar/fruits.txt |  1 +
+   1 files changed, 1 insertions(+), 0 deletions(-)
   $ cp -R .hg/store-copy/. .hg/store
 
 Add some more changes to the deep repo
@@ -522,7 +553,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Dirlogs are included in fncache
   $ grep meta/.A/00manifest.i .hg/store/fncache
@@ -563,8 +594,9 @@
   checking directory manifests
    0: empty or missing b/
    b/@0: parent-directory manifest refers to unknown revision 67688a370455
-   b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
-   b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
+   b/@1: parent-directory manifest refers to unknown revision f065da70369e
+   b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
+   b/@3: parent-directory manifest refers to unknown revision 367152e6af28
   warning: orphan revlog 'meta/b/bar/00manifest.i'
   warning: orphan revlog 'meta/b/bar/orange/00manifest.i'
   warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i'
@@ -577,9 +609,9 @@
    b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
    b/foo/apple/bees/flower.py@0: in changeset but not in manifest
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
   6 warnings encountered!
-  8 integrity errors encountered!
+  9 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
   $ cp -R .hg/store-newcopy/. .hg/store
@@ -590,22 +622,22 @@
   checking changesets
   checking manifests
   checking directory manifests
-   b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
-   b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
-   b/bar/@?: rev 1 points to unexpected changeset 1
-   b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest
+   b/@2: parent-directory manifest refers to unknown revision ac0d30948e0b
+   b/@3: parent-directory manifest refers to unknown revision 367152e6af28
    b/bar/@?: rev 2 points to unexpected changeset 2
-   b/bar/@?: 1b16940d66d6 not in parent-directory manifest
-   b/bar/orange/@?: rev 1 points to unexpected changeset 2
+   b/bar/@?: 44d7e1146e0d not in parent-directory manifest
+   b/bar/@?: rev 3 points to unexpected changeset 3
+   b/bar/@?: 70b10c6b17b7 not in parent-directory manifest
+   b/bar/orange/@?: rev 2 points to unexpected changeset 3
    (expected None)
-   b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2
+   b/bar/orange/fly/@?: rev 2 points to unexpected changeset 3
    (expected None)
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
   2 warnings encountered!
   8 integrity errors encountered!
-  (first damaged changeset appears to be 1)
+  (first damaged changeset appears to be 2)
   [1]
   $ cp -R .hg/store-newcopy/. .hg/store
 
@@ -621,7 +653,7 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 10 changes to 8 files
+  added 4 changesets with 18 changes to 8 files
   updating to branch default
   8 files updated, 0 files merged, 0 files removed, 0 files unresolved
 No server errors.
@@ -656,7 +688,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
   $ cd ..
 
 Create clones using old repo formats to use in later tests
@@ -667,7 +699,7 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 10 changes to 8 files
+  added 4 changesets with 18 changes to 8 files
   updating to branch default
   8 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd deeprepo-basicstore
@@ -683,7 +715,7 @@
   adding changesets
   adding manifests
   adding file changes
-  added 3 changesets with 10 changes to 8 files
+  added 4 changesets with 18 changes to 8 files
   updating to branch default
   8 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd deeprepo-encodedstore
@@ -701,7 +733,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Local clone with encodedstore
   $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
@@ -711,7 +743,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Local clone with fncachestore
   $ hg clone -U deeprepo local-clone-fncachestore
@@ -721,7 +753,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with basicstore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -737,7 +769,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with encodedstore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -753,7 +785,7 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Stream clone with fncachestore
   $ hg clone --config experimental.changegroup3=True --uncompressed -U \
@@ -769,11 +801,11 @@
   checking directory manifests
   crosschecking files in changesets and manifests
   checking files
-  8 files, 3 changesets, 10 total revisions
+  8 files, 4 changesets, 18 total revisions
 
 Packed bundle
   $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
-  writing 3349 bytes for 18 files
+  writing 5330 bytes for 18 files
   bundle requirements: generaldelta, revlogv1, treemanifest
   $ hg debugbundle --spec repo-packed.hg
   none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
@@ -825,3 +857,13 @@
   added 1 changesets with 1 changes to 1 files (+1 heads)
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
+Committing an empty commit does not duplicate the root treemanifest
+  $ echo z >> z
+  $ hg commit -Aqm 'pre-empty commit'
+  $ hg rm z
+  $ hg commit --amend -m 'empty commit'
+  saved backup bundle to $TESTTMP/grafted-dir-repo-clone/.hg/strip-backup/cb99d5717cea-de37743b-amend-backup.hg (glob)
+  $ hg log -r 'tip + tip^' -T '{manifest}\n'
+  1:678d3574b88c
+  1:678d3574b88c
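+
+The identical manifest ids above confirm that the amend reused the existing
+root treemanifest revision rather than writing a duplicate.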
+  $ hg --config extensions.strip= strip -r . -q
--- a/tests/test-trusted.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-trusted.py	Tue Apr 18 12:24:34 2017 -0400
@@ -74,14 +74,14 @@
         return u
     print('trusted')
     for name, path in u.configitems('paths'):
-        print('   ', name, '=', path)
+        print('   ', name, '=', util.pconvert(path))
     print('untrusted')
     for name, path in u.configitems('paths', untrusted=True):
         print('.', end=' ')
         u.config('paths', name) # warning with debug=True
         print('.', end=' ')
         u.config('paths', name, untrusted=True) # no warnings
-        print(name, '=', path)
+        print(name, '=', util.pconvert(path))
     print()
 
     return u
@@ -201,3 +201,47 @@
     testui(debug=True, silent=True)
 except error.ParseError as inst:
     print(inst)
+
+print()
+print('# access typed information')
+with open('.hg/hgrc', 'w') as f:
+    f.write('''\
+[foo]
+sub=main
+sub:one=one
+sub:two=two
+path=monty/python
+bool=true
+int=42
+bytes=81mb
+list=spam,ham,eggs
+''')
+u = testui(user='abc', group='def', cuser='foo', silent=True)
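+# wrap ui.configpath for the checks below; util.pconvert normalizes Windows
+# backslashes to forward slashes so the expected output stays stable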
+def configpath(section, name, default=None, untrusted=False):
+    path = u.configpath(section, name, default, untrusted)
+    if path is None:
+        return None
+    return util.pconvert(path)
+
+print('# suboptions, trusted and untrusted')
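+# configsuboptions returns a (value, suboptions) pair; the 'sub:one=one'
+# entries in the hgrc above define the suboptions of 'foo.sub'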
+trusted = u.configsuboptions('foo', 'sub')
+untrusted = u.configsuboptions('foo', 'sub', untrusted=True)
+print(
+    (trusted[0], sorted(trusted[1].items())),
+    (untrusted[0], sorted(untrusted[1].items())))
+print('# path, trusted and untrusted')
+print(configpath('foo', 'path'), configpath('foo', 'path', untrusted=True))
+print('# bool, trusted and untrusted')
+print(u.configbool('foo', 'bool'), u.configbool('foo', 'bool', untrusted=True))
+print('# int, trusted and untrusted')
+print(
+    u.configint('foo', 'int', 0),
+    u.configint('foo', 'int', 0, untrusted=True))
+print('# bytes, trusted and untrusted')
+print(
+    u.configbytes('foo', 'bytes', 0),
+    u.configbytes('foo', 'bytes', 0, untrusted=True))
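+# configbytes parses 1024-based units: '81mb' is 81 * 1024 * 1024 = 84934656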
+print('# list, trusted and untrusted')
+print(
+    u.configlist('foo', 'list', []),
+    u.configlist('foo', 'list', [], untrusted=True))
--- a/tests/test-trusted.py.out	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-trusted.py.out	Tue Apr 18 12:24:34 2017 -0400
@@ -177,3 +177,19 @@
 ('foo', '.hg/hgrc:1')
 # same user, same group
 ('foo', '.hg/hgrc:1')
+
+# access typed information
+# different user, different group
+not trusting file .hg/hgrc from untrusted user abc, group def
+# suboptions, trusted and untrusted
+(None, []) ('main', [('one', 'one'), ('two', 'two')])
+# path, trusted and untrusted
+None .hg/monty/python
+# bool, trusted and untrusted
+False True
+# int, trusted and untrusted
+0 42
+# bytes, trusted and untrusted
+0 84934656
+# list, trusted and untrusted
+[] ['spam', 'ham', 'eggs']
--- a/tests/test-ui-color.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-ui-color.py	Tue Apr 18 12:24:34 2017 -0400
@@ -1,16 +1,13 @@
 from __future__ import absolute_import, print_function
 
 import os
-from hgext import (
-    color,
-)
 from mercurial import (
     dispatch,
     ui as uimod,
 )
 
 # ensure errors aren't buffered
-testui = color.colorui()
+testui = uimod.ui()
 testui.pushbuffer()
 testui.write(('buffered\n'))
 testui.warn(('warning\n'))
@@ -35,6 +32,7 @@
     dispatch.dispatch(dispatch.request(['version', '-q'], ui_))
 
 runcmd()
-print("colored? " + str(issubclass(ui_.__class__, color.colorui)))
+print("colored? %s" % (ui_._colormode is not None))
 runcmd()
-print("colored? " + str(issubclass(ui_.__class__, color.colorui)))
+print("colored? %s" % (ui_._colormode is not None))
+
--- a/tests/test-update-branches.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-update-branches.t	Tue Apr 18 12:24:34 2017 -0400
@@ -160,6 +160,16 @@
   parent=1
   M foo
 
+  $ revtest '-m dirty linear'   dirty 1 2 -m
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+  M foo
+
+  $ revtest '-m dirty cross'  dirty 3 4 -m
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=4
+  M foo
+
   $ revtest '-c dirtysub linear'   dirtysub 1 2 -c
   abort: uncommitted changes in subrepository 'sub'
   parent=1
@@ -171,10 +181,173 @@
   parent=2
 
   $ revtest '-cC dirty linear'  dirty 1 2 -cC
-  abort: cannot specify both -c/--check and -C/--clean
+  abort: can only specify one of -C/--clean, -c/--check, or -m/merge
+  parent=1
+  M foo
+
+  $ revtest '-mc dirty linear'  dirty 1 2 -mc
+  abort: can only specify one of -C/--clean, -c/--check, or -m/merge
+  parent=1
+  M foo
+
+  $ revtest '-mC dirty linear'  dirty 1 2 -mC
+  abort: can only specify one of -C/--clean, -c/--check, or -m/merge
+  parent=1
+  M foo
+
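+With experimental.updatecheck=abort, updating with a dirty working directory
+aborts unless -C/--clean is given:
+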
+  $ echo '[experimental]' >> .hg/hgrc
+  $ echo 'updatecheck = abort' >> .hg/hgrc
+
+  $ revtest 'none dirty linear' dirty 1 2
+  abort: uncommitted changes
+  parent=1
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2 -c
+  abort: uncommitted changes
+  parent=1
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2 -C
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+
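+With experimental.updatecheck=none, even a dirty update crossing branches is
+allowed; -c/--check still aborts and -C/--clean still discards (later hgrc
+values override the earlier 'abort'):
+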
+  $ echo 'updatecheck = none' >> .hg/hgrc
+
+  $ revtest 'none dirty cross'  dirty 3 4
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=4
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2 -c
+  abort: uncommitted changes
   parent=1
   M foo
 
+  $ revtest 'none dirty linear' dirty 1 2 -C
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+
+  $ hg co -qC 3
+  $ echo dirty >> a
+  $ hg co --tool :merge3 4
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges
+  [1]
+  $ hg st
+  M a
+  ? a.orig
+  $ cat a
+  <<<<<<< working copy: 6efa171f091b - test: 3
+  three
+  dirty
+  ||||||| base
+  three
+  =======
+  four
+  >>>>>>> destination:  d047485b3896 b1 - test: 4
+  $ rm a.orig
+
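+With experimental.updatecheck=noconflict, a dirty update is allowed as long
+as no file would need conflict resolution:
+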
+  $ echo 'updatecheck = noconflict' >> .hg/hgrc
+
+  $ revtest 'none dirty cross'  dirty 3 4
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=4
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2 -c
+  abort: uncommitted changes
+  parent=1
+  M foo
+
+  $ revtest 'none dirty linear' dirty 1 2 -C
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  parent=2
+
+Locally added file is allowed
+  $ hg up -qC 3
+  $ echo a > bar
+  $ hg add bar
+  $ hg up -q 4
+  $ hg st
+  A bar
+  $ hg forget bar
+  $ rm bar
+
+Locally removed file is allowed
+  $ hg up -qC 3
+  $ hg rm foo
+  $ hg up -q 4
+
+File conflict is not allowed
+  $ hg up -qC 3
+  $ echo dirty >> a
+  $ hg up -q 4
+  abort: conflicting changes
+  (commit or update --clean to discard changes)
+  [255]
+  $ hg up -m 4
+  merging a
+  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges
+  [1]
+  $ rm a.orig
+
+Change/delete conflict is not allowed
+  $ hg up -qC 3
+  $ hg rm foo
+  $ hg up -q 4
+
+Uses default value of "linear" when value is misspelled
+  $ echo 'updatecheck = linyar' >> .hg/hgrc
+
+  $ revtest 'dirty cross'  dirty 3 4
+  abort: uncommitted changes
+  (commit or update --clean to discard changes)
+  parent=3
+  M foo
+
+Setup for later tests
+  $ revtest 'none dirty linear' dirty 1 2 -c
+  abort: uncommitted changes
+  parent=1
+  M foo
+
+  $ cd ..
+
+Test updating to null revision
+
+  $ hg init null-repo
+  $ cd null-repo
+  $ echo a > a
+  $ hg add a
+  $ hg ci -m a
+  $ hg up -qC 0
+  $ echo b > b
+  $ hg add b
+  $ hg up null
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg st
+  A b
+  $ hg up -q 0
+  $ hg st
+  A b
+  $ hg up -qC null
+  $ hg st
+  ? b
   $ cd ..
 
 Test updating with closed head
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-update-dest.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,35 @@
+Test update.requiredest
+  $ cd $TESTTMP
+  $ cat >> $HGRCPATH <<EOF
+  > [commands]
+  > update.requiredest = True
+  > EOF
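+
+With commands.update.requiredest=True, a bare 'hg up' must name a
+destination; HGPLAIN or an explicit config override lifts the requirement: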
+  $ hg init repo
+  $ cd repo
+  $ echo a >> a
+  $ hg commit -qAm aa
+  $ hg up
+  abort: you must specify a destination
+  (for example: hg update ".::")
+  [255]
+  $ hg up .
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ HGPLAIN=1 hg up
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg --config commands.update.requiredest=False up
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ cd ..
+
+Check update.requiredest interaction with pull --update
+  $ hg clone repo clone
+  updating to branch default
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd repo
+  $ echo a >> a
+  $ hg commit -qAm aa
+  $ cd ../clone
+  $ hg pull --update
+  abort: update destination required by configuration
+  (use hg pull followed by hg update DEST)
+  [255]
--- a/tests/test-walk.t	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/test-walk.t	Tue Apr 18 12:24:34 2017 -0400
@@ -112,6 +112,74 @@
   f  beans/navy      ../beans/navy
   f  beans/pinto     ../beans/pinto
   f  beans/turtle    ../beans/turtle
+
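+rootfilesin: matches only files directly inside the named directory (the
+repository root when the path is empty), not files in subdirectories:
+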
+  $ hg debugwalk 'rootfilesin:'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -I 'rootfilesin:'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk 'rootfilesin:.'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -I 'rootfilesin:.'
+  f  fennel      ../fennel
+  f  fenugreek   ../fenugreek
+  f  fiddlehead  ../fiddlehead
+  $ hg debugwalk -X 'rootfilesin:'
+  f  beans/black                     ../beans/black
+  f  beans/borlotti                  ../beans/borlotti
+  f  beans/kidney                    ../beans/kidney
+  f  beans/navy                      ../beans/navy
+  f  beans/pinto                     ../beans/pinto
+  f  beans/turtle                    ../beans/turtle
+  f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
+  f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
+  f  mammals/Procyonidae/raccoon     Procyonidae/raccoon
+  f  mammals/skunk                   skunk
+  $ hg debugwalk 'rootfilesin:fennel'
+  $ hg debugwalk -I 'rootfilesin:fennel'
+  $ hg debugwalk 'rootfilesin:skunk'
+  $ hg debugwalk -I 'rootfilesin:skunk'
+  $ hg debugwalk 'rootfilesin:beans'
+  f  beans/black     ../beans/black
+  f  beans/borlotti  ../beans/borlotti
+  f  beans/kidney    ../beans/kidney
+  f  beans/navy      ../beans/navy
+  f  beans/pinto     ../beans/pinto
+  f  beans/turtle    ../beans/turtle
+  $ hg debugwalk -I 'rootfilesin:beans'
+  f  beans/black     ../beans/black
+  f  beans/borlotti  ../beans/borlotti
+  f  beans/kidney    ../beans/kidney
+  f  beans/navy      ../beans/navy
+  f  beans/pinto     ../beans/pinto
+  f  beans/turtle    ../beans/turtle
+  $ hg debugwalk 'rootfilesin:mammals'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -I 'rootfilesin:mammals'
+  f  mammals/skunk  skunk
+  $ hg debugwalk 'rootfilesin:mammals/'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -I 'rootfilesin:mammals/'
+  f  mammals/skunk  skunk
+  $ hg debugwalk -X 'rootfilesin:mammals'
+  f  beans/black                     ../beans/black
+  f  beans/borlotti                  ../beans/borlotti
+  f  beans/kidney                    ../beans/kidney
+  f  beans/navy                      ../beans/navy
+  f  beans/pinto                     ../beans/pinto
+  f  beans/turtle                    ../beans/turtle
+  f  fennel                          ../fennel
+  f  fenugreek                       ../fenugreek
+  f  fiddlehead                      ../fiddlehead
+  f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
+  f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
+  f  mammals/Procyonidae/raccoon     Procyonidae/raccoon
+
   $ hg debugwalk .
   f  mammals/Procyonidae/cacomistle  Procyonidae/cacomistle
   f  mammals/Procyonidae/coatimundi  Procyonidae/coatimundi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-worker.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,90 @@
+Test UI worker interaction
+
+  $ cat > t.py <<EOF
+  > from __future__ import absolute_import, print_function
+  > from mercurial import (
+  >     cmdutil,
+  >     error,
+  >     ui as uimod,
+  >     worker,
+  > )
+  > def abort(ui, args):
+  >     if args[0] == 0:
+  >         # by first worker for test stability
+  >         raise error.Abort('known exception')
+  >     return runme(ui, [])
+  > def exc(ui, args):
+  >     if args[0] == 0:
+  >         # by first worker for test stability
+  >         raise Exception('unknown exception')
+  >     return runme(ui, [])
+  > def runme(ui, args):
+  >     for arg in args:
+  >         ui.status('run\n')
+  >         yield 1, arg
+  > functable = {
+  >     'abort': abort,
+  >     'exc': exc,
+  >     'runme': runme,
+  > }
+  > cmdtable = {}
+  > command = cmdutil.command(cmdtable)
+  > @command('test', [], 'hg test [COST] [FUNC]')
+  > def t(ui, repo, cost=1.0, func='runme'):
+  >     cost = float(cost)
+  >     func = functable[func]
+  >     ui.status('start\n')
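+  >     # worker.worker(ui, costperarg, func, staticargs, args) forks worker
+  >     # processes only when costperarg * len(args) is high enough (and the
+  >     # platform supports it); otherwise it runs func inline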
+  >     runs = worker.worker(ui, cost, func, (ui,), range(8))
+  >     for n, i in runs:
+  >         pass
+  >     ui.status('done\n')
+  > EOF
+  $ abspath=`pwd`/t.py
+  $ hg init
+
+Run tests with workers enabled by forcing a high cost
+
+  $ hg --config "extensions.t=$abspath" test 100000.0
+  start
+  run
+  run
+  run
+  run
+  run
+  run
+  run
+  run
+  done
+
+Run tests without workers by forcing a low cost
+
+  $ hg --config "extensions.t=$abspath" test 0.0000001
+  start
+  run
+  run
+  run
+  run
+  run
+  run
+  run
+  run
+  done
+
+Known exceptions should be caught, but printed if --traceback is enabled
+
+  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=2' \
+  > test 100000.0 abort
+  start
+  abort: known exception
+  [255]
+
+  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=2' \
+  > test 100000.0 abort --traceback 2>&1 | grep '^Traceback'
+  Traceback (most recent call last):
+  Traceback (most recent call last):
+
+Traceback must be printed for unknown exceptions
+
+  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=2' \
+  > test 100000.0 exc 2>&1 | grep '^Traceback'
+  Traceback (most recent call last):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-xdg.t	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,11 @@
+#if no-windows no-osx
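+
+hg should read $XDG_CONFIG_HOME/hg/hgrc when HGRCPATH is unset (skipped on
+Windows and macOS, which look up user config elsewhere):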
+
+  $ mkdir -p xdgconf/hg
+  $ echo '[ui]' > xdgconf/hg/hgrc
+  $ echo 'username = foobar' >> xdgconf/hg/hgrc
+  $ XDG_CONFIG_HOME="`pwd`/xdgconf" ; export XDG_CONFIG_HOME
+  $ unset HGRCPATH
+  $ hg config ui.username
+  foobar
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/exchange-obsmarker-util.sh	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,139 @@
+#!/bin/sh
+# setup config and various utilities for obsolescence marker exchange tests
+
+cat >> $TESTTMP/prune.sh << EOF
+rev=\`hg log --hidden --template '{node}\n' --rev "\$3"\`
+
+hg debugobsolete --record-parents \$1 "\$2" \$rev \
+   && hg up --quiet 'max((::.) - obsolete())'
+EOF
+
+cat >> $HGRCPATH <<EOF
+[web]
+# We test http pull and push, drop authentication requirement
+push_ssl = false
+allow_push = *
+
+[ui]
+# simpler log output
+logtemplate = "{node|short} ({phase}): {desc}\n"
+
+[phases]
+# non publishing server
+publish=False
+
+[experimental]
+# reduce output changes
+bundle2-output-capture=True
+# enable evolution
+evolution=all
+
+[extensions]
+# we need to strip some changeset for some test cases
+hgext.strip=
+
+[alias]
+# fix date used to create obsolete markers.
+debugobsolete=debugobsolete -d '0 0'
+# poor man's substitute for the evolve extension's 'hg prune'; using prune makes the tests clearer
+prune = !sh $TESTTMP/prune.sh \$1 "\$2" "\$3"
+EOF
+
+mkcommit() {
+   echo "$1" > "$1"
+   hg add "$1"
+   hg ci -m "$1"
+}
+getid() {
+   hg log --hidden --template '{node}\n' --rev "$1"
+}
+
+setuprepos() {
+    echo creating test repo for test case $1
+    mkdir $1
+    cd $1
+    echo - pushdest
+    hg init pushdest
+    cd pushdest
+    mkcommit O
+    hg phase --public .
+    cd ..
+    echo - main
+    hg clone -q pushdest main
+    echo - pulldest
+    hg clone -q main pulldest
+    echo 'cd into `main` and proceed with env setup'
+}
+
+inspect_obsmarkers() {
+    # This exists as its own function so the evolve extension can reuse the
+    # tests as is. The evolve extension's version will include more advanced
+    # queries (e.g. related to obsmarkers discovery).
+    echo 'obsstore content'
+    echo '================'
+    hg debugobsolete
+}
+
+dotest() {
+    # dotest TESTNAME [TARGETNODE] [PUSHFLAGS+]
+    #
+    # test exchange for the given test case.
+    #
+    # This function performs push and pull in all directions through all
+    # protocols and displays the resulting obsolescence markers on all sides.
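+    #
+    # Example (hypothetical names: a testcase directory "A.1" created by
+    # "setuprepos A.1", containing a changeset described "A1" in A.1/main):
+    #
+    #     dotest A.1 A1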
+
+    testcase=$1
+    shift
+    target="$1"
+    if [ $# -gt 0 ]; then
+        shift
+    fi
+    targetnode=""
+    desccall=""
+    cd $testcase
+    echo "## Running testcase $testcase"
+    if [ -n "$target" ]; then
+        desccall="desc("\'"$target"\'")"
+        targetnode="`hg -R main id -qr \"$desccall\"`"
+        echo "# testing echange of \"$target\" ($targetnode)"
+    fi
+    echo "## initial state"
+    echo "# obstore: main"
+    hg -R main     debugobsolete | sort
+    echo "# obstore: pushdest"
+    hg -R pushdest debugobsolete | sort
+    echo "# obstore: pulldest"
+    hg -R pulldest debugobsolete | sort
+
+    if [ -n "$target" ]; then
+        echo "## pushing \"$target\"" from main to pushdest
+        hg -R main push -r "$desccall" $@ pushdest
+    else
+        echo "## pushing from main to pushdest"
+        hg -R main push pushdest $@
+    fi
+    echo "## post push state"
+    echo "# obstore: main"
+    hg -R main     debugobsolete | sort
+    echo "# obstore: pushdest"
+    hg -R pushdest debugobsolete | sort
+    echo "# obstore: pulldest"
+    hg -R pulldest debugobsolete | sort
+    if [ -n "$target" ]; then
+        echo "## pulling \"$targetnode\"" from main into pulldest
+        hg -R pulldest pull -r $targetnode $@ main
+    else
+        echo "## pulling from main into pulldest"
+        hg -R pulldest pull main $@
+    fi
+    echo "## post pull state"
+    echo "# obstore: main"
+    hg -R main     debugobsolete | sort
+    echo "# obstore: pushdest"
+    hg -R pushdest debugobsolete | sort
+    echo "# obstore: pulldest"
+    hg -R pulldest debugobsolete | sort
+
+    cd ..
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/testlib/push-checkheads-util.sh	Tue Apr 18 12:24:34 2017 -0400
@@ -0,0 +1,44 @@
+# setup config and various utilities to test new-head checks on push
+
+cat >> $HGRCPATH <<EOF
+[ui]
+# simpler log output
+logtemplate = "{node|short} ({phase}): {desc}\n"
+
+[phases]
+# non publishing server
+publish=False
+
+[extensions]
+# we need to strip some changeset for some test cases
+strip=
+
+[experimental]
+# enable evolution
+evolution=all
+
+[alias]
+# fix date used to create obsolete markers.
+debugobsolete=debugobsolete -d '0 0'
+EOF
+
+mkcommit() {
+   echo "$1" > "$1"
+   hg add "$1"
+   hg ci -m "$1"
+}
+
+getid() {
+   hg log --hidden --template '{node}\n' --rev "$1"
+}
+
+setuprepos() {
+    echo creating basic server and client repo
+    hg init server
+    cd server
+    mkcommit root
+    hg phase --public .
+    mkcommit A0
+    cd ..
+    hg clone server client
+}
--- a/tests/tinyproxy.py	Tue Apr 18 11:22:42 2017 -0400
+++ b/tests/tinyproxy.py	Tue Apr 18 12:24:34 2017 -0400
@@ -23,8 +23,13 @@
 from mercurial import util
 
 httpserver = util.httpserver
-urlparse = util.urlparse
 socketserver = util.socketserver
+urlreq = util.urlreq
+
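+# the test runner exports HGIPV6=1 when the suite runs over IPv6, so the
+# proxy must open its sockets in the matching address family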
+if os.environ.get('HGIPV6', '0') == '1':
+    family = socket.AF_INET6
+else:
+    family = socket.AF_INET
 
 class ProxyHandler (httpserver.basehttprequesthandler):
     __base = httpserver.basehttprequesthandler
@@ -65,7 +70,7 @@
         return 1
 
     def do_CONNECT(self):
-        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        soc = socket.socket(family, socket.SOCK_STREAM)
         try:
             if self._connect_to(self.path, soc):
                 self.log_request(200)
@@ -80,18 +85,18 @@
             self.connection.close()
 
     def do_GET(self):
-        (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
+        (scm, netloc, path, params, query, fragment) = urlreq.urlparse(
             self.path, 'http')
         if scm != 'http' or fragment or not netloc:
             self.send_error(400, "bad url %s" % self.path)
             return
-        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        soc = socket.socket(family, socket.SOCK_STREAM)
         try:
             if self._connect_to(netloc, soc):
                 self.log_request()
                 soc.send("%s %s %s\r\n" % (
                     self.command,
-                    urlparse.urlunparse(('', '', path, params, query, '')),
+                    urlreq.urlunparse(('', '', path, params, query, '')),
                     self.request_version))
                 self.headers['Connection'] = 'close'
                 del self.headers['Proxy-Connection']